/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
/* Deferred work handlers; defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a synchronous HCI request (stored in hdev->req_status) */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize synchronous requests against a single controller */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
/* ---- HCI notifications ---- */

/* Forward a device event (register/unregister/up/down) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
93 size_t buf_size = min(count, (sizeof(buf)-1));
97 if (!test_bit(HCI_UP, &hdev->flags))
100 if (copy_from_user(buf, user_buf, buf_size))
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 hci_req_unlock(hdev);
122 err = -bt_to_errno(skb->data[0]);
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
133 static const struct file_operations dut_mode_fops = {
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
200 .release = single_release,
203 static int whitelist_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
216 static int whitelist_open(struct inode *inode, struct file *file)
218 return single_open(file, whitelist_show, inode->i_private);
221 static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
225 .release = single_release,
228 static int uuids_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct bt_uuid *uuid;
234 list_for_each_entry(uuid, &hdev->uuids, list) {
237 /* The Bluetooth UUID values are stored in big endian,
238 * but with reversed byte order. So convert them into
239 * the right order for the %pUb modifier.
241 for (i = 0; i < 16; i++)
242 val[i] = uuid->uuid[15 - i];
244 seq_printf(f, "%pUb\n", val);
246 hci_dev_unlock(hdev);
251 static int uuids_open(struct inode *inode, struct file *file)
253 return single_open(file, uuids_show, inode->i_private);
256 static const struct file_operations uuids_fops = {
260 .release = single_release,
263 static int inquiry_cache_show(struct seq_file *f, void *p)
265 struct hci_dev *hdev = f->private;
266 struct discovery_state *cache = &hdev->discovery;
267 struct inquiry_entry *e;
271 list_for_each_entry(e, &cache->all, all) {
272 struct inquiry_data *data = &e->data;
273 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
275 data->pscan_rep_mode, data->pscan_period_mode,
276 data->pscan_mode, data->dev_class[2],
277 data->dev_class[1], data->dev_class[0],
278 __le16_to_cpu(data->clock_offset),
279 data->rssi, data->ssp_mode, e->timestamp);
282 hci_dev_unlock(hdev);
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
289 return single_open(file, inquiry_cache_show, inode->i_private);
292 static const struct file_operations inquiry_cache_fops = {
293 .open = inquiry_cache_open,
296 .release = single_release,
299 static int link_keys_show(struct seq_file *f, void *ptr)
301 struct hci_dev *hdev = f->private;
302 struct list_head *p, *n;
305 list_for_each_safe(p, n, &hdev->link_keys) {
306 struct link_key *key = list_entry(p, struct link_key, list);
307 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
310 hci_dev_unlock(hdev);
315 static int link_keys_open(struct inode *inode, struct file *file)
317 return single_open(file, link_keys_show, inode->i_private);
320 static const struct file_operations link_keys_fops = {
321 .open = link_keys_open,
324 .release = single_release,
327 static int dev_class_show(struct seq_file *f, void *ptr)
329 struct hci_dev *hdev = f->private;
332 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333 hdev->dev_class[1], hdev->dev_class[0]);
334 hci_dev_unlock(hdev);
339 static int dev_class_open(struct inode *inode, struct file *file)
341 return single_open(file, dev_class_show, inode->i_private);
344 static const struct file_operations dev_class_fops = {
345 .open = dev_class_open,
348 .release = single_release,
351 static int voice_setting_get(void *data, u64 *val)
353 struct hci_dev *hdev = data;
356 *val = hdev->voice_setting;
357 hci_dev_unlock(hdev);
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363 NULL, "0x%4.4llx\n");
365 static int auto_accept_delay_set(void *data, u64 val)
367 struct hci_dev *hdev = data;
370 hdev->auto_accept_delay = val;
371 hci_dev_unlock(hdev);
376 static int auto_accept_delay_get(void *data, u64 *val)
378 struct hci_dev *hdev = data;
381 *val = hdev->auto_accept_delay;
382 hci_dev_unlock(hdev);
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 auto_accept_delay_set, "%llu\n");
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 size_t count, loff_t *ppos)
393 struct hci_dev *hdev = file->private_data;
396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
402 static ssize_t force_sc_support_write(struct file *file,
403 const char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
408 size_t buf_size = min(count, (sizeof(buf)-1));
411 if (test_bit(HCI_UP, &hdev->flags))
414 if (copy_from_user(buf, user_buf, buf_size))
417 buf[buf_size] = '\0';
418 if (strtobool(buf, &enable))
421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
429 static const struct file_operations force_sc_support_fops = {
431 .read = force_sc_support_read,
432 .write = force_sc_support_write,
433 .llseek = default_llseek,
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 size_t count, loff_t *ppos)
439 struct hci_dev *hdev = file->private_data;
442 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
445 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
448 static const struct file_operations sc_only_mode_fops = {
450 .read = sc_only_mode_read,
451 .llseek = default_llseek,
454 static int idle_timeout_set(void *data, u64 val)
456 struct hci_dev *hdev = data;
458 if (val != 0 && (val < 500 || val > 3600000))
462 hdev->idle_timeout = val;
463 hci_dev_unlock(hdev);
468 static int idle_timeout_get(void *data, u64 *val)
470 struct hci_dev *hdev = data;
473 *val = hdev->idle_timeout;
474 hci_dev_unlock(hdev);
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 idle_timeout_set, "%llu\n");
482 static int rpa_timeout_set(void *data, u64 val)
484 struct hci_dev *hdev = data;
486 /* Require the RPA timeout to be at least 30 seconds and at most
489 if (val < 30 || val > (60 * 60 * 24))
493 hdev->rpa_timeout = val;
494 hci_dev_unlock(hdev);
499 static int rpa_timeout_get(void *data, u64 *val)
501 struct hci_dev *hdev = data;
504 *val = hdev->rpa_timeout;
505 hci_dev_unlock(hdev);
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 rpa_timeout_set, "%llu\n");
513 static int sniff_min_interval_set(void *data, u64 val)
515 struct hci_dev *hdev = data;
517 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
521 hdev->sniff_min_interval = val;
522 hci_dev_unlock(hdev);
527 static int sniff_min_interval_get(void *data, u64 *val)
529 struct hci_dev *hdev = data;
532 *val = hdev->sniff_min_interval;
533 hci_dev_unlock(hdev);
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 sniff_min_interval_set, "%llu\n");
541 static int sniff_max_interval_set(void *data, u64 val)
543 struct hci_dev *hdev = data;
545 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
549 hdev->sniff_max_interval = val;
550 hci_dev_unlock(hdev);
555 static int sniff_max_interval_get(void *data, u64 *val)
557 struct hci_dev *hdev = data;
560 *val = hdev->sniff_max_interval;
561 hci_dev_unlock(hdev);
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 sniff_max_interval_set, "%llu\n");
569 static int conn_info_min_age_set(void *data, u64 val)
571 struct hci_dev *hdev = data;
573 if (val == 0 || val > hdev->conn_info_max_age)
577 hdev->conn_info_min_age = val;
578 hci_dev_unlock(hdev);
583 static int conn_info_min_age_get(void *data, u64 *val)
585 struct hci_dev *hdev = data;
588 *val = hdev->conn_info_min_age;
589 hci_dev_unlock(hdev);
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 conn_info_min_age_set, "%llu\n");
597 static int conn_info_max_age_set(void *data, u64 val)
599 struct hci_dev *hdev = data;
601 if (val == 0 || val < hdev->conn_info_min_age)
605 hdev->conn_info_max_age = val;
606 hci_dev_unlock(hdev);
611 static int conn_info_max_age_get(void *data, u64 *val)
613 struct hci_dev *hdev = data;
616 *val = hdev->conn_info_max_age;
617 hci_dev_unlock(hdev);
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 conn_info_max_age_set, "%llu\n");
625 static int identity_show(struct seq_file *f, void *p)
627 struct hci_dev *hdev = f->private;
633 hci_copy_identity_address(hdev, &addr, &addr_type);
635 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636 16, hdev->irk, &hdev->rpa);
638 hci_dev_unlock(hdev);
643 static int identity_open(struct inode *inode, struct file *file)
645 return single_open(file, identity_show, inode->i_private);
648 static const struct file_operations identity_fops = {
649 .open = identity_open,
652 .release = single_release,
655 static int random_address_show(struct seq_file *f, void *p)
657 struct hci_dev *hdev = f->private;
660 seq_printf(f, "%pMR\n", &hdev->random_addr);
661 hci_dev_unlock(hdev);
666 static int random_address_open(struct inode *inode, struct file *file)
668 return single_open(file, random_address_show, inode->i_private);
671 static const struct file_operations random_address_fops = {
672 .open = random_address_open,
675 .release = single_release,
678 static int static_address_show(struct seq_file *f, void *p)
680 struct hci_dev *hdev = f->private;
683 seq_printf(f, "%pMR\n", &hdev->static_addr);
684 hci_dev_unlock(hdev);
689 static int static_address_open(struct inode *inode, struct file *file)
691 return single_open(file, static_address_show, inode->i_private);
694 static const struct file_operations static_address_fops = {
695 .open = static_address_open,
698 .release = single_release,
701 static ssize_t force_static_address_read(struct file *file,
702 char __user *user_buf,
703 size_t count, loff_t *ppos)
705 struct hci_dev *hdev = file->private_data;
708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
714 static ssize_t force_static_address_write(struct file *file,
715 const char __user *user_buf,
716 size_t count, loff_t *ppos)
718 struct hci_dev *hdev = file->private_data;
720 size_t buf_size = min(count, (sizeof(buf)-1));
723 if (test_bit(HCI_UP, &hdev->flags))
726 if (copy_from_user(buf, user_buf, buf_size))
729 buf[buf_size] = '\0';
730 if (strtobool(buf, &enable))
733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
741 static const struct file_operations force_static_address_fops = {
743 .read = force_static_address_read,
744 .write = force_static_address_write,
745 .llseek = default_llseek,
748 static int white_list_show(struct seq_file *f, void *ptr)
750 struct hci_dev *hdev = f->private;
751 struct bdaddr_list *b;
754 list_for_each_entry(b, &hdev->le_white_list, list)
755 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 hci_dev_unlock(hdev);
761 static int white_list_open(struct inode *inode, struct file *file)
763 return single_open(file, white_list_show, inode->i_private);
766 static const struct file_operations white_list_fops = {
767 .open = white_list_open,
770 .release = single_release,
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
775 struct hci_dev *hdev = f->private;
776 struct list_head *p, *n;
779 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 &irk->bdaddr, irk->addr_type,
783 16, irk->val, &irk->rpa);
785 hci_dev_unlock(hdev);
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
792 return single_open(file, identity_resolving_keys_show,
796 static const struct file_operations identity_resolving_keys_fops = {
797 .open = identity_resolving_keys_open,
800 .release = single_release,
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
805 struct hci_dev *hdev = f->private;
806 struct list_head *p, *n;
809 list_for_each_safe(p, n, &hdev->long_term_keys) {
810 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814 __le64_to_cpu(ltk->rand), 16, ltk->val);
816 hci_dev_unlock(hdev);
821 static int long_term_keys_open(struct inode *inode, struct file *file)
823 return single_open(file, long_term_keys_show, inode->i_private);
826 static const struct file_operations long_term_keys_fops = {
827 .open = long_term_keys_open,
830 .release = single_release,
833 static int conn_min_interval_set(void *data, u64 val)
835 struct hci_dev *hdev = data;
837 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
841 hdev->le_conn_min_interval = val;
842 hci_dev_unlock(hdev);
847 static int conn_min_interval_get(void *data, u64 *val)
849 struct hci_dev *hdev = data;
852 *val = hdev->le_conn_min_interval;
853 hci_dev_unlock(hdev);
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 conn_min_interval_set, "%llu\n");
861 static int conn_max_interval_set(void *data, u64 val)
863 struct hci_dev *hdev = data;
865 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
869 hdev->le_conn_max_interval = val;
870 hci_dev_unlock(hdev);
875 static int conn_max_interval_get(void *data, u64 *val)
877 struct hci_dev *hdev = data;
880 *val = hdev->le_conn_max_interval;
881 hci_dev_unlock(hdev);
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 conn_max_interval_set, "%llu\n");
889 static int conn_latency_set(void *data, u64 val)
891 struct hci_dev *hdev = data;
897 hdev->le_conn_latency = val;
898 hci_dev_unlock(hdev);
903 static int conn_latency_get(void *data, u64 *val)
905 struct hci_dev *hdev = data;
908 *val = hdev->le_conn_latency;
909 hci_dev_unlock(hdev);
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 conn_latency_set, "%llu\n");
917 static int supervision_timeout_set(void *data, u64 val)
919 struct hci_dev *hdev = data;
921 if (val < 0x000a || val > 0x0c80)
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
931 static int supervision_timeout_get(void *data, u64 *val)
933 struct hci_dev *hdev = data;
936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 supervision_timeout_set, "%llu\n");
945 static int adv_channel_map_set(void *data, u64 val)
947 struct hci_dev *hdev = data;
949 if (val < 0x01 || val > 0x07)
953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
959 static int adv_channel_map_get(void *data, u64 *val)
961 struct hci_dev *hdev = data;
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
973 static int device_list_show(struct seq_file *f, void *ptr)
975 struct hci_dev *hdev = f->private;
976 struct hci_conn_params *p;
979 list_for_each_entry(p, &hdev->le_conn_params, list) {
980 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
983 hci_dev_unlock(hdev);
988 static int device_list_open(struct inode *inode, struct file *file)
990 return single_open(file, device_list_show, inode->i_private);
993 static const struct file_operations device_list_fops = {
994 .open = device_list_open,
997 .release = single_release,
1000 /* ---- HCI requests ---- */
1002 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1004 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1006 if (hdev->req_status == HCI_REQ_PEND) {
1007 hdev->req_result = result;
1008 hdev->req_status = HCI_REQ_DONE;
1009 wake_up_interruptible(&hdev->req_wait_q);
1013 static void hci_req_cancel(struct hci_dev *hdev, int err)
1015 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1017 if (hdev->req_status == HCI_REQ_PEND) {
1018 hdev->req_result = err;
1019 hdev->req_status = HCI_REQ_CANCELED;
1020 wake_up_interruptible(&hdev->req_wait_q);
1024 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1027 struct hci_ev_cmd_complete *ev;
1028 struct hci_event_hdr *hdr;
1029 struct sk_buff *skb;
1033 skb = hdev->recv_evt;
1034 hdev->recv_evt = NULL;
1036 hci_dev_unlock(hdev);
1039 return ERR_PTR(-ENODATA);
1041 if (skb->len < sizeof(*hdr)) {
1042 BT_ERR("Too short HCI event");
1046 hdr = (void *) skb->data;
1047 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1050 if (hdr->evt != event)
1055 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1056 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1060 if (skb->len < sizeof(*ev)) {
1061 BT_ERR("Too short cmd_complete event");
1065 ev = (void *) skb->data;
1066 skb_pull(skb, sizeof(*ev));
1068 if (opcode == __le16_to_cpu(ev->opcode))
1071 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1072 __le16_to_cpu(ev->opcode));
1076 return ERR_PTR(-ENODATA);
1079 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1080 const void *param, u8 event, u32 timeout)
1082 DECLARE_WAITQUEUE(wait, current);
1083 struct hci_request req;
1086 BT_DBG("%s", hdev->name);
1088 hci_req_init(&req, hdev);
1090 hci_req_add_ev(&req, opcode, plen, param, event);
1092 hdev->req_status = HCI_REQ_PEND;
1094 err = hci_req_run(&req, hci_req_sync_complete);
1096 return ERR_PTR(err);
1098 add_wait_queue(&hdev->req_wait_q, &wait);
1099 set_current_state(TASK_INTERRUPTIBLE);
1101 schedule_timeout(timeout);
1103 remove_wait_queue(&hdev->req_wait_q, &wait);
1105 if (signal_pending(current))
1106 return ERR_PTR(-EINTR);
1108 switch (hdev->req_status) {
1110 err = -bt_to_errno(hdev->req_result);
1113 case HCI_REQ_CANCELED:
1114 err = -hdev->req_result;
1122 hdev->req_status = hdev->req_result = 0;
1124 BT_DBG("%s end: err %d", hdev->name, err);
1127 return ERR_PTR(err);
1129 return hci_get_cmd_complete(hdev, opcode, event);
1131 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1133 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1134 const void *param, u32 timeout)
1136 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1138 EXPORT_SYMBOL(__hci_cmd_sync);
1140 /* Execute request and wait for completion. */
1141 static int __hci_req_sync(struct hci_dev *hdev,
1142 void (*func)(struct hci_request *req,
1144 unsigned long opt, __u32 timeout)
1146 struct hci_request req;
1147 DECLARE_WAITQUEUE(wait, current);
1150 BT_DBG("%s start", hdev->name);
1152 hci_req_init(&req, hdev);
1154 hdev->req_status = HCI_REQ_PEND;
1158 err = hci_req_run(&req, hci_req_sync_complete);
1160 hdev->req_status = 0;
1162 /* ENODATA means the HCI request command queue is empty.
1163 * This can happen when a request with conditionals doesn't
1164 * trigger any commands to be sent. This is normal behavior
1165 * and should not trigger an error return.
1167 if (err == -ENODATA)
1173 add_wait_queue(&hdev->req_wait_q, &wait);
1174 set_current_state(TASK_INTERRUPTIBLE);
1176 schedule_timeout(timeout);
1178 remove_wait_queue(&hdev->req_wait_q, &wait);
1180 if (signal_pending(current))
1183 switch (hdev->req_status) {
1185 err = -bt_to_errno(hdev->req_result);
1188 case HCI_REQ_CANCELED:
1189 err = -hdev->req_result;
1197 hdev->req_status = hdev->req_result = 0;
1199 BT_DBG("%s end: err %d", hdev->name, err);
1204 static int hci_req_sync(struct hci_dev *hdev,
1205 void (*req)(struct hci_request *req,
1207 unsigned long opt, __u32 timeout)
1211 if (!test_bit(HCI_UP, &hdev->flags))
1214 /* Serialize all requests */
1216 ret = __hci_req_sync(hdev, req, opt, timeout);
1217 hci_req_unlock(hdev);
1222 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1224 BT_DBG("%s %ld", req->hdev->name, opt);
1227 set_bit(HCI_RESET, &req->hdev->flags);
1228 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1231 static void bredr_init(struct hci_request *req)
1233 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1235 /* Read Local Supported Features */
1236 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1238 /* Read Local Version */
1239 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1241 /* Read BD Address */
1242 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1245 static void amp_init(struct hci_request *req)
1247 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1249 /* Read Local Version */
1250 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1252 /* Read Local Supported Commands */
1253 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1255 /* Read Local Supported Features */
1256 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1258 /* Read Local AMP Info */
1259 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1261 /* Read Data Blk size */
1262 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1264 /* Read Flow Control Mode */
1265 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1267 /* Read Location Data */
1268 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1271 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1273 struct hci_dev *hdev = req->hdev;
1275 BT_DBG("%s %ld", hdev->name, opt);
1278 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1279 hci_reset_req(req, 0);
1281 switch (hdev->dev_type) {
1291 BT_ERR("Unknown device type %d", hdev->dev_type);
1296 static void bredr_setup(struct hci_request *req)
1298 struct hci_dev *hdev = req->hdev;
1303 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1304 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1306 /* Read Class of Device */
1307 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1309 /* Read Local Name */
1310 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1312 /* Read Voice Setting */
1313 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1315 /* Read Number of Supported IAC */
1316 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1318 /* Read Current IAC LAP */
1319 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1321 /* Clear Event Filters */
1322 flt_type = HCI_FLT_CLEAR_ALL;
1323 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1325 /* Connection accept timeout ~20 secs */
1326 param = cpu_to_le16(0x7d00);
1327 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1329 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1330 * but it does not support page scan related HCI commands.
1332 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1333 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1334 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1338 static void le_setup(struct hci_request *req)
1340 struct hci_dev *hdev = req->hdev;
1342 /* Read LE Buffer Size */
1343 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1345 /* Read LE Local Supported Features */
1346 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1348 /* Read LE Supported States */
1349 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1351 /* Read LE White List Size */
1352 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1354 /* Clear LE White List */
1355 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1357 /* LE-only controllers have LE implicitly enabled */
1358 if (!lmp_bredr_capable(hdev))
1359 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1362 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1364 if (lmp_ext_inq_capable(hdev))
1367 if (lmp_inq_rssi_capable(hdev))
1370 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1371 hdev->lmp_subver == 0x0757)
1374 if (hdev->manufacturer == 15) {
1375 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1377 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1379 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1383 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1384 hdev->lmp_subver == 0x1805)
1390 static void hci_setup_inquiry_mode(struct hci_request *req)
1394 mode = hci_get_inquiry_mode(req->hdev);
1396 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1399 static void hci_setup_event_mask(struct hci_request *req)
1401 struct hci_dev *hdev = req->hdev;
1403 /* The second byte is 0xff instead of 0x9f (two reserved bits
1404 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1405 * command otherwise.
1407 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1409 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1410 * any event mask for pre 1.2 devices.
1412 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1415 if (lmp_bredr_capable(hdev)) {
1416 events[4] |= 0x01; /* Flow Specification Complete */
1417 events[4] |= 0x02; /* Inquiry Result with RSSI */
1418 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1419 events[5] |= 0x08; /* Synchronous Connection Complete */
1420 events[5] |= 0x10; /* Synchronous Connection Changed */
1422 /* Use a different default for LE-only devices */
1423 memset(events, 0, sizeof(events));
1424 events[0] |= 0x10; /* Disconnection Complete */
1425 events[1] |= 0x08; /* Read Remote Version Information Complete */
1426 events[1] |= 0x20; /* Command Complete */
1427 events[1] |= 0x40; /* Command Status */
1428 events[1] |= 0x80; /* Hardware Error */
1429 events[2] |= 0x04; /* Number of Completed Packets */
1430 events[3] |= 0x02; /* Data Buffer Overflow */
1432 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1433 events[0] |= 0x80; /* Encryption Change */
1434 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1438 if (lmp_inq_rssi_capable(hdev))
1439 events[4] |= 0x02; /* Inquiry Result with RSSI */
1441 if (lmp_sniffsubr_capable(hdev))
1442 events[5] |= 0x20; /* Sniff Subrating */
1444 if (lmp_pause_enc_capable(hdev))
1445 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1447 if (lmp_ext_inq_capable(hdev))
1448 events[5] |= 0x40; /* Extended Inquiry Result */
1450 if (lmp_no_flush_capable(hdev))
1451 events[7] |= 0x01; /* Enhanced Flush Complete */
1453 if (lmp_lsto_capable(hdev))
1454 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1456 if (lmp_ssp_capable(hdev)) {
1457 events[6] |= 0x01; /* IO Capability Request */
1458 events[6] |= 0x02; /* IO Capability Response */
1459 events[6] |= 0x04; /* User Confirmation Request */
1460 events[6] |= 0x08; /* User Passkey Request */
1461 events[6] |= 0x10; /* Remote OOB Data Request */
1462 events[6] |= 0x20; /* Simple Pairing Complete */
1463 events[7] |= 0x04; /* User Passkey Notification */
1464 events[7] |= 0x08; /* Keypress Notification */
1465 events[7] |= 0x10; /* Remote Host Supported
1466 * Features Notification
1470 if (lmp_le_capable(hdev))
1471 events[7] |= 0x20; /* LE Meta-Event */
1473 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage 2 of controller initialization, run from __hci_init(): queue
 * BR/EDR related setup commands (Read Local Commands, SSP mode or EIR
 * clearing, inquiry mode, inquiry TX power, extended features and
 * authentication) according to the controller's capabilities.
 * NOTE(review): this listing elides some lines (braces, blank lines
 * and a few statements); comments cover only the visible code.
 */
1476 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1478 struct hci_dev *hdev = req->hdev;
/* NOTE(review): the branch bodies around this condition appear elided;
 * as shown, only the clear_bit() call is visible - confirm against the
 * full source whether this runs on the capable or non-capable path.
 */
1480 if (lmp_bredr_capable(hdev))
1483 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1485 if (lmp_le_capable(hdev))
1488 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1489 * local supported commands HCI command.
1491 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1492 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1494 if (lmp_ssp_capable(hdev)) {
1495 /* When SSP is available, then the host features page
1496 * should also be available as well. However some
1497 * controllers list the max_page as 0 as long as SSP
1498 * has not been enabled. To achieve proper debugging
1499 * output, force the minimum max_page to 1 at least.
1501 hdev->max_page = 0x01;
1503 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1505 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1506 sizeof(mode), &mode);
/* SSP not enabled by the host: wipe the cached EIR data and queue a
 * Write EIR with an all-zero record (the else-branch opener is elided
 * in this listing).
 */
1508 struct hci_cp_write_eir cp;
1510 memset(hdev->eir, 0, sizeof(hdev->eir));
1511 memset(&cp, 0, sizeof(cp));
1513 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1517 if (lmp_inq_rssi_capable(hdev))
1518 hci_setup_inquiry_mode(req);
1520 if (lmp_inq_tx_pwr_capable(hdev))
1521 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1523 if (lmp_ext_feat_capable(hdev)) {
1524 struct hci_cp_read_local_ext_features cp;
1527 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1531 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1533 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Compose the default link policy bitmask from the controller's LMP
 * features (role switch, hold, sniff, park) and queue the
 * Write Default Link Policy command.
 */
1538 static void hci_setup_link_policy(struct hci_request *req)
1540 struct hci_dev *hdev = req->hdev;
1541 struct hci_cp_write_def_link_policy cp;
1542 u16 link_policy = 0;
1544 if (lmp_rswitch_capable(hdev))
1545 link_policy |= HCI_LP_RSWITCH;
1546 if (lmp_hold_capable(hdev))
1547 link_policy |= HCI_LP_HOLD;
1548 if (lmp_sniff_capable(hdev))
1549 link_policy |= HCI_LP_SNIFF;
1550 if (lmp_park_capable(hdev))
1551 link_policy |= HCI_LP_PARK;
1553 cp.policy = cpu_to_le16(link_policy);
1554 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported reflecting the host's HCI_LE_ENABLED
 * setting, but only when it differs from what the controller currently
 * reports.  LE-only controllers are skipped because they do not
 * support explicit enablement.
 * NOTE(review): the statement assigning cp.le appears elided from this
 * listing; only the cp.simul assignment is visible.
 */
1557 static void hci_set_le_support(struct hci_request *req)
1559 struct hci_dev *hdev = req->hdev;
1560 struct hci_cp_write_le_host_supported cp;
1562 /* LE-only devices do not support explicit enablement */
1563 if (!lmp_bredr_capable(hdev))
1566 memset(&cp, 0, sizeof(cp));
1568 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1570 cp.simul = lmp_le_br_capable(hdev);
/* Only send the command when the host setting actually changes */
1573 if (cp.le != lmp_host_le_capable(hdev))
1574 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build event mask page 2 based on Connectionless Slave Broadcast
 * master/slave support and authenticated payload timeout (ping)
 * support, then queue Set Event Mask Page 2.
 */
1578 static void hci_set_event_mask_page_2(struct hci_request *req)
1580 struct hci_dev *hdev = req->hdev;
1581 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1583 /* If Connectionless Slave Broadcast master role is supported
1584 * enable all necessary events for it.
1586 if (lmp_csb_master_capable(hdev)) {
1587 events[1] |= 0x40; /* Triggered Clock Capture */
1588 events[1] |= 0x80; /* Synchronization Train Complete */
1589 events[2] |= 0x10; /* Slave Page Response Timeout */
1590 events[2] |= 0x20; /* CSB Channel Map Change */
1593 /* If Connectionless Slave Broadcast slave role is supported
1594 * enable all necessary events for it.
1596 if (lmp_csb_slave_capable(hdev)) {
1597 events[2] |= 0x01; /* Synchronization Train Received */
1598 events[2] |= 0x02; /* CSB Receive */
1599 events[2] |= 0x04; /* CSB Timeout */
1600 events[2] |= 0x08; /* Truncated Page Complete */
1603 /* Enable Authenticated Payload Timeout Expired event if supported */
1604 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
/* NOTE(review): the events[] bit set for the ping case is elided here */
1607 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage 3 of controller initialization: set the event mask, optionally
 * delete stored link keys (quirk aware), set the default link policy,
 * configure LE-specific state (LE event mask, advertising TX power,
 * LE host support) and read feature pages beyond page 1.
 */
1610 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1612 struct hci_dev *hdev = req->hdev;
1615 hci_setup_event_mask(req);
1617 /* Some Broadcom based Bluetooth controllers do not support the
1618 * Delete Stored Link Key command. They are clearly indicating its
1619 * absence in the bit mask of supported commands.
1621 * Check the supported commands and only if the command is marked
1622 * as supported send it. If not supported assume that the controller
1623 * does not have actual support for stored link keys which makes this
1624 * command redundant anyway.
1626 * Some controllers indicate that they support handling deleting
1627 * stored link keys, but they don't. The quirk lets a driver
1628 * just disable this command.
1630 if (hdev->commands[6] & 0x80 &&
1631 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1632 struct hci_cp_delete_stored_link_key cp;
1634 bacpy(&cp.bdaddr, BDADDR_ANY);
1635 cp.delete_all = 0x01;
1636 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1640 if (hdev->commands[5] & 0x10)
1641 hci_setup_link_policy(req);
1643 if (lmp_le_capable(hdev)) {
1646 memset(events, 0, sizeof(events));
1649 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1650 events[0] |= 0x10; /* LE Long Term Key Request */
1652 /* If controller supports the Connection Parameters Request
1653 * Link Layer Procedure, enable the corresponding event.
1655 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1656 events[0] |= 0x20; /* LE Remote Connection
1660 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1663 if (hdev->commands[25] & 0x40) {
1664 /* Read LE Advertising Channel TX Power */
1665 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1668 hci_set_le_support(req);
1671 /* Read features beyond page 1 if available */
1672 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1673 struct hci_cp_read_local_ext_features cp;
1676 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage 4 of controller initialization: set event mask page 2 when
 * supported, read Synchronization Train parameters, and enable
 * Secure Connections when supported (or force-enabled via debugfs)
 * and configured.
 */
1681 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1683 struct hci_dev *hdev = req->hdev;
1685 /* Set event mask page 2 if the HCI command for it is supported */
1686 if (hdev->commands[22] & 0x04)
1687 hci_set_event_mask_page_2(req);
1689 /* Check for Synchronization Train support */
1690 if (lmp_sync_train_capable(hdev))
1691 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1693 /* Enable Secure Connections if supported and configured */
1694 if ((lmp_sc_capable(hdev) ||
1695 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1696 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1698 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1699 sizeof(support), &support);
/* Full controller initialization: run init stages 1-4 synchronously
 * (stages 2-4 only for BR/EDR type controllers) and, during the
 * initial HCI_SETUP phase only, create the device's debugfs entries.
 * Returns 0 on success or a negative error from __hci_req_sync().
 * NOTE(review): this listing elides a number of lines (error returns,
 * some file-ops arguments); comments cover only the visible code.
 */
1703 static int __hci_init(struct hci_dev *hdev)
1707 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1711 /* The Device Under Test (DUT) mode is special and available for
1712 * all controller types. So just create it early on.
1714 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1715 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1719 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1720 * BR/EDR/LE type controllers. AMP controllers only need the
1723 if (hdev->dev_type != HCI_BREDR)
1726 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1730 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1734 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1738 /* Only create debugfs entries during the initial setup
1739 * phase and not every time the controller gets powered on.
1741 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Generic entries available on every controller type */
1744 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1746 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1747 &hdev->manufacturer);
1748 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1749 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1750 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1752 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1754 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1756 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1757 &conn_info_min_age_fops);
1758 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1759 &conn_info_max_age_fops);
/* BR/EDR specific entries */
1761 if (lmp_bredr_capable(hdev)) {
1762 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1763 hdev, &inquiry_cache_fops);
1764 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1765 hdev, &link_keys_fops);
1766 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1767 hdev, &dev_class_fops);
1768 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1769 hdev, &voice_setting_fops);
1772 if (lmp_ssp_capable(hdev)) {
1773 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1774 hdev, &auto_accept_delay_fops);
1775 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1776 hdev, &force_sc_support_fops);
1777 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1778 hdev, &sc_only_mode_fops);
1781 if (lmp_sniff_capable(hdev)) {
1782 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1783 hdev, &idle_timeout_fops);
1784 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1785 hdev, &sniff_min_interval_fops);
1786 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1787 hdev, &sniff_max_interval_fops);
/* LE specific entries */
1790 if (lmp_le_capable(hdev)) {
1791 debugfs_create_file("identity", 0400, hdev->debugfs,
1792 hdev, &identity_fops);
1793 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1794 hdev, &rpa_timeout_fops);
1795 debugfs_create_file("random_address", 0444, hdev->debugfs,
1796 hdev, &random_address_fops);
1797 debugfs_create_file("static_address", 0444, hdev->debugfs,
1798 hdev, &static_address_fops);
1800 /* For controllers with a public address, provide a debug
1801 * option to force the usage of the configured static
1802 * address. By default the public address is used.
1804 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1805 debugfs_create_file("force_static_address", 0644,
1806 hdev->debugfs, hdev,
1807 &force_static_address_fops);
1809 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1810 &hdev->le_white_list_size);
1811 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1813 debugfs_create_file("identity_resolving_keys", 0400,
1814 hdev->debugfs, hdev,
1815 &identity_resolving_keys_fops);
1816 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1817 hdev, &long_term_keys_fops);
1818 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1819 hdev, &conn_min_interval_fops);
1820 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1821 hdev, &conn_max_interval_fops);
1822 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1823 hdev, &conn_latency_fops);
1824 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1825 hdev, &supervision_timeout_fops);
1826 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1827 hdev, &adv_channel_map_fops);
1828 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1830 debugfs_create_u16("discov_interleaved_timeout", 0644,
1832 &hdev->discov_interleaved_timeout);
/* Minimal init request used for unconfigured controllers: optional
 * HCI Reset (unless the reset-on-close quirk is set), Read Local
 * Version, and Read BD Address when the driver provides a set_bdaddr
 * callback.
 */
1838 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1840 struct hci_dev *hdev = req->hdev;
1842 BT_DBG("%s %ld", hdev->name, opt);
/* Reset first, matching the behavior of the normal init sequence */
1845 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1846 hci_reset_req(req, 0);
1848 /* Read Local Version */
1849 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1851 /* Read BD Address */
1852 if (hdev->set_bdaddr)
1853 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run hci_init0_req synchronously for an unconfigured controller;
 * drivers flagged HCI_QUIRK_RAW_DEVICE are skipped (the early-return
 * and final-return lines are elided in this listing).
 */
1856 static int __hci_unconf_init(struct hci_dev *hdev)
1860 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1863 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Queue Write Scan Enable with the scan mode taken from opt.
 * NOTE(review): the declaration assigning `scan` from opt is elided
 * in this listing.
 */
1870 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1874 BT_DBG("%s %x", req->hdev->name, scan);
1876 /* Inquiry and Page scans */
1877 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Queue Write Authentication Enable with the value taken from opt.
 * NOTE(review): the declaration assigning `auth` from opt is elided
 * in this listing.
 */
1880 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1884 BT_DBG("%s %x", req->hdev->name, auth);
1886 /* Authentication */
1887 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Queue Write Encryption Mode with the value taken from opt.
 * NOTE(review): the declaration assigning `encrypt` from opt is
 * elided in this listing.
 */
1890 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1894 BT_DBG("%s %x", req->hdev->name, encrypt);
1897 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Queue Write Default Link Policy with the little-endian policy value
 * taken from opt.
 */
1900 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1902 __le16 policy = cpu_to_le16(opt);
1904 BT_DBG("%s %x", req->hdev->name, policy);
1906 /* Default link policy */
1907 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1910 /* Get HCI device by index.
1911 * Device is held on return. */
/* Walks hci_dev_list under the read lock; hdev stays NULL when no
 * device with the given index exists.  On success the reference taken
 * via hci_dev_hold() must be released by the caller (hci_dev_put).
 */
1912 struct hci_dev *hci_dev_get(int index)
1914 struct hci_dev *hdev = NULL, *d;
1916 BT_DBG("%d", index);
1921 read_lock(&hci_dev_list_lock);
1922 list_for_each_entry(d, &hci_dev_list, list) {
1923 if (d->id == index) {
1924 hdev = hci_dev_hold(d);
1928 read_unlock(&hci_dev_list_lock);
1932 /* ---- Inquiry support ---- */
/* Whether a discovery procedure is currently running, keyed on the
 * discovery state being FINDING or RESOLVING.
 * NOTE(review): the per-case return statements are elided in this
 * listing; confirm the exact return values against the full source.
 */
1934 bool hci_discovery_active(struct hci_dev *hdev)
1936 struct discovery_state *discov = &hdev->discovery;
1938 switch (discov->state) {
1939 case DISCOVERY_FINDING:
1940 case DISCOVERY_RESOLVING:
/* Update hdev->discovery.state and notify the management interface of
 * discovering transitions.  On STOPPED the background scan is
 * restarted; no-op when the state does not actually change.
 */
1948 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1950 int old_state = hdev->discovery.state;
1952 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1954 if (old_state == state)
1957 hdev->discovery.state = state;
1960 case DISCOVERY_STOPPED:
1961 hci_update_background_scan(hdev);
/* STARTING -> STOPPED means discovery never really began; don't
 * report a discovering=0 event in that case.
 */
1963 if (old_state != DISCOVERY_STARTING)
1964 mgmt_discovering(hdev, 0);
1966 case DISCOVERY_STARTING:
1968 case DISCOVERY_FINDING:
1969 mgmt_discovering(hdev, 1);
1971 case DISCOVERY_RESOLVING:
1973 case DISCOVERY_STOPPING:
/* Drop all inquiry cache entries and reset the unknown/resolve lists.
 * NOTE(review): the per-entry unlink/free statements inside the loop
 * are elided in this listing.
 */
1978 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1980 struct discovery_state *cache = &hdev->discovery;
1981 struct inquiry_entry *p, *n;
1983 list_for_each_entry_safe(p, n, &cache->all, all) {
1988 INIT_LIST_HEAD(&cache->unknown);
1989 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by bdaddr in the "all" list; NULL when
 * not present (return lines elided in this listing).
 */
1992 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1995 struct discovery_state *cache = &hdev->discovery;
1996 struct inquiry_entry *e;
1998 BT_DBG("cache %p, %pMR", cache, bdaddr);
2000 list_for_each_entry(e, &cache->all, all) {
2001 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an inquiry cache entry by bdaddr in the "unknown name" list;
 * NULL when not present (return lines elided in this listing).
 */
2008 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2011 struct discovery_state *cache = &hdev->discovery;
2012 struct inquiry_entry *e;
2014 BT_DBG("cache %p, %pMR", cache, bdaddr);
2016 list_for_each_entry(e, &cache->unknown, list) {
2017 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry in the name-resolve list: either the first entry with
 * the given name_state when bdaddr is BDADDR_ANY, or the entry with a
 * matching bdaddr (return lines elided in this listing).
 */
2024 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2028 struct discovery_state *cache = &hdev->discovery;
2029 struct inquiry_entry *e;
2031 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2033 list_for_each_entry(e, &cache->resolve, list) {
2034 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2036 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert ie into the resolve list so it stays ordered; the scan
 * compares |RSSI| against entries that are not pending a name request.
 * NOTE(review): the statements inside the loop that record the insert
 * position are elided in this listing.
 */
2043 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2044 struct inquiry_entry *ie)
2046 struct discovery_state *cache = &hdev->discovery;
2047 struct list_head *pos = &cache->resolve;
2048 struct inquiry_entry *p;
2050 list_del(&ie->list);
2052 list_for_each_entry(p, &cache->resolve, list) {
2053 if (p->name_state != NAME_PENDING &&
2054 abs(p->data.rssi) >= abs(ie->data.rssi))
2059 list_add(&ie->list, pos);
/* Insert or refresh the inquiry cache entry for data->bdaddr and
 * build the mgmt "device found" flags (legacy pairing, confirm name).
 * Existing entries get an RSSI refresh while a name is needed; new
 * entries are allocated and linked into the all/unknown lists.
 * Returns the computed flags.
 * NOTE(review): several lines (returns, branch openers, name_known
 * handling) are elided in this listing.
 */
2062 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2065 struct discovery_state *cache = &hdev->discovery;
2066 struct inquiry_entry *ie;
2069 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Any cached OOB data for this address is stale once it shows up in
 * an inquiry response.
 */
2071 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2073 if (!data->ssp_mode)
2074 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2076 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2078 if (!ie->data.ssp_mode)
2079 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2081 if (ie->name_state == NAME_NEEDED &&
2082 data->rssi != ie->data.rssi) {
2083 ie->data.rssi = data->rssi;
2084 hci_inquiry_cache_update_resolve(hdev, ie);
2090 /* Entry not in the cache. Add new one. */
2091 ie = kzalloc(sizeof(struct inquiry_entry), GFP_KERNEL);
2093 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2097 list_add(&ie->all, &cache->all);
2100 ie->name_state = NAME_KNOWN;
2102 ie->name_state = NAME_NOT_KNOWN;
2103 list_add(&ie->list, &cache->unknown);
2107 if (name_known && ie->name_state != NAME_KNOWN &&
2108 ie->name_state != NAME_PENDING) {
2109 ie->name_state = NAME_KNOWN;
2110 list_del(&ie->list);
2113 memcpy(&ie->data, data, sizeof(*data));
2114 ie->timestamp = jiffies;
2115 cache->timestamp = jiffies;
2117 if (ie->name_state == NAME_NOT_KNOWN)
2118 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to num entries from the inquiry cache into buf as
 * struct inquiry_info records, returning the number copied.
 * NOTE(review): the copied-counter increment, loop bound check and
 * return line are elided in this listing.
 */
2124 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2126 struct discovery_state *cache = &hdev->discovery;
2127 struct inquiry_info *info = (struct inquiry_info *) buf;
2128 struct inquiry_entry *e;
2131 list_for_each_entry(e, &cache->all, all) {
2132 struct inquiry_data *data = &e->data;
2137 bacpy(&info->bdaddr, &data->bdaddr);
2138 info->pscan_rep_mode = data->pscan_rep_mode;
2139 info->pscan_period_mode = data->pscan_period_mode;
2140 info->pscan_mode = data->pscan_mode;
2141 memcpy(info->dev_class, data->dev_class, 3);
2142 info->clock_offset = data->clock_offset;
2148 BT_DBG("cache %p, copied %d", cache, copied);
/* Queue an HCI Inquiry command built from the hci_inquiry_req passed
 * via opt (lap, length, num_rsp); skipped while an inquiry is already
 * in progress (early-return line elided in this listing).
 */
2152 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2154 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2155 struct hci_dev *hdev = req->hdev;
2156 struct hci_cp_inquiry cp;
2158 BT_DBG("%s", hdev->name);
2160 if (test_bit(HCI_INQUIRY, &hdev->flags))
2164 memcpy(&cp.lap, &ir->lap, 3);
2165 cp.length = ir->length;
2166 cp.num_rsp = ir->num_rsp;
2167 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action function: returns nonzero (abort the wait)
 * when the current task has a signal pending.
 */
2170 static int wait_inquiry(void *word)
2173 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate device state (not user channel,
 * configured, BR/EDR and enabled), optionally flush a stale inquiry
 * cache and run a fresh inquiry, wait interruptibly for it to finish,
 * then copy the cached results back to user space.
 * NOTE(review): locking calls, gotos and return paths are partially
 * elided in this listing.
 */
2176 int hci_inquiry(void __user *arg)
2178 __u8 __user *ptr = arg;
2179 struct hci_inquiry_req ir;
2180 struct hci_dev *hdev;
2181 int err = 0, do_inquiry = 0, max_rsp;
2185 if (copy_from_user(&ir, ptr, sizeof(ir)))
2188 hdev = hci_dev_get(ir.dev_id)ev_id);
2192 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2197 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2202 if (hdev->dev_type != HCI_BREDR) {
2207 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2213 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2214 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2215 hci_inquiry_cache_flush(hdev);
2218 hci_dev_unlock(hdev);
2220 timeo = ir.length * msecs_to_jiffies(2000);
2223 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2228 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2229 * cleared). If it is interrupted by a signal, return -EINTR.
2231 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2232 TASK_INTERRUPTIBLE))
2236 /* for unlimited number of responses we will use buffer with
2239 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2241 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2242 * copy it to the user space.
2244 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2251 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2252 hci_dev_unlock(hdev);
2254 BT_DBG("num_rsp %d", ir.num_rsp);
2256 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2258 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Bring the controller up: validate rfkill and address preconditions,
 * call the driver's open(), run the appropriate init sequence (setup,
 * unconfigured or full __hci_init), and on success mark HCI_UP and
 * notify mgmt.  On failure the work items and pending commands are
 * flushed and the driver is closed again.
 * NOTE(review): locking calls, gotos and several branch openers are
 * elided in this listing.
 */
2271 static int hci_dev_do_open(struct hci_dev *hdev)
2275 BT_DBG("%s %p", hdev->name, hdev)ev, hdev);
2279 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2284 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2285 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2286 /* Check for rfkill but allow the HCI setup stage to
2287 * proceed (which in itself doesn't cause any RF activity).
2289 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2294 /* Check for valid public address or a configured static
2295 * random address, but let the HCI setup proceed to
2296 * be able to determine if there is a public address
2299 * In case of user channel usage, it is not important
2300 * if a public address or static random address is
2303 * This check is only valid for BR/EDR controllers
2304 * since AMP controllers do not have an address.
2306 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2307 hdev->dev_type == HCI_BREDR &&
2308 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2309 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2310 ret = -EADDRNOTAVAIL;
2315 if (test_bit(HCI_UP, &hdev->flags)) {
2320 if (hdev->open(hdev)) {
2325 atomic_set(&hdev->cmd_cnt, 1);
2326 set_bit(HCI_INIT, &hdev->flags);
2328 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2330 ret = hdev->setup(hdev);
2332 /* The transport driver can set these quirks before
2333 * creating the HCI device or in its setup callback.
2335 * In case any of them is set, the controller has to
2336 * start up as unconfigured.
2338 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2339 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2340 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2342 /* For an unconfigured controller it is required to
2343 * read at least the version information provided by
2344 * the Read Local Version Information command.
2346 * If the set_bdaddr driver callback is provided, then
2347 * also the original Bluetooth public device address
2348 * will be read using the Read BD Address command.
2350 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2351 ret = __hci_unconf_init(hdev);
2354 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2355 /* If public address change is configured, ensure that
2356 * the address gets programmed. If the driver does not
2357 * support changing the public address, fail the power
2360 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2362 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2364 ret = -EADDRNOTAVAIL;
2368 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2369 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2370 ret = __hci_init(hdev);
2373 clear_bit(HCI_INIT, &hdev->flags);
2377 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2378 set_bit(HCI_UP, &hdev->flags);
2379 hci_notify(hdev, HCI_DEV_UP);
2380 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2381 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2382 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2383 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2384 hdev->dev_type == HCI_BREDR) {
2386 mgmt_powered(hdev, 1);
2387 hci_dev_unlock(hdev);
2390 /* Init failed, cleanup */
2391 flush_work(&hdev->tx_work);
2392 flush_work(&hdev->cmd_work);
2393 flush_work(&hdev->rx_work);
2395 skb_queue_purge(&hdev->cmd_q);
2396 skb_queue_purge(&hdev->rx_q);
2401 if (hdev->sent_cmd) {
2402 kfree_skb(hdev->sent_cmd);
2403 hdev->sent_cmd = NULL;
/* Keep only the raw flag; everything else is volatile run state */
2407 hdev->flags &= BIT(HCI_RAW);
2411 hci_req_unlock(hdev);
2415 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl: resolve the device, reject unconfigured devices for
 * non user-channel use, cancel/flush pending power work so the setup
 * procedure has finished, set HCI_PAIRABLE for legacy (non-mgmt)
 * users and call hci_dev_do_open().
 */
2417 int hci_dev_open(__u16 dev)
2419 struct hci_dev *hdev;
2422 hdev = hci_dev_get(dev);
2426 /* Devices that are marked as unconfigured can only be powered
2427 * up as user channel. Trying to bring them up as normal devices
2428 * will result into a failure. Only user channel operation is
2431 * When this function is called for a user channel, the flag
2432 * HCI_USER_CHANNEL will be set first before attempting to
2435 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2436 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2441 /* We need to ensure that no other power on/off work is pending
2442 * before proceeding to call hci_dev_do_open. This is
2443 * particularly important if the setup procedure has not yet
2446 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447 cancel_delayed_work(&hdev->power_off);
2449 /* After this call it is guaranteed that the setup procedure
2450 * has finished. This means that error conditions like RFKILL
2451 * or no valid public or static random address apply.
2453 flush_workqueue(hdev->req_workqueue);
2455 /* For controllers not using the management interface and that
2456 * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
2457 * so that pairing works for them. Once the management interface
2458 * is in use this bit will be cleared again and userspace has
2459 * to explicitly enable it.
2461 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2462 !test_bit(HCI_MGMT, &hdev->dev_flags))
2463 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2465 err = hci_dev_do_open(hdev);
2472 /* This function requires the caller holds hdev->lock */
/* Remove every connection parameter entry from its pend_le_conns /
 * pend_le_reports action list, leaving the params themselves intact.
 */
2473 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2475 struct hci_conn_params *p;
2477 list_for_each_entry(p, &hdev->le_conn_params, list)
2478 list_del_init(&p->action);
2480 BT_DBG("All LE pending actions cleared");
/* Tear the controller down: cancel pending timers and delayed work,
 * flush the RX/TX/cmd work, clear discovery and connection state,
 * optionally send an HCI Reset (quirk and flag dependent), drop
 * queued frames plus the last sent command, then clear volatile flags
 * and report power-off to mgmt.
 * NOTE(review): locking calls, the driver flush/close invocations and
 * return paths are elided in this listing.
 */
2483 static int hci_dev_do_close(struct hci_dev *hdev)
2485 BT_DBG("%s %p", hdev->name, hdev);
2487 cancel_delayed_work(&hdev->power_off);
2489 hci_req_cancel(hdev, ENODEV);
/* Device was never up: just make sure the command timer is stopped */
2492 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2493 cancel_delayed_work_sync(&hdev->cmd_timer);
2494 hci_req_unlock(hdev);
2498 /* Flush RX and TX works */
2499 flush_work(&hdev->tx_work);
2500 flush_work(&hdev->rx_work);
2502 if (hdev->discov_timeout > 0) {
2503 cancel_delayed_work(&hdev->discov_off);
2504 hdev->discov_timeout = 0;
2505 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2506 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2509 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2510 cancel_delayed_work(&hdev->service_cache);
2512 cancel_delayed_work_sync(&hdev->le_scan_disable);
2514 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2515 cancel_delayed_work_sync(&hdev->rpa_expired);
2518 hci_inquiry_cache_flush(hdev);
2519 hci_conn_hash_flush(hdev);
2520 hci_pend_le_actions_clear(hdev);
2521 hci_dev_unlock(hdev);
2523 hci_notify(hdev, HCI_DEV_DOWN);
/* Reset the device before closing it, unless auto-off or an
 * unconfigured state makes that pointless, and only when the
 * reset-on-close quirk asks for it.
 */
2529 skb_queue_purge(&hdev->cmd_q);
2530 atomic_set(&hdev->cmd_cnt, 1);
2531 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2532 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2533 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2534 set_bit(HCI_INIT, &hdev->flags);
2535 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2536 clear_bit(HCI_INIT, &hdev->flags);
2539 /* flush cmd work */
2540 flush_work(&hdev->cmd_work);
2543 skb_queue_purge(&hdev->rx_q);
2544 skb_queue_purge(&hdev->cmd_q);
2545 skb_queue_purge(&hdev->raw_q);
2547 /* Drop last sent command */
2548 if (hdev->sent_cmd) {
2549 cancel_delayed_work_sync(&hdev->cmd_timer);
2550 kfree_skb(hdev->sent_cmd);
2551 hdev->sent_cmd = NULL;
2554 kfree_skb(hdev->recv_evt);
2555 hdev->recv_evt = NULL;
2557 /* After this point our queues are empty
2558 * and no tasks are scheduled. */
/* Keep only the raw flag; persistent dev_flags survive power cycles */
2562 hdev->flags &= BIT(HCI_RAW);
2563 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2565 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2566 if (hdev->dev_type == HCI_BREDR) {
2568 mgmt_powered(hdev, 0);
2569 hci_dev_unlock(hdev);
2573 /* Controller radio is available but is currently powered down */
2574 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2576 memset(hdev->eir, 0, sizeof(hdev->eir));
2577 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2578 bacpy(&hdev->random_addr, BDADDR_ANY);
2580 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl: resolve the device, refuse user-channel devices,
 * cancel any pending auto power-off and call hci_dev_do_close().
 */
2586 int hci_dev_close(__u16 dev)
2588 struct hci_dev *hdev;
2591 hdev = hci_dev_get(dev);
2595 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2601 cancel_delayed_work(&hdev->power_off);
2603 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: validate device state (up, not user channel,
 * configured), drop queued frames and cached discovery/connection
 * state, reset the quota counters and issue a synchronous HCI Reset.
 * NOTE(review): locking calls and error-path gotos are elided in this
 * listing.
 */
2610 int hci_dev_reset(__u16 dev)
2612 struct hci_dev *hdev;
2615 hdev = hci_dev_get(dev);
2621 if (!test_bit(HCI_UP, &hdev->flags)) {
2626 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2631 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2637 skb_queue_purge(&hdev->rx_q);
2638 skb_queue_purge(&hdev->cmd_q);
2641 hci_inquiry_cache_flush(hdev);
2642 hci_conn_hash_flush(hdev);
2643 hci_dev_unlock(hdev);
2648 atomic_set(&hdev->cmd_cnt, 1);
2649 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2651 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2654 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's statistics counters after the
 * usual user-channel / unconfigured checks.
 */
2659 int hci_dev_reset_stat(__u16 dev)
2661 struct hci_dev *hdev;
2664 hdev = hci_dev_get(dev);
2668 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2673 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2678 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Sync the HCI_CONNECTABLE / HCI_DISCOVERABLE flags with a scan mode
 * that was changed through a raw HCI command (non-mgmt path) and,
 * when mgmt is in use and something changed, re-enable BR/EDR,
 * refresh advertising data and emit New Settings.
 */
2685 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2687 bool conn_changed, discov_changed;
2689 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2691 if ((scan & SCAN_PAGE))
2692 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2695 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2698 if ((scan & SCAN_INQUIRY)) {
2699 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2702 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2703 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2707 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2710 if (conn_changed || discov_changed) {
2711 /* In case this was disabled through mgmt */
2712 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2714 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2715 mgmt_update_adv_data(hdev);
2717 mgmt_new_settings(hdev);
/* Legacy device-configuration ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE, HCISETPTYPE,
 * HCISETACLMTU, HCISETSCOMTU): validate device state, then run the
 * matching synchronous request or update the hdev fields directly.
 * NOTE(review): most case labels and the default branch are elided
 * in this listing.
 */
2721 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2723 struct hci_dev *hdev;
2724 struct hci_dev_req dr;
2727 if (copy_from_user(&dr, arg, sizeof(dr)))
2730 hdev = hci_dev_get(dr.dev_id);
2734 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2739 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2744 if (hdev->dev_type != HCI_BREDR) {
2749 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2756 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2761 if (!lmp_encrypt_capable(hdev)) {
2766 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2767 /* Auth must be enabled first */
2768 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2774 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2779 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2782 /* Ensure that the connectable and discoverable states
2783 * get correctly modified as this was a non-mgmt change.
2786 hci_update_scan_state(hdev, dr.dev_opt);
2790 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2794 case HCISETLINKMODE:
2795 hdev->link_mode = ((__u16) dr.dev_opt) &
2796 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2800 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the upper 16 bits and packet count below */
2804 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2805 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2809 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2810 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: return id/flags pairs for up to dev_num
 * registered devices.  Devices pending auto-off are reported as down
 * even though the transport is still running.
 */
2823 int hci_get_dev_list(void __user *arg)
2825 struct hci_dev *hdev;
2826 struct hci_dev_list_req *dl;
2827 struct hci_dev_req *dr;
2828 int n = 0, size, err;
2831 if (get_user(dev_num, (__u16 __user *) arg))
2834 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2837 size = sizeof(*dl) + dev_num * sizeof(*dr);
2839 dl = kzalloc(size, GFP_KERNEL);
2845 read_lock(&hci_dev_list_lock);
2846 list_for_each_entry(hdev, &hci_dev_list, list) {
2847 unsigned long flags = hdev->flags;
2849 /* When the auto-off is configured it means the transport
2850 * is running, but in that case still indicate that the
2851 * device is actually down.
2853 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2854 flags &= ~BIT(HCI_UP);
2856 (dr + n)->dev_id = hdev->id;
2857 (dr + n)->dev_opt = flags;
2862 read_unlock(&hci_dev_list_lock);
/* Shrink size to the number of entries actually filled in */
2865 size = sizeof(*dl) + n * sizeof(*dr);
2867 err = copy_to_user(arg, dl, size);
2870 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info with the device's
 * name, address, type, flags, MTU/packet quotas (ACL/SCO for BR/EDR,
 * LE values otherwise), link policy/mode, statistics and features.
 */
2873 int hci_get_dev_info(void __user *arg)
2875 struct hci_dev *hdev;
2876 struct hci_dev_info di;
2877 unsigned long flags;
2880 if (copy_from_user(&di, arg, sizeof(di)))
2883 hdev = hci_dev_get(di.dev_id);
2887 /* When the auto-off is configured it means the transport
2888 * is running, but in that case still indicate that the
2889 * device is actually down.
2891 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2892 flags = hdev->flags & ~BIT(HCI_UP);
2894 flags = hdev->flags;
2896 strcpy(di.name, hdev->name);
2897 di.bdaddr = hdev->bdaddr;
2898 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2900 di.pkt_type = hdev->pkt_type;
2901 if (lmp_bredr_capable(hdev)) {
2902 di.acl_mtu = hdev->acl_mtu;
2903 di.acl_pkts = hdev->acl_pkts;
2904 di.sco_mtu = hdev->sco_mtu;
2905 di.sco_pkts = hdev->sco_pkts;
/* LE-only controllers: report LE buffer info in the ACL fields */
2907 di.acl_mtu = hdev->le_mtu;
2908 di.acl_pkts = hdev->le_pkts;
2912 di.link_policy = hdev->link_policy;
2913 di.link_mode = hdev->link_mode;
2915 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2916 memcpy(&di.features, &hdev->features, sizeof(di.features));
2918 if (copy_to_user(arg, &di, sizeof(di)))
2926 /* ---- Interface to HCI drivers ---- */
/* rfkill set_block callback: mirror the rfkill state into the
 * HCI_RFKILLED flag and power the device down when it becomes blocked
 * outside of the setup/config phases.  User-channel devices are left
 * untouched (early-return line elided in this listing).
 */
2928 static int hci_rfkill_set_block(void *data, bool blocked)
2930 struct hci_dev *hdev = data;
2932 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2934 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2938 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2939 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2940 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2941 hci_dev_do_close(hdev);
2943 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table used when registering the rfkill switch. */
2949 static const struct rfkill_ops hci_rfkill_ops = {
2950 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device and then re-check error
 * conditions that were ignored during setup (rfkill, unconfigured,
 * missing address), turning the device back off if any still hold.
 * Also emits the mgmt Index Added event once SETUP/CONFIG completes.
 */
2953 static void hci_power_on(struct work_struct *work)
2955 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2958 BT_DBG("%s", hdev->name);
2960 err = hci_dev_do_open(hdev);
2962 mgmt_set_powered_failed(hdev, err);
2966 /* During the HCI setup phase, a few error conditions are
2967 * ignored and they need to be checked now. If they are still
2968 * valid, it is important to turn the device back off.
2970 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2971 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2972 (hdev->dev_type == HCI_BREDR &&
2973 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2974 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2975 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2976 hci_dev_do_close(hdev);
2977 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
/* Auto-off: schedule the delayed power-off unless mgmt takes over. */
2978 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2979 HCI_AUTO_OFF_TIMEOUT);
2982 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2983 /* For unconfigured devices, set the HCI_RAW flag
2984 * so that userspace can easily identify them.
2986 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2987 set_bit(HCI_RAW, &hdev->flags);
2989 /* For fully configured devices, this will send
2990 * the Index Added event. For unconfigured devices,
2991 * it will send Unconfigued Index Added event.
2993 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2994 * and no event will be send.
2996 mgmt_index_added(hdev);
2997 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2998 /* When the controller is now configured, then it
2999 * is important to clear the HCI_RAW flag.
3001 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3002 clear_bit(HCI_RAW, &hdev->flags);
3004 /* Powering on the controller with HCI_CONFIG set only
3005 * happens with the transition from unconfigured to
3006 * configured. This will send the Index Added event.
3008 mgmt_index_added(hdev);
/* Delayed power-off work item: simply close the device. */
3012 static void hci_power_off(struct work_struct *work)
3014 struct hci_dev *hdev = container_of(work, struct hci_dev,
3017 BT_DBG("%s", hdev->name);
3019 hci_dev_do_close(hdev);
/* Discoverable-timeout work item: let mgmt turn discoverability off. */
3022 static void hci_discov_off(struct work_struct *work)
3024 struct hci_dev *hdev;
3026 hdev = container_of(work, struct hci_dev, discov_off.work);
3028 BT_DBG("%s", hdev->name);
3030 mgmt_discoverable_timeout(hdev);
/* Unlink (and, per the elided lines, presumably free - TODO confirm)
 * every entry on the per-device UUID list.
 */
3033 void hci_uuids_clear(struct hci_dev *hdev)
3035 struct bt_uuid *uuid, *tmp;
3037 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3038 list_del(&uuid->list);
/* Drop all stored BR/EDR link keys. */
3043 void hci_link_keys_clear(struct hci_dev *hdev)
3045 struct list_head *p, *n;
3047 list_for_each_safe(p, n, &hdev->link_keys) {
3048 struct link_key *key;
3050 key = list_entry(p, struct link_key, list);
/* Drop all stored SMP Long Term Keys. */
3057 void hci_smp_ltks_clear(struct hci_dev *hdev)
3059 struct smp_ltk *k, *tmp;
3061 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Drop all stored SMP Identity Resolving Keys. */
3067 void hci_smp_irks_clear(struct hci_dev *hdev)
3069 struct smp_irk *k, *tmp;
3071 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up a stored BR/EDR link key by remote bdaddr; linear scan. */
3077 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3081 list_for_each_entry(k, &hdev->link_keys, list)
3082 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on its type, the previous key type and both sides' authentication
 * requirements (see Core Spec security manager rules).
 */
3088 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3089 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) - decision on the elided line. */
3092 if (key_type < 0x03)
3095 /* Debug keys are insecure so don't store them persistently */
3096 if (key_type == HCI_LK_DEBUG_COMBINATION)
3099 /* Changed combination key and there's no previous one */
3100 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3103 /* Security mode 3 case */
3107 /* Neither local nor remote side had no-bonding as requirement */
3108 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3111 /* Local side had dedicated bonding as requirement */
3112 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3115 /* Remote side had dedicated bonding as requirement */
3116 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3119 /* If none of the above criteria match, then don't store the key
/* True when the LTK type denotes the master (initiator) key. */
3124 static bool ltk_type_master(u8 type)
3126 return (type == SMP_LTK);
/* Find a Long Term Key matching EDIV, Rand and master/slave role. */
3129 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3134 list_for_each_entry(k, &hdev->long_term_keys, list) {
3135 if (k->ediv != ediv || k->rand != rand)
3138 if (ltk_type_master(k->type) != master)
/* Find a Long Term Key by remote identity address, address type and role. */
3147 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3148 u8 addr_type, bool master)
3152 list_for_each_entry(k, &hdev->long_term_keys, list)
3153 if (addr_type == k->bdaddr_type &&
3154 bacmp(bdaddr, &k->bdaddr) == 0 &&
3155 ltk_type_master(k->type) == master)
/* Resolve a Resolvable Private Address to a stored IRK. First try the
 * cached RPA of each IRK, then fall back to cryptographic resolution
 * (ah function via AES), caching the RPA on a match.
 */
3161 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3163 struct smp_irk *irk;
3165 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3166 if (!bacmp(&irk->rpa, rpa))
3170 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3171 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3172 bacpy(&irk->rpa, rpa);
/* Find a stored IRK by identity address and address type. */
3180 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3183 struct smp_irk *irk;
3185 /* Identity Address must be public or static random */
3186 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3189 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3190 if (addr_type == irk->addr_type &&
3191 bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for a remote device. Optionally
 * reports through *persistent whether the key should be kept across
 * reboots, as decided by hci_persistent_key().
 */
3198 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3199 bdaddr_t *bdaddr, u8 *val, u8 type,
3200 u8 pin_len, bool *persistent)
3202 struct link_key *key, *old_key;
3205 old_key = hci_find_link_key(hdev, bdaddr);
3207 old_key_type = old_key->type;
/* No previous key: 0xff marks "no old key" for the persistence logic. */
3210 old_key_type = conn ? conn->key_type : 0xff;
3211 key = kzalloc(sizeof(*key), GFP_KERNEL);
3214 list_add(&key->list, &hdev->link_keys);
3217 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3219 /* Some buggy controller combinations generate a changed
3220 * combination key for legacy pairing even when there's no
3222 if (type == HCI_LK_CHANGED_COMBINATION &&
3223 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3224 type = HCI_LK_COMBINATION;
3226 conn->key_type = type;
3229 bacpy(&key->bdaddr, bdaddr);
3230 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3231 key->pin_len = pin_len;
/* For changed-combination keys keep the original type on record. */
3233 if (type == HCI_LK_CHANGED_COMBINATION)
3234 key->type = old_key_type;
3239 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP Long Term Key for a remote identity address.
 * Reuses an existing entry matching address, type and master role.
 */
3245 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3246 u8 addr_type, u8 type, u8 authenticated,
3247 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3249 struct smp_ltk *key, *old_key;
3250 bool master = ltk_type_master(type);
3252 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3256 key = kzalloc(sizeof(*key), GFP_KERNEL);
3259 list_add(&key->list, &hdev->long_term_keys);
3262 bacpy(&key->bdaddr, bdaddr);
3263 key->bdaddr_type = addr_type;
3264 memcpy(key->val, tk, sizeof(key->val));
3265 key->authenticated = authenticated;
3268 key->enc_size = enc_size;
/* Store (or update) an Identity Resolving Key together with the last
 * known RPA for the given identity address.
 */
3274 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3275 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3277 struct smp_irk *irk;
3279 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3281 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3285 bacpy(&irk->bdaddr, bdaddr);
3286 irk->addr_type = addr_type;
3288 list_add(&irk->list, &hdev->identity_resolving_keys);
3291 memcpy(irk->val, val, 16);
3292 bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for bdaddr, if any. */
3297 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3299 struct link_key *key;
3301 key = hci_find_link_key(hdev, bdaddr);
3305 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3307 list_del(&key->list);
/* Delete all LTKs matching bdaddr/type; -ENOENT when nothing matched. */
3313 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3315 struct smp_ltk *k, *tmp;
3318 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3319 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3322 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3329 return removed ? 0 : -ENOENT;
/* Delete all IRKs matching the given identity address and type. */
3332 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3334 struct smp_irk *k, *tmp;
3336 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3337 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3340 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3347 /* HCI command timer function */
/* Fired when the controller did not answer a command in time: log the
 * stuck opcode (if known) and kick the command work queue so the next
 * queued command can be sent.
 */
3348 static void hci_cmd_timeout(struct work_struct *work)
3350 struct hci_dev *hdev = container_of(work, struct hci_dev,
3353 if (hdev->sent_cmd) {
3354 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3355 u16 opcode = __le16_to_cpu(sent->opcode);
3357 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3359 BT_ERR("%s command tx timeout", hdev->name);
/* Reset the credit so cmd_work is allowed to transmit again. */
3362 atomic_set(&hdev->cmd_cnt, 1);
3363 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote Out-Of-Band pairing data by bdaddr. */
3366 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3369 struct oob_data *data;
3371 list_for_each_entry(data, &hdev->remote_oob_data, list)
3372 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete the stored OOB entry for bdaddr, if present. */
3378 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3380 struct oob_data *data;
3382 data = hci_find_remote_oob_data(hdev, bdaddr);
3386 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3388 list_del(&data->list);
/* Drop every stored remote OOB entry. */
3394 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3396 struct oob_data *data, *n;
3398 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3399 list_del(&data->list);
/* Store P-192 OOB hash/randomizer for bdaddr, zeroing the P-256 fields
 * since only legacy Secure Simple Pairing data was provided.
 */
3404 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3405 u8 *hash, u8 *randomizer)
3407 struct oob_data *data;
3409 data = hci_find_remote_oob_data(hdev, bdaddr);
3411 data = kmalloc(sizeof(*data), GFP_KERNEL);
3415 bacpy(&data->bdaddr, bdaddr);
3416 list_add(&data->list, &hdev->remote_oob_data);
3419 memcpy(data->hash192, hash, sizeof(data->hash192));
3420 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3422 memset(data->hash256, 0, sizeof(data->hash256));
3423 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3425 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store both P-192 and P-256 OOB hash/randomizer pairs for bdaddr
 * (Secure Connections extended OOB data).
 */
3430 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3431 u8 *hash192, u8 *randomizer192,
3432 u8 *hash256, u8 *randomizer256)
3434 struct oob_data *data;
3436 data = hci_find_remote_oob_data(hdev, bdaddr);
3438 data = kmalloc(sizeof(*data), GFP_KERNEL);
3442 bacpy(&data->bdaddr, bdaddr);
3443 list_add(&data->list, &hdev->remote_oob_data);
3446 memcpy(data->hash192, hash192, sizeof(data->hash192));
3447 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3449 memcpy(data->hash256, hash256, sizeof(data->hash256));
3450 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3452 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find an entry matching bdaddr+type in a generic bdaddr list
 * (blacklist, whitelist, LE white list, ...).
 */
3457 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3458 bdaddr_t *bdaddr, u8 type)
3460 struct bdaddr_list *b;
3462 list_for_each_entry(b, bdaddr_list, list) {
3463 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Empty a generic bdaddr list, unlinking every entry. */
3470 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3472 struct list_head *p, *n;
3474 list_for_each_safe(p, n, bdaddr_list) {
3475 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add bdaddr+type to a generic bdaddr list; rejects BDADDR_ANY and
 * (per the elided return) presumably duplicates - TODO confirm codes.
 */
3482 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3484 struct bdaddr_list *entry;
3486 if (!bacmp(bdaddr, BDADDR_ANY))
3489 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3492 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3496 bacpy(&entry->bdaddr, bdaddr);
3497 entry->bdaddr_type = type;
3499 list_add(&entry->list, list);
/* Remove bdaddr+type from a generic bdaddr list; BDADDR_ANY clears all. */
3504 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3506 struct bdaddr_list *entry;
3508 if (!bacmp(bdaddr, BDADDR_ANY)) {
3509 hci_bdaddr_list_clear(list);
3513 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3517 list_del(&entry->list);
3523 /* This function requires the caller holds hdev->lock */
/* Look up stored LE connection parameters for an identity address.
 * Fix: "¶ms" was HTML-entity mojibake for "&params".
 */
3524 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3525 bdaddr_t *addr, u8 addr_type)
3527 struct hci_conn_params *params;
3529 /* The conn params list only contains identity addresses */
3530 if (!hci_is_identity_address(addr, addr_type))
3533 list_for_each_entry(params, &hdev->le_conn_params, list) {
3534 if (bacmp(&params->addr, addr) == 0 &&
3535 params->addr_type == addr_type) {
/* True when an LE connection to addr of the given type is established. */
3543 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3545 struct hci_conn *conn;
3547 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3551 if (conn->dst_type != type)
3554 if (conn->state != BT_CONNECTED)
3560 /* This function requires the caller holds hdev->lock */
/* Find an entry on a pending-LE-action list (pend_le_conns or
 * pend_le_reports) by identity address. Walks the "action" link,
 * not the main conn-params list.
 * Fix: "¶m" was HTML-entity mojibake for "&param".
 */
3561 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3562 bdaddr_t *addr, u8 addr_type)
3564 struct hci_conn_params *param;
3566 /* The list only contains identity addresses */
3567 if (!hci_is_identity_address(addr, addr_type))
3570 list_for_each_entry(param, list, action) {
3571 if (bacmp(&param->addr, addr) == 0 &&
3572 param->addr_type == addr_type)
3579 /* This function requires the caller holds hdev->lock */
/* Get-or-create stored LE connection parameters for an identity
 * address, initializing new entries from the controller defaults.
 * Fix: "¶ms" was HTML-entity mojibake for "&params" (three sites).
 */
3580 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3581 bdaddr_t *addr, u8 addr_type)
3583 struct hci_conn_params *params;
3585 if (!hci_is_identity_address(addr, addr_type))
3588 params = hci_conn_params_lookup(hdev, addr, addr_type);
3592 params = kzalloc(sizeof(*params), GFP_KERNEL);
3594 BT_ERR("Out of memory");
3598 bacpy(&params->addr, addr);
3599 params->addr_type = addr_type;
3601 list_add(&params->list, &hdev->le_conn_params);
3602 INIT_LIST_HEAD(&params->action);
/* Seed per-connection parameters from the device-wide defaults. */
3604 params->conn_min_interval = hdev->le_conn_min_interval;
3605 params->conn_max_interval = hdev->le_conn_max_interval;
3606 params->conn_latency = hdev->le_conn_latency;
3607 params->supervision_timeout = hdev->le_supv_timeout;
3608 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3610 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3615 /* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for an address, moving the entry between
 * the pend_le_conns / pend_le_reports action lists and refreshing the
 * background scan as needed.
 * Fix: "¶ms" was HTML-entity mojibake for "&params" (three sites).
 */
3616 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3619 struct hci_conn_params *params;
3621 params = hci_conn_params_add(hdev, addr, addr_type);
3625 if (params->auto_connect == auto_connect)
/* Detach from any previous action list before re-filing. */
3628 list_del_init(&params->action);
3630 switch (auto_connect) {
3631 case HCI_AUTO_CONN_DISABLED:
3632 case HCI_AUTO_CONN_LINK_LOSS:
3633 hci_update_background_scan(hdev);
3635 case HCI_AUTO_CONN_REPORT:
3636 list_add(&params->action, &hdev->pend_le_reports);
3637 hci_update_background_scan(hdev);
3639 case HCI_AUTO_CONN_ALWAYS:
3640 if (!is_connected(hdev, addr, addr_type)) {
3641 list_add(&params->action, &hdev->pend_le_conns);
3642 hci_update_background_scan(hdev);
3647 params->auto_connect = auto_connect;
3649 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3655 /* This function requires the caller holds hdev->lock */
/* Delete the stored connection parameters for an address and refresh
 * the background scan.
 * Fix: "¶ms" was HTML-entity mojibake for "&params" (two sites).
 */
3656 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3658 struct hci_conn_params *params;
3660 params = hci_conn_params_lookup(hdev, addr, addr_type);
3664 list_del(&params->action);
3665 list_del(&params->list);
3668 hci_update_background_scan(hdev);
3670 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3673 /* This function requires the caller holds hdev->lock */
/* Drop every conn-params entry whose policy is AUTO_CONN_DISABLED.
 * Fix: "¶ms" was HTML-entity mojibake for "&params".
 */
3674 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3676 struct hci_conn_params *params, *tmp;
3678 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3679 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3681 list_del(&params->list);
3685 BT_DBG("All LE disabled connection parameters were removed");
3688 /* This function requires the caller holds hdev->lock */
/* Drop every stored conn-params entry and refresh background scanning.
 * Fix: "¶ms" was HTML-entity mojibake for "&params" (two sites).
 */
3689 void hci_conn_params_clear_all(struct hci_dev *hdev)
3691 struct hci_conn_params *params, *tmp;
3693 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3694 list_del(&params->action);
3695 list_del(&params->list);
3699 hci_update_background_scan(hdev);
3701 BT_DBG("All LE connection parameters were removed");
/* Request callback: on inquiry start failure, log and reset discovery. */
3704 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3707 BT_ERR("Failed to start inquiry: status %d", status);
3710 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3711 hci_dev_unlock(hdev);
/* Runs after LE scanning was disabled: for pure LE discovery stop the
 * discovery state machine; for interleaved discovery start the BR/EDR
 * inquiry phase (GIAC) instead.
 */
3716 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3718 /* General inquiry access code (GIAC) */
3719 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3720 struct hci_request req;
3721 struct hci_cp_inquiry cp;
3725 BT_ERR("Failed to disable LE scanning: status %d", status);
3729 switch (hdev->discovery.type) {
3730 case DISCOV_TYPE_LE:
3732 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3733 hci_dev_unlock(hdev);
3736 case DISCOV_TYPE_INTERLEAVED:
3737 hci_req_init(&req, hdev);
3739 memset(&cp, 0, sizeof(cp));
3740 memcpy(&cp.lap, lap, sizeof(cp.lap));
3741 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3742 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Start the inquiry phase with a clean cache. */
3746 hci_inquiry_cache_flush(hdev);
3748 err = hci_req_run(&req, inquiry_complete);
3750 BT_ERR("Inquiry request failed: err %d", err);
3751 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3754 hci_dev_unlock(hdev);
/* Delayed work: build and run a request that disables LE scanning,
 * completing via le_scan_disable_work_complete().
 */
3759 static void le_scan_disable_work(struct work_struct *work)
3761 struct hci_dev *hdev = container_of(work, struct hci_dev,
3762 le_scan_disable.work);
3763 struct hci_request req;
3766 BT_DBG("%s", hdev->name);
3768 hci_req_init(&req, hdev);
3770 hci_req_add_le_scan_disable(&req);
3772 err = hci_req_run(&req, le_scan_disable_work_complete);
3774 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an LE Set Random Address command, unless advertising or an LE
 * connect is in flight (in which case the update is deferred - see the
 * original comment below).
 */
3777 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3779 struct hci_dev *hdev = req->hdev;
3781 /* If we're advertising or initiating an LE connection we can't
3782 * go ahead and change the random address at this time. This is
3783 * because the eventual initiator address used for the
3784 * subsequently created connection will be undefined (some
3785 * controllers use the new address and others the one we had
3786 * when the operation started).
3788 * In this kind of scenario skip the update and let the random
3789 * address be updated at the next cycle.
3791 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3792 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3793 BT_DBG("Deferring random address update");
3797 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Choose and program the own-address type for an LE operation:
 * RPA when privacy is on (regenerating on expiry), an unresolvable
 * private address when privacy is required without an IRK, the static
 * address for LE-only/forced-static controllers, else the public one.
 * Returns via *own_addr_type; queues commands on req as needed.
 */
3800 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3803 struct hci_dev *hdev = req->hdev;
3806 /* If privacy is enabled use a resolvable private address. If
3807 * current RPA has expired or there is something else than
3808 * the current RPA in use, then generate a new one.
3810 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3813 *own_addr_type = ADDR_LE_DEV_RANDOM;
3815 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3816 !bacmp(&hdev->random_addr, &hdev->rpa))
3819 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3821 BT_ERR("%s failed to generate new RPA", hdev->name);
3825 set_random_addr(req, &hdev->rpa);
/* Re-arm the RPA rotation timer. */
3827 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3828 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3833 /* In case of required privacy without resolvable private address,
3834 * use an unresolvable private address. This is useful for active
3835 * scanning and non-connectable advertising.
3837 if (require_privacy) {
3840 get_random_bytes(&urpa, 6);
3841 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3843 *own_addr_type = ADDR_LE_DEV_RANDOM;
3844 set_random_addr(req, &urpa);
3848 /* If forcing static address is in use or there is no public
3849 * address use the static address as random address (but skip
3850 * the HCI command if the current random address is already the
3853 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3854 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3855 *own_addr_type = ADDR_LE_DEV_RANDOM;
3856 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3857 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3858 &hdev->static_addr);
3862 /* Neither privacy nor static address is being used so use a
3865 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3870 /* Copy the Identity Address of the controller.
3872 * If the controller has a public BD_ADDR, then by default use that one.
3873 * If this is a LE only controller without a public address, default to
3874 * the static random address.
3876 * For debugging purposes it is possible to force controllers with a
3877 * public address to use the static random address instead.
3879 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3882 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3883 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3884 bacpy(bdaddr, &hdev->static_addr);
3885 *bdaddr_type = ADDR_LE_DEV_RANDOM;
/* Otherwise report the public address. */
3887 bacpy(bdaddr, &hdev->bdaddr);
3888 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3892 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev: default tunables, all list
 * heads, locks, work items, packet queues and sysfs/discovery state.
 * The caller later registers it with hci_register_dev().
 */
3893 struct hci_dev *hci_alloc_dev(void)
3895 struct hci_dev *hdev;
3897 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Baseline BR/EDR defaults. */
3901 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3902 hdev->esco_type = (ESCO_HV1);
3903 hdev->link_mode = (HCI_LM_ACCEPT);
3904 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3905 hdev->io_capability = 0x03; /* No Input No Output */
3906 hdev->manufacturer = 0xffff; /* Default to internal use */
3907 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3908 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3910 hdev->sniff_max_interval = 800;
3911 hdev->sniff_min_interval = 80;
/* LE scan/connection defaults (units per Core Spec, e.g. 0.625 ms slots). */
3913 hdev->le_adv_channel_map = 0x07;
3914 hdev->le_scan_interval = 0x0060;
3915 hdev->le_scan_window = 0x0030;
3916 hdev->le_conn_min_interval = 0x0028;
3917 hdev->le_conn_max_interval = 0x0038;
3918 hdev->le_conn_latency = 0x0000;
3919 hdev->le_supv_timeout = 0x002a;
3921 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3922 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3923 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3924 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3926 mutex_init(&hdev->lock);
3927 mutex_init(&hdev->req_lock);
3929 INIT_LIST_HEAD(&hdev->mgmt_pending);
3930 INIT_LIST_HEAD(&hdev->blacklist);
3931 INIT_LIST_HEAD(&hdev->whitelist);
3932 INIT_LIST_HEAD(&hdev->uuids);
3933 INIT_LIST_HEAD(&hdev->link_keys);
3934 INIT_LIST_HEAD(&hdev->long_term_keys);
3935 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3936 INIT_LIST_HEAD(&hdev->remote_oob_data);
3937 INIT_LIST_HEAD(&hdev->le_white_list);
3938 INIT_LIST_HEAD(&hdev->le_conn_params);
3939 INIT_LIST_HEAD(&hdev->pend_le_conns);
3940 INIT_LIST_HEAD(&hdev->pend_le_reports);
3941 INIT_LIST_HEAD(&hdev->conn_hash.list);
3943 INIT_WORK(&hdev->rx_work, hci_rx_work);
3944 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3945 INIT_WORK(&hdev->tx_work, hci_tx_work);
3946 INIT_WORK(&hdev->power_on, hci_power_on);
3948 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3949 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3950 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3952 skb_queue_head_init(&hdev->rx_q);
3953 skb_queue_head_init(&hdev->cmd_q);
3954 skb_queue_head_init(&hdev->raw_q);
3956 init_waitqueue_head(&hdev->req_wait_q);
3958 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3960 hci_init_sysfs(hdev);
3961 discovery_init(hdev);
3965 EXPORT_SYMBOL(hci_alloc_dev);
3967 /* Free HCI device */
/* Drop the device reference; the struct is freed by the device
 * release callback once the last reference goes away.
 */
3968 void hci_free_dev(struct hci_dev *hdev)
3970 /* will free via device release */
3971 put_device(&hdev->dev);
3973 EXPORT_SYMBOL(hci_free_dev);
3975 /* Register HCI device */
/* Register a fully allocated hci_dev: allocate an index, create its
 * workqueues, debugfs dir, AES crypto context, sysfs device and rfkill
 * switch, add it to hci_dev_list and kick the initial power-on.
 * Error paths (partially elided here) unwind in reverse order.
 */
3976 int hci_register_dev(struct hci_dev *hdev)
3980 if (!hdev->open || !hdev->close || !hdev->send)
3983 /* Do not allow HCI_AMP devices to register at index 0,
3984 * so the index can be used as the AMP controller ID.
3986 switch (hdev->dev_type) {
3988 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3991 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4000 sprintf(hdev->name, "hci%d", id);
4003 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4005 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4006 WQ_MEM_RECLAIM, 1, hdev->name);
4007 if (!hdev->workqueue) {
4012 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4013 WQ_MEM_RECLAIM, 1, hdev->name);
4014 if (!hdev->req_workqueue) {
4015 destroy_workqueue(hdev->workqueue);
4020 if (!IS_ERR_OR_NULL(bt_debugfs))
4021 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4023 dev_set_name(&hdev->dev, "%s", hdev->name);
/* ECB(AES) context used for RPA generation/resolution. */
4025 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4027 if (IS_ERR(hdev->tfm_aes)) {
4028 BT_ERR("Unable to create crypto context");
4029 error = PTR_ERR(hdev->tfm_aes);
4030 hdev->tfm_aes = NULL;
4034 error = device_add(&hdev->dev);
4038 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4039 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4042 if (rfkill_register(hdev->rfkill) < 0) {
4043 rfkill_destroy(hdev->rfkill);
4044 hdev->rfkill = NULL;
4048 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4049 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4051 set_bit(HCI_SETUP, &hdev->dev_flags);
4052 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4054 if (hdev->dev_type == HCI_BREDR) {
4055 /* Assume BR/EDR support until proven otherwise (such as
4056 * through reading supported features during init.
4058 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4061 write_lock(&hci_dev_list_lock);
4062 list_add(&hdev->list, &hci_dev_list);
4063 write_unlock(&hci_dev_list_lock);
4065 /* Devices that are marked for raw-only usage are unconfigured
4066 * and should not be included in normal operation.
4068 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4069 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4071 hci_notify(hdev, HCI_DEV_REG);
4074 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind labels (partially elided in this view). */
4079 crypto_free_blkcipher(hdev->tfm_aes);
4081 destroy_workqueue(hdev->workqueue);
4082 destroy_workqueue(hdev->req_workqueue);
4084 ida_simple_remove(&hci_index_ida, hdev->id);
4088 EXPORT_SYMBOL(hci_register_dev);
4090 /* Unregister HCI device */
/* Tear down a registered controller: mark HCI_UNREGISTER, unlink it,
 * close it, flush pending work, emit Index Removed via mgmt, release
 * rfkill/crypto/sysfs/debugfs resources, purge all stored keys and
 * parameter lists, and finally return the index to the IDA.
 */
4091 void hci_unregister_dev(struct hci_dev *hdev)
4095 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4097 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4101 write_lock(&hci_dev_list_lock);
4102 list_del(&hdev->list);
4103 write_unlock(&hci_dev_list_lock);
4105 hci_dev_do_close(hdev);
4107 for (i = 0; i < NUM_REASSEMBLY; i++)
4108 kfree_skb(hdev->reassembly[i]);
4110 cancel_work_sync(&hdev->power_on);
4112 if (!test_bit(HCI_INIT, &hdev->flags) &&
4113 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4114 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4116 mgmt_index_removed(hdev);
4117 hci_dev_unlock(hdev);
4120 /* mgmt_index_removed should take care of emptying the
4122 BUG_ON(!list_empty(&hdev->mgmt_pending));
4124 hci_notify(hdev, HCI_DEV_UNREG);
4127 rfkill_unregister(hdev->rfkill);
4128 rfkill_destroy(hdev->rfkill);
4132 crypto_free_blkcipher(hdev->tfm_aes);
4134 device_del(&hdev->dev);
4136 debugfs_remove_recursive(hdev->debugfs);
4138 destroy_workqueue(hdev->workqueue);
4139 destroy_workqueue(hdev->req_workqueue);
/* Drop every per-device list while holding hdev->lock (lock elided). */
4142 hci_bdaddr_list_clear(&hdev->blacklist);
4143 hci_bdaddr_list_clear(&hdev->whitelist);
4144 hci_uuids_clear(hdev);
4145 hci_link_keys_clear(hdev);
4146 hci_smp_ltks_clear(hdev);
4147 hci_smp_irks_clear(hdev);
4148 hci_remote_oob_data_clear(hdev);
4149 hci_bdaddr_list_clear(&hdev->le_white_list);
4150 hci_conn_params_clear_all(hdev);
4151 hci_dev_unlock(hdev);
4155 ida_simple_remove(&hci_index_ida, id);
4159 /* Suspend HCI device */
/* Notify registered listeners that the device is suspending. */
4160 int hci_suspend_dev(struct hci_dev *hdev)
4162 hci_notify(hdev, HCI_DEV_SUSPEND);
4165 EXPORT_SYMBOL(hci_suspend_dev);
4167 /* Resume HCI device */
/* Notify registered listeners that the device has resumed. */
4168 int hci_resume_dev(struct hci_dev *hdev)
4170 hci_notify(hdev, HCI_DEV_RESUME);
4173 EXPORT_SYMBOL(hci_resume_dev);
4175 /* Receive frame from HCI drivers */
/* Entry point for drivers delivering a complete HCI packet: tag it as
 * incoming, timestamp it, and hand it to the RX work queue. Frames are
 * dropped (elided path) when the device is neither up nor initializing.
 */
4176 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4178 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4179 && !test_bit(HCI_INIT, &hdev->flags))) {
4185 bt_cb(skb)->incoming = 1;
4188 __net_timestamp(skb);
4190 skb_queue_tail(&hdev->rx_q, skb);
4191 queue_work(hdev->workqueue, &hdev->rx_work);
4195 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble an HCI packet (ACL/SCO/event) arriving in
 * fragments from the driver. Keeps per-index partial skbs in
 * hdev->reassembly[]; once the header announces the payload length and
 * all bytes arrive, the complete frame is fed to hci_recv_frame().
 * Returns the number of unconsumed input bytes (per caller usage).
 */
4197 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4198 int count, __u8 index)
4203 struct sk_buff *skb;
4204 struct bt_skb_cb *scb;
4206 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4207 index >= NUM_REASSEMBLY)
4210 skb = hdev->reassembly[index];
/* No partial skb yet: allocate one sized for the packet type. */
4214 case HCI_ACLDATA_PKT:
4215 len = HCI_MAX_FRAME_SIZE;
4216 hlen = HCI_ACL_HDR_SIZE;
4219 len = HCI_MAX_EVENT_SIZE;
4220 hlen = HCI_EVENT_HDR_SIZE;
4222 case HCI_SCODATA_PKT:
4223 len = HCI_MAX_SCO_SIZE;
4224 hlen = HCI_SCO_HDR_SIZE;
4228 skb = bt_skb_alloc(len, GFP_ATOMIC);
4232 scb = (void *) skb->cb;
4234 scb->pkt_type = type;
4236 hdev->reassembly[index] = skb;
/* Copy as much of the input as the current expectation allows. */
4240 scb = (void *) skb->cb;
4241 len = min_t(uint, scb->expect, count);
4243 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length from the type's header. */
4252 if (skb->len == HCI_EVENT_HDR_SIZE) {
4253 struct hci_event_hdr *h = hci_event_hdr(skb);
4254 scb->expect = h->plen;
4256 if (skb_tailroom(skb) < scb->expect) {
4258 hdev->reassembly[index] = NULL;
4264 case HCI_ACLDATA_PKT:
4265 if (skb->len == HCI_ACL_HDR_SIZE) {
4266 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4267 scb->expect = __le16_to_cpu(h->dlen);
4269 if (skb_tailroom(skb) < scb->expect) {
4271 hdev->reassembly[index] = NULL;
4277 case HCI_SCODATA_PKT:
4278 if (skb->len == HCI_SCO_HDR_SIZE) {
4279 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4280 scb->expect = h->dlen;
4282 if (skb_tailroom(skb) < scb->expect) {
4284 hdev->reassembly[index] = NULL;
4291 if (scb->expect == 0) {
4292 /* Complete frame */
4294 bt_cb(skb)->pkt_type = type;
4295 hci_recv_frame(hdev, skb);
4297 hdev->reassembly[index] = NULL;
/* Driver helper: feed a buffer of typed fragments into the per-type
 * reassembly slot (index = type - 1), looping until all consumed.
 */
4305 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4309 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4313 rem = hci_reassembly(hdev, type, data, count, type - 1);
4317 data += (count - rem);
4323 EXPORT_SYMBOL(hci_recv_fragment);
4325 #define STREAM_REASSEMBLY 0
/* Driver helper for byte-stream transports (e.g. UART): the packet type
 * is read from the first byte of each frame, then the payload goes
 * through the shared STREAM_REASSEMBLY slot.
 */
4327 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4333 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4336 struct { char type; } *pkt;
4338 /* Start of the frame */
/* Mid-frame: continue with the type recorded in the partial skb. */
4345 type = bt_cb(skb)->pkt_type;
4347 rem = hci_reassembly(hdev, type, data, count,
4352 data += (count - rem);
4358 EXPORT_SYMBOL(hci_recv_stream_fragment);
4360 /* ---- Interface to upper protocols ---- */
/* Add an upper-protocol callback set (e.g. L2CAP, SCO) to the global
 * hci_cb_list under the writer lock.
 */
4362 int hci_register_cb(struct hci_cb *cb)
4364 BT_DBG("%p name %s", cb, cb->name);
4366 write_lock(&hci_cb_list_lock);
4367 list_add(&cb->list, &hci_cb_list);
4368 write_unlock(&hci_cb_list_lock);
4372 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback set from hci_cb_list. */
4374 int hci_unregister_cb(struct hci_cb *cb)
4376 BT_DBG("%p name %s", cb, cb->name);
4378 write_lock(&hci_cb_list_lock);
4379 list_del(&cb->list);
4380 write_unlock(&hci_cb_list_lock);
4384 EXPORT_SYMBOL(hci_unregister_cb);
/* Transmit one frame to the driver, first mirroring it to the monitor
 * socket and (when in promiscuous mode) to raw HCI sockets.
 */
4386 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4390 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4393 __net_timestamp(skb);
4395 /* Send copy to monitor */
4396 hci_send_to_monitor(hdev, skb);
4398 if (atomic_read(&hdev->promisc)) {
4399 /* Send copy to the sockets */
4400 hci_send_to_sock(hdev, skb);
4403 /* Get rid of skb owner, prior to sending to the driver. */
4406 err = hdev->send(hdev, skb);
4408 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* Initialize an HCI request: empty command queue (hdev/err setup on
 * elided lines).
 */
4413 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4415 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: attach the completion callback to the last
 * command, splice the request queue onto hdev->cmd_q and kick cmd_work.
 * A build error purges the queue instead; empty requests are rejected.
 */
4420 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4422 struct hci_dev *hdev = req->hdev;
4423 struct sk_buff *skb;
4424 unsigned long flags;
4426 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4428 /* If an error occured during request building, remove all HCI
4429 * commands queued on the HCI request queue.
4432 skb_queue_purge(&req->cmd_q);
4436 /* Do not allow empty requests */
4437 if (skb_queue_empty(&req->cmd_q))
/* Completion is delivered after the final command finishes. */
4440 skb = skb_peek_tail(&req->cmd_q);
4441 bt_cb(skb)->req.complete = complete;
4443 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4444 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4445 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4447 queue_work(hdev->workqueue, &hdev->cmd_work);
/* True while a synchronous request is waiting for its completion event
 * (req_status set to HCI_REQ_PEND, see defines at top of file).
 */
4452 bool hci_req_pending(struct hci_dev *hdev)
4454 return (hdev->req_status == HCI_REQ_PEND);
/* Allocate and fill an skb holding one HCI command: command header
 * (opcode, little-endian) followed by plen parameter bytes, tagged as
 * HCI_COMMAND_PKT.  Allocated GFP_ATOMIC since callers may hold locks.
 * NOTE(review): the NULL-check after bt_skb_alloc() is elided here.
 */
4457 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4458 u32 plen, const void *param)
4460 int len = HCI_COMMAND_HDR_SIZE + plen;
4461 struct hci_command_hdr *hdr;
4462 struct sk_buff *skb;
4464 skb = bt_skb_alloc(len, GFP_ATOMIC);
4468 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4469 hdr->opcode = cpu_to_le16(opcode);
4473 memcpy(skb_put(skb, plen), param, plen);
4475 BT_DBG("skb len %d", skb->len);
4477 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4482 /* Send HCI command */
/* Build a stand-alone command, mark it as the start of a single-command
 * request (so request tracking treats it as its own unit), queue it on
 * hdev->cmd_q and wake the command worker.
 */
4483 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4486 struct sk_buff *skb;
4488 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4490 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4492 BT_ERR("%s no memory for command", hdev->name);
4496 /* Stand-alone HCI commands must be flaged as
4497 * single-command requests.
4499 bt_cb(skb)->req.start = true;
4501 skb_queue_tail(&hdev->cmd_q, skb);
4502 queue_work(hdev->workqueue, &hdev->cmd_work);
4507 /* Queue a command to an asynchronous HCI request */
/* Append one command to a request under construction.  The first
 * command queued gets req.start = true; 'event' records which HCI event
 * (if any, 0 = none) signals this command's completion.  Errors are
 * remembered on the request and reported later by hci_req_run().
 */
4508 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4509 const void *param, u8 event)
4511 struct hci_dev *hdev = req->hdev;
4512 struct sk_buff *skb;
4514 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4516 /* If an error occured during request building, there is no point in
4517 * queueing the HCI command. We can simply return.
4522 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4524 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4525 hdev->name, opcode);
/* First command in the request marks the request boundary. */
4530 if (skb_queue_empty(&req->cmd_q))
4531 bt_cb(skb)->req.start = true;
4533 bt_cb(skb)->req.event = event;
4535 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command whose completion is signalled by
 * the normal Command Complete/Status flow (event = 0).
 */
4538 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4541 hci_req_add_ev(req, opcode, plen, param, 0);
4544 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last command sent to
 * the controller (hdev->sent_cmd), or NULL-equivalent paths when nothing
 * was sent or the opcode does not match.
 */
4545 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4547 struct hci_command_hdr *hdr;
4549 if (!hdev->sent_cmd)
4552 hdr = (void *) hdev->sent_cmd->data;
/* Opcode is stored little-endian in the header; compare in LE form. */
4554 if (hdr->opcode != cpu_to_le16(opcode))
4557 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4559 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to skb: the 12-bit connection handle packed
 * with the PB/BC flags, plus the data length.  NOTE(review): the local
 * 'len' (payload length before the push) is declared on an elided line.
 */
4563 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4565 struct hci_acl_hdr *hdr;
4568 skb_push(skb, HCI_ACL_HDR_SIZE);
4569 skb_reset_transport_header(skb);
4570 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4571 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4572 hdr->dlen = cpu_to_le16(len);
/* Queue an (possibly fragmented) outgoing ACL packet.  The ACL header is
 * added per device type (the switch cases are elided here; one branch
 * uses conn->handle, the other the per-channel chan->handle).  A
 * non-fragmented skb is queued directly; otherwise the head skb and
 * every skb on its frag_list are queued under queue->lock so the whole
 * packet lands contiguously, with ACL_START cleared on continuations.
 */
4575 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4576 struct sk_buff *skb, __u16 flags)
4578 struct hci_conn *conn = chan->conn;
4579 struct hci_dev *hdev = conn->hdev;
4580 struct sk_buff *list;
4582 skb->len = skb_headlen(skb);
4585 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4587 switch (hdev->dev_type) {
4589 hci_add_acl_hdr(skb, conn->handle, flags);
4592 hci_add_acl_hdr(skb, chan->handle, flags);
4595 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4599 list = skb_shinfo(skb)->frag_list;
4601 /* Non fragmented */
4602 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4604 skb_queue_tail(queue, skb);
4607 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment list; each fragment is queued individually. */
4609 skb_shinfo(skb)->frag_list = NULL;
4611 /* Queue all fragments atomically */
4612 spin_lock(&queue->lock);
4614 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
4616 flags &= ~ACL_START;
4619 skb = list; list = list->next;
4621 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4622 hci_add_acl_hdr(skb, conn->handle, flags);
4624 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4626 __skb_queue_tail(queue, skb);
4629 spin_unlock(&queue->lock);
/* Public entry for sending ACL data on a channel: queue it on the
 * channel's data queue and schedule the TX worker.
 */
4633 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4635 struct hci_dev *hdev = chan->conn->hdev;
4637 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4639 hci_queue_acl(chan, &chan->data_q, skb, flags);
4641 queue_work(hdev->workqueue, &hdev->tx_work);
/* Send SCO audio data: build the SCO header (handle + 8-bit length) in
 * front of the payload, tag the packet type, queue it on the connection
 * and schedule the TX worker.
 */
4645 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4647 struct hci_dev *hdev = conn->hdev;
4648 struct hci_sco_hdr hdr;
4650 BT_DBG("%s len %d", hdev->name, skb->len);
4652 hdr.handle = cpu_to_le16(conn->handle);
4653 hdr.dlen = skb->len;
4655 skb_push(skb, HCI_SCO_HDR_SIZE);
4656 skb_reset_transport_header(skb);
4657 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4659 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4661 skb_queue_tail(&conn->data_q, skb);
4662 queue_work(hdev->workqueue, &hdev->tx_work);
4665 /* ---- HCI TX task (outgoing data) ---- */
4667 /* HCI Connection scheduler */
/* Connection-level scheduler: among connected/configuring links of the
 * given type with queued data, pick the one with the fewest in-flight
 * packets (c->sent) and compute its TX quota from the controller's
 * available buffer count for that link type.  The list is walked under
 * RCU (the rcu_read_lock/unlock lines are elided in this excerpt).
 */
4668 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4671 struct hci_conn_hash *h = &hdev->conn_hash;
4672 struct hci_conn *conn = NULL, *c;
4673 unsigned int num = 0, min = ~0;
4675 /* We don't have to lock device here. Connections are always
4676 * added and removed with TX task disabled. */
4680 list_for_each_entry_rcu(c, &h->list, list) {
4681 if (c->type != type || skb_queue_empty(&c->data_q))
4684 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4689 if (c->sent < min) {
/* Early exit once every connection of this type has been seen. */
4694 if (hci_conn_num(hdev, type) == num)
4703 switch (conn->type) {
4705 cnt = hdev->acl_cnt;
4709 cnt = hdev->sco_cnt;
/* LE shares the ACL buffer pool when no dedicated LE buffers exist. */
4712 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4716 BT_ERR("Unknown link type");
4724 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handling: the controller stopped returning buffer credits,
 * so forcibly disconnect every connection of this link type that still
 * has unacknowledged packets outstanding.
 */
4728 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4730 struct hci_conn_hash *h = &hdev->conn_hash;
4733 BT_ERR("%s link tx timeout", hdev->name);
4737 /* Kill stalled connections */
4738 list_for_each_entry_rcu(c, &h->list, list) {
4739 if (c->type == type && c->sent) {
4740 BT_ERR("%s killing stalled connection %pMR",
4741 hdev->name, &c->dst);
4742 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: pick the best hci_chan of the given link
 * type.  Highest skb priority wins first; among equal priorities the
 * connection with the fewest in-flight packets wins.  The returned
 * quota is derived from the matching controller buffer count.  Walked
 * under RCU (lock/unlock lines elided in this excerpt).
 */
4749 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4752 struct hci_conn_hash *h = &hdev->conn_hash;
4753 struct hci_chan *chan = NULL;
4754 unsigned int num = 0, min = ~0, cur_prio = 0;
4755 struct hci_conn *conn;
4756 int cnt, q, conn_num = 0;
4758 BT_DBG("%s", hdev->name);
4762 list_for_each_entry_rcu(conn, &h->list, list) {
4763 struct hci_chan *tmp;
4765 if (conn->type != type)
4768 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4773 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4774 struct sk_buff *skb;
4776 if (skb_queue_empty(&tmp->data_q))
4779 skb = skb_peek(&tmp->data_q);
/* Lower-priority channels never preempt a higher-priority pick. */
4780 if (skb->priority < cur_prio)
4783 if (skb->priority > cur_prio) {
4786 cur_prio = skb->priority;
4791 if (conn->sent < min) {
4797 if (hci_conn_num(hdev, type) == conn_num)
4806 switch (chan->conn->type) {
4808 cnt = hdev->acl_cnt;
4811 cnt = hdev->block_cnt;
4815 cnt = hdev->sco_cnt;
4818 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4822 BT_ERR("Unknown link type");
4827 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after packets were sent, bump the priority of
 * waiting queued skbs (up to HCI_PRIO_MAX - 1) so low-priority channels
 * eventually get scheduled.  Walked under RCU (elided lock lines).
 */
4831 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4833 struct hci_conn_hash *h = &hdev->conn_hash;
4834 struct hci_conn *conn;
4837 BT_DBG("%s", hdev->name);
4841 list_for_each_entry_rcu(conn, &h->list, list) {
4842 struct hci_chan *chan;
4844 if (conn->type != type)
4847 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4852 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4853 struct sk_buff *skb;
4860 if (skb_queue_empty(&chan->data_q))
4863 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling; nothing to do. */
4864 if (skb->priority >= HCI_PRIO_MAX - 1)
4867 skb->priority = HCI_PRIO_MAX - 1;
4869 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4873 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by this ACL packet
 * (payload only -- the ACL header is not counted), rounded up.
 */
4881 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4883 /* Calculate count of blocks used by this packet */
4884 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has run out of ACL credits (cnt == 0) for longer
 * than HCI_ACL_TX_TIMEOUT, treat the ACL link as stalled and kill the
 * offending connections.  Skipped for unconfigured controllers.
 */
4887 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4889 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4890 /* ACL tx timeout must be longer than maximum
4891 * link supervision timeout (40.9 seconds) */
4892 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4893 HCI_ACL_TX_TIMEOUT))
4894 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while credits remain, pick the best
 * channel via hci_chan_sent() and drain up to 'quote' packets of the
 * same (or higher) priority from it.  If anything was sent, rebalance
 * priorities so other channels are not starved.
 */
4898 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4900 unsigned int cnt = hdev->acl_cnt;
4901 struct hci_chan *chan;
4902 struct sk_buff *skb;
4905 __check_timeout(hdev, cnt);
4907 while (hdev->acl_cnt &&
4908 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4909 u32 priority = (skb_peek(&chan->data_q))->priority;
4910 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4911 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4912 skb->len, skb->priority);
4914 /* Stop if priority has changed */
4915 if (skb->priority < priority)
4918 skb = skb_dequeue(&chan->data_q);
4920 hci_conn_enter_active_mode(chan->conn,
4921 bt_cb(skb)->force_active);
4923 hci_send_frame(hdev, skb);
4924 hdev->acl_last_tx = jiffies;
/* Credits were consumed -> promote waiting traffic. */
4932 if (cnt != hdev->acl_cnt)
4933 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (data block flow control, used mainly by
 * AMP controllers): like hci_sched_acl_pkt() but accounting is done in
 * controller data blocks rather than whole packets; a packet is only
 * sent when enough blocks are available for all of it.
 */
4936 static void hci_sched_acl_blk(struct hci_dev *hdev)
4938 unsigned int cnt = hdev->block_cnt;
4939 struct hci_chan *chan;
4940 struct sk_buff *skb;
4944 __check_timeout(hdev, cnt);
4946 BT_DBG("%s", hdev->name);
/* On an AMP controller the data link type is AMP_LINK (assignment
 * elided in this excerpt). */
4948 if (hdev->dev_type == HCI_AMP)
4953 while (hdev->block_cnt > 0 &&
4954 (chan = hci_chan_sent(hdev, type, &quote))) {
4955 u32 priority = (skb_peek(&chan->data_q))->priority;
4956 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4959 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4960 skb->len, skb->priority);
4962 /* Stop if priority has changed */
4963 if (skb->priority < priority)
4966 skb = skb_dequeue(&chan->data_q);
4968 blocks = __get_blocks(hdev, skb);
/* Not enough blocks for the whole packet -- stop scheduling. */
4969 if (blocks > hdev->block_cnt)
4972 hci_conn_enter_active_mode(chan->conn,
4973 bt_cb(skb)->force_active);
4975 hci_send_frame(hdev, skb);
4976 hdev->acl_last_tx = jiffies;
4978 hdev->block_cnt -= blocks;
4981 chan->sent += blocks;
4982 chan->conn->sent += blocks;
4986 if (cnt != hdev->block_cnt)
4987 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode, after ruling out
 * link/controller type combinations with nothing to schedule.
 */
4990 static void hci_sched_acl(struct hci_dev *hdev)
4992 BT_DBG("%s", hdev->name);
4994 /* No ACL link over BR/EDR controller */
4995 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4998 /* No AMP link over AMP controller */
4999 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5002 switch (hdev->flow_ctl_mode) {
5003 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5004 hci_sched_acl_pkt(hdev);
5007 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5008 hci_sched_acl_blk(hdev);
/* SCO scheduler: round-robin over SCO connections via hci_low_sent(),
 * sending whole queued packets while SCO credits remain; resets a
 * wrapped sent counter (guard at line 5031, body elided).
 */
5014 static void hci_sched_sco(struct hci_dev *hdev)
5016 struct hci_conn *conn;
5017 struct sk_buff *skb;
5020 BT_DBG("%s", hdev->name);
5022 if (!hci_conn_num(hdev, SCO_LINK))
5025 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5026 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5027 BT_DBG("skb %p len %d", skb, skb->len);
5028 hci_send_frame(hdev, skb);
5031 if (conn->sent == ~0)
/* eSCO scheduler: identical to hci_sched_sco() but for ESCO_LINK
 * connections; eSCO shares the SCO credit pool (hdev->sco_cnt).
 */
5037 static void hci_sched_esco(struct hci_dev *hdev)
5039 struct hci_conn *conn;
5040 struct sk_buff *skb;
5043 BT_DBG("%s", hdev->name);
5045 if (!hci_conn_num(hdev, ESCO_LINK))
5048 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5050 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5051 BT_DBG("skb %p len %d", skb, skb->len);
5052 hci_send_frame(hdev, skb);
5055 if (conn->sent == ~0)
/* LE scheduler: controllers without dedicated LE buffers (le_pkts == 0)
 * borrow from the ACL pool, so the remaining count is written back to
 * hdev->acl_cnt at the end.  Includes its own stall detection since
 * __check_timeout() only covers ACL_LINK.
 */
5061 static void hci_sched_le(struct hci_dev *hdev)
5063 struct hci_chan *chan;
5064 struct sk_buff *skb;
5065 int quote, cnt, tmp;
5067 BT_DBG("%s", hdev->name);
5069 if (!hci_conn_num(hdev, LE_LINK))
5072 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5073 /* LE tx timeout must be longer than maximum
5074 * link supervision timeout (40.9 seconds) */
5075 if (!hdev->le_cnt && hdev->le_pkts &&
5076 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5077 hci_link_tx_to(hdev, LE_LINK);
5080 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5082 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5083 u32 priority = (skb_peek(&chan->data_q))->priority;
5084 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5085 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5086 skb->len, skb->priority);
5088 /* Stop if priority has changed */
5089 if (skb->priority < priority)
5092 skb = skb_dequeue(&chan->data_q);
5094 hci_send_frame(hdev, skb);
5095 hdev->le_last_tx = jiffies;
/* No dedicated LE buffers: give unused credits back to ACL. */
5106 hdev->acl_cnt = cnt;
5109 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run all per-link-type schedulers (unless the device is
 * owned by a user-channel socket, which bypasses the stack), then flush
 * the raw queue of packets sent verbatim by user space.
 */
5112 static void hci_tx_work(struct work_struct *work)
5114 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5115 struct sk_buff *skb;
5117 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5118 hdev->sco_cnt, hdev->le_cnt);
5120 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5121 /* Schedule queues and send stuff to HCI driver */
5122 hci_sched_acl(hdev);
5123 hci_sched_sco(hdev);
5124 hci_sched_esco(hdev);
5128 /* Send next queued raw (unknown type) packet */
5129 while ((skb = skb_dequeue(&hdev->raw_q)))
5130 hci_send_frame(hdev, skb);
5133 /* ----- HCI RX task (incoming data processing) ----- */
5135 /* ACL data packet */
/* Handle an incoming ACL data packet: strip the ACL header, split the
 * 16-bit header field into connection handle and PB/BC flags, look up
 * the connection under hdev->lock, and pass the payload up to L2CAP.
 * Packets for unknown handles are logged and dropped (kfree elided).
 */
5136 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5138 struct hci_acl_hdr *hdr = (void *) skb->data;
5139 struct hci_conn *conn;
5140 __u16 handle, flags;
5142 skb_pull(skb, HCI_ACL_HDR_SIZE);
5144 handle = __le16_to_cpu(hdr->handle);
5145 flags = hci_flags(handle);
5146 handle = hci_handle(handle);
5148 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5151 hdev->stat.acl_rx++;
5154 conn = hci_conn_hash_lookup_handle(hdev, handle);
5155 hci_dev_unlock(hdev);
5158 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5160 /* Send to upper protocol */
5161 l2cap_recv_acldata(conn, skb, flags);
5164 BT_ERR("%s ACL packet for unknown connection handle %d",
5165 hdev->name, handle);
5171 /* SCO data packet */
5172 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5174 struct hci_sco_hdr *hdr = (void *) skb->data;
5175 struct hci_conn *conn;
5178 skb_pull(skb, HCI_SCO_HDR_SIZE);
5180 handle = __le16_to_cpu(hdr->handle);
5182 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5184 hdev->stat.sco_rx++;
5187 conn = hci_conn_hash_lookup_handle(hdev, handle);
5188 hci_dev_unlock(hdev);
5191 /* Send to upper protocol */
5192 sco_recv_scodata(conn, skb);
5195 BT_ERR("%s SCO packet for unknown connection handle %d",
5196 hdev->name, handle);
/* A request is complete when the next command waiting on hdev->cmd_q
 * is the start of a new request (req.start), i.e. no further commands
 * of the current request remain queued.
 */
5202 static bool hci_req_is_complete(struct hci_dev *hdev)
5204 struct sk_buff *skb;
5206 skb = skb_peek(&hdev->cmd_q);
5210 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q.
 * Used when a spontaneous controller reset swallowed the pending
 * command; HCI_OP_RESET itself is never resent (it may loop).
 */
5213 static void hci_resend_last(struct hci_dev *hdev)
5215 struct hci_command_hdr *sent;
5216 struct sk_buff *skb;
5219 if (!hdev->sent_cmd)
5222 sent = (void *) hdev->sent_cmd->data;
5223 opcode = __le16_to_cpu(sent->opcode);
5224 if (opcode == HCI_OP_RESET)
5227 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5231 skb_queue_head(&hdev->cmd_q, skb);
5232 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes: decide whether the request it
 * belongs to is finished and, if so, find and invoke the request's
 * completion callback.  On failure (status != 0) the remaining queued
 * commands of the same request are discarded.
 */
5235 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5237 hci_req_complete_t req_complete = NULL;
5238 struct sk_buff *skb;
5239 unsigned long flags;
5241 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5243 /* If the completed command doesn't match the last one that was
5244 * sent we need to do special handling of it.
5246 if (!hci_sent_cmd_data(hdev, opcode)) {
5247 /* Some CSR based controllers generate a spontaneous
5248 * reset complete event during init and any pending
5249 * command will never be completed. In such a case we
5250 * need to resend whatever was the last sent
5253 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5254 hci_resend_last(hdev);
5259 /* If the command succeeded and there's still more commands in
5260 * this request the request is not yet complete.
5262 if (!status && !hci_req_is_complete(hdev))
5265 /* If this was the last command in a request the complete
5266 * callback would be found in hdev->sent_cmd instead of the
5267 * command queue (hdev->cmd_q).
5269 if (hdev->sent_cmd) {
5270 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5273 /* We must set the complete callback to NULL to
5274 * avoid calling the callback more than once if
5275 * this function gets called again.
5277 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5283 /* Remove all pending commands belonging to this request */
5284 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5285 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5286 if (bt_cb(skb)->req.start) {
/* Start of the next request reached -- put it back and stop. */
5287 __skb_queue_head(&hdev->cmd_q, skb);
5291 req_complete = bt_cb(skb)->req.complete;
5294 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5298 req_complete(hdev, status);
/* RX worker: drain hdev->rx_q, mirroring each packet to the monitor
 * (and to raw sockets when promiscuous), then dispatch by packet type
 * to the event/ACL/SCO handlers.  Data packets are dropped while the
 * device is initializing or claimed by a user channel.
 */
5301 static void hci_rx_work(struct work_struct *work)
5303 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5304 struct sk_buff *skb;
5306 BT_DBG("%s", hdev->name);
5308 while ((skb = skb_dequeue(&hdev->rx_q))) {
5309 /* Send copy to monitor */
5310 hci_send_to_monitor(hdev, skb);
5312 if (atomic_read(&hdev->promisc)) {
5313 /* Send copy to the sockets */
5314 hci_send_to_sock(hdev, skb);
/* User channel owns the device: the stack must not process. */
5317 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5322 if (test_bit(HCI_INIT, &hdev->flags)) {
5323 /* Don't process data packets in this states. */
5324 switch (bt_cb(skb)->pkt_type) {
5325 case HCI_ACLDATA_PKT:
5326 case HCI_SCODATA_PKT:
5333 switch (bt_cb(skb)->pkt_type) {
5335 BT_DBG("%s Event packet", hdev->name);
5336 hci_event_packet(hdev, skb);
5339 case HCI_ACLDATA_PKT:
5340 BT_DBG("%s ACL data packet", hdev->name);
5341 hci_acldata_packet(hdev, skb);
5344 case HCI_SCODATA_PKT:
5345 BT_DBG("%s SCO data packet", hdev->name);
5346 hci_scodata_packet(hdev, skb);
/* Command worker: if the controller has command credits, dequeue the
 * next command, remember a clone in hdev->sent_cmd (for completion
 * matching/resend), send it and arm the command timeout.  If cloning
 * fails, the command is put back and the worker re-queued.
 */
5356 static void hci_cmd_work(struct work_struct *work)
5358 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5359 struct sk_buff *skb;
5361 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5362 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5364 /* Send queued commands */
5365 if (atomic_read(&hdev->cmd_cnt)) {
5366 skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previous sent_cmd clone before replacing it. */
5370 kfree_skb(hdev->sent_cmd);
5372 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5373 if (hdev->sent_cmd) {
5374 atomic_dec(&hdev->cmd_cnt);
5375 hci_send_frame(hdev, skb);
/* While resetting, no timeout -- the reset completes or the
 * device is gone; otherwise (re)arm the command timer. */
5376 if (test_bit(HCI_RESET, &hdev->flags))
5377 cancel_delayed_work(&hdev->cmd_timer);
5379 schedule_delayed_work(&hdev->cmd_timer,
5382 skb_queue_head(&hdev->cmd_q, skb);
5383 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Append an LE Set Scan Enable command that disables scanning to the
 * given request.
 */
5388 void hci_req_add_le_scan_disable(struct hci_request *req)
5390 struct hci_cp_le_set_scan_enable cp;
5392 memset(&cp, 0, sizeof(cp));
5393 cp.enable = LE_SCAN_DISABLE;
5394 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Append the commands for starting LE passive scanning to a request:
 * pick/refresh the own address, then set scan parameters (passive type,
 * interval/window from hdev) and enable scanning with duplicate
 * filtering.  Gives up silently if no usable own address is available.
 */
5397 void hci_req_add_le_passive_scan(struct hci_request *req)
5399 struct hci_cp_le_set_scan_param param_cp;
5400 struct hci_cp_le_set_scan_enable enable_cp;
5401 struct hci_dev *hdev = req->hdev;
5404 /* Set require_privacy to false since no SCAN_REQ are send
5405 * during passive scanning. Not using an unresolvable address
5406 * here is important so that peer devices using direct
5407 * advertising with our address will be correctly reported
5408 * by the controller.
5410 if (hci_update_random_address(req, false, &own_addr_type))
/* Fixed mis-encoded "&param_cp" (was corrupted to "¶m_cp" by an
 * HTML-entity decoding of "&para"). */
5413 memset(&param_cp, 0, sizeof(param_cp));
5414 param_cp.type = LE_SCAN_PASSIVE;
5415 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5416 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5417 param_cp.own_address_type = own_addr_type;
5418 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5421 memset(&enable_cp, 0, sizeof(enable_cp));
5422 enable_cp.enable = LE_SCAN_ENABLE;
5423 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5424 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Completion callback for the background-scan request: failures are
 * only logged, nothing to roll back.
 */
5428 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5431 BT_DBG("HCI request failed to update background scanning: "
5432 "status 0x%2.2x", status);
5435 /* This function controls the background scanning based on hdev->pend_le_conns
5436 * list. If there are pending LE connection we start the background scanning,
5437 * otherwise we stop it.
5439 * This function requires the caller holds hdev->lock.
5441 void hci_update_background_scan(struct hci_dev *hdev)
5443 struct hci_request req;
5444 struct hci_conn *conn;
/* Bail out while the device is down, initializing, being set up /
 * configured, auto-powering off, or unregistering. */
5447 if (!test_bit(HCI_UP, &hdev->flags) ||
5448 test_bit(HCI_INIT, &hdev->flags) ||
5449 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5450 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5451 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5452 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5455 /* No point in doing scanning if LE support hasn't been enabled */
5456 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5459 /* If discovery is active don't interfere with it */
5460 if (hdev->discovery.state != DISCOVERY_STOPPED)
5463 hci_req_init(&req, hdev);
5465 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5466 list_empty(&hdev->pend_le_conns) &&
5467 list_empty(&hdev->pend_le_reports)) {
5468 /* If there is no pending LE connections or devices
5469 * to be scanned for, we should stop the background
5473 /* If controller is not scanning we are done. */
5474 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5477 hci_req_add_le_scan_disable(&req);
5479 BT_DBG("%s stopping background scanning", hdev->name);
5481 /* If there is at least one pending LE connection, we should
5482 * keep the background scan running.
5485 /* If controller is connecting, we should not start scanning
5486 * since some controllers are not able to scan and connect at
5489 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5493 /* If controller is currently scanning, we stop it to ensure we
5494 * don't miss any advertising (due to duplicates filter).
5496 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5497 hci_req_add_le_scan_disable(&req);
5499 hci_req_add_le_passive_scan(&req);
5501 BT_DBG("%s starting background scanning", hdev->name);
5504 err = hci_req_run(&req, update_background_scan_complete);
5506 BT_ERR("Failed to run HCI request: err %d", err);