2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a synchronous HCI request (hdev->req_status) */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize synchronous requests against a single controller */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
/* ---- HCI notifications ---- */

/* Forward a device event (register, up, down, ...) to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
93 size_t buf_size = min(count, (sizeof(buf)-1));
97 if (!test_bit(HCI_UP, &hdev->flags))
100 if (copy_from_user(buf, user_buf, buf_size))
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 hci_req_unlock(hdev);
122 err = -bt_to_errno(skb->data[0]);
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
133 static const struct file_operations dut_mode_fops = {
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
200 .release = single_release,
203 static int whitelist_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
216 static int whitelist_open(struct inode *inode, struct file *file)
218 return single_open(file, whitelist_show, inode->i_private);
221 static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
225 .release = single_release,
228 static int uuids_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct bt_uuid *uuid;
234 list_for_each_entry(uuid, &hdev->uuids, list) {
237 /* The Bluetooth UUID values are stored in big endian,
238 * but with reversed byte order. So convert them into
239 * the right order for the %pUb modifier.
241 for (i = 0; i < 16; i++)
242 val[i] = uuid->uuid[15 - i];
244 seq_printf(f, "%pUb\n", val);
246 hci_dev_unlock(hdev);
251 static int uuids_open(struct inode *inode, struct file *file)
253 return single_open(file, uuids_show, inode->i_private);
256 static const struct file_operations uuids_fops = {
260 .release = single_release,
263 static int inquiry_cache_show(struct seq_file *f, void *p)
265 struct hci_dev *hdev = f->private;
266 struct discovery_state *cache = &hdev->discovery;
267 struct inquiry_entry *e;
271 list_for_each_entry(e, &cache->all, all) {
272 struct inquiry_data *data = &e->data;
273 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
275 data->pscan_rep_mode, data->pscan_period_mode,
276 data->pscan_mode, data->dev_class[2],
277 data->dev_class[1], data->dev_class[0],
278 __le16_to_cpu(data->clock_offset),
279 data->rssi, data->ssp_mode, e->timestamp);
282 hci_dev_unlock(hdev);
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
289 return single_open(file, inquiry_cache_show, inode->i_private);
292 static const struct file_operations inquiry_cache_fops = {
293 .open = inquiry_cache_open,
296 .release = single_release,
299 static int link_keys_show(struct seq_file *f, void *ptr)
301 struct hci_dev *hdev = f->private;
302 struct list_head *p, *n;
305 list_for_each_safe(p, n, &hdev->link_keys) {
306 struct link_key *key = list_entry(p, struct link_key, list);
307 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
310 hci_dev_unlock(hdev);
315 static int link_keys_open(struct inode *inode, struct file *file)
317 return single_open(file, link_keys_show, inode->i_private);
320 static const struct file_operations link_keys_fops = {
321 .open = link_keys_open,
324 .release = single_release,
327 static int dev_class_show(struct seq_file *f, void *ptr)
329 struct hci_dev *hdev = f->private;
332 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333 hdev->dev_class[1], hdev->dev_class[0]);
334 hci_dev_unlock(hdev);
339 static int dev_class_open(struct inode *inode, struct file *file)
341 return single_open(file, dev_class_show, inode->i_private);
344 static const struct file_operations dev_class_fops = {
345 .open = dev_class_open,
348 .release = single_release,
351 static int voice_setting_get(void *data, u64 *val)
353 struct hci_dev *hdev = data;
356 *val = hdev->voice_setting;
357 hci_dev_unlock(hdev);
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363 NULL, "0x%4.4llx\n");
365 static int auto_accept_delay_set(void *data, u64 val)
367 struct hci_dev *hdev = data;
370 hdev->auto_accept_delay = val;
371 hci_dev_unlock(hdev);
376 static int auto_accept_delay_get(void *data, u64 *val)
378 struct hci_dev *hdev = data;
381 *val = hdev->auto_accept_delay;
382 hci_dev_unlock(hdev);
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 auto_accept_delay_set, "%llu\n");
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 size_t count, loff_t *ppos)
393 struct hci_dev *hdev = file->private_data;
396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
402 static ssize_t force_sc_support_write(struct file *file,
403 const char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
408 size_t buf_size = min(count, (sizeof(buf)-1));
411 if (test_bit(HCI_UP, &hdev->flags))
414 if (copy_from_user(buf, user_buf, buf_size))
417 buf[buf_size] = '\0';
418 if (strtobool(buf, &enable))
421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
429 static const struct file_operations force_sc_support_fops = {
431 .read = force_sc_support_read,
432 .write = force_sc_support_write,
433 .llseek = default_llseek,
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 size_t count, loff_t *ppos)
439 struct hci_dev *hdev = file->private_data;
442 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
445 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
448 static const struct file_operations sc_only_mode_fops = {
450 .read = sc_only_mode_read,
451 .llseek = default_llseek,
454 static int idle_timeout_set(void *data, u64 val)
456 struct hci_dev *hdev = data;
458 if (val != 0 && (val < 500 || val > 3600000))
462 hdev->idle_timeout = val;
463 hci_dev_unlock(hdev);
468 static int idle_timeout_get(void *data, u64 *val)
470 struct hci_dev *hdev = data;
473 *val = hdev->idle_timeout;
474 hci_dev_unlock(hdev);
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 idle_timeout_set, "%llu\n");
482 static int rpa_timeout_set(void *data, u64 val)
484 struct hci_dev *hdev = data;
486 /* Require the RPA timeout to be at least 30 seconds and at most
489 if (val < 30 || val > (60 * 60 * 24))
493 hdev->rpa_timeout = val;
494 hci_dev_unlock(hdev);
499 static int rpa_timeout_get(void *data, u64 *val)
501 struct hci_dev *hdev = data;
504 *val = hdev->rpa_timeout;
505 hci_dev_unlock(hdev);
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 rpa_timeout_set, "%llu\n");
513 static int sniff_min_interval_set(void *data, u64 val)
515 struct hci_dev *hdev = data;
517 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
521 hdev->sniff_min_interval = val;
522 hci_dev_unlock(hdev);
527 static int sniff_min_interval_get(void *data, u64 *val)
529 struct hci_dev *hdev = data;
532 *val = hdev->sniff_min_interval;
533 hci_dev_unlock(hdev);
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 sniff_min_interval_set, "%llu\n");
541 static int sniff_max_interval_set(void *data, u64 val)
543 struct hci_dev *hdev = data;
545 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
549 hdev->sniff_max_interval = val;
550 hci_dev_unlock(hdev);
555 static int sniff_max_interval_get(void *data, u64 *val)
557 struct hci_dev *hdev = data;
560 *val = hdev->sniff_max_interval;
561 hci_dev_unlock(hdev);
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 sniff_max_interval_set, "%llu\n");
569 static int conn_info_min_age_set(void *data, u64 val)
571 struct hci_dev *hdev = data;
573 if (val == 0 || val > hdev->conn_info_max_age)
577 hdev->conn_info_min_age = val;
578 hci_dev_unlock(hdev);
583 static int conn_info_min_age_get(void *data, u64 *val)
585 struct hci_dev *hdev = data;
588 *val = hdev->conn_info_min_age;
589 hci_dev_unlock(hdev);
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 conn_info_min_age_set, "%llu\n");
597 static int conn_info_max_age_set(void *data, u64 val)
599 struct hci_dev *hdev = data;
601 if (val == 0 || val < hdev->conn_info_min_age)
605 hdev->conn_info_max_age = val;
606 hci_dev_unlock(hdev);
611 static int conn_info_max_age_get(void *data, u64 *val)
613 struct hci_dev *hdev = data;
616 *val = hdev->conn_info_max_age;
617 hci_dev_unlock(hdev);
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 conn_info_max_age_set, "%llu\n");
625 static int identity_show(struct seq_file *f, void *p)
627 struct hci_dev *hdev = f->private;
633 hci_copy_identity_address(hdev, &addr, &addr_type);
635 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636 16, hdev->irk, &hdev->rpa);
638 hci_dev_unlock(hdev);
643 static int identity_open(struct inode *inode, struct file *file)
645 return single_open(file, identity_show, inode->i_private);
648 static const struct file_operations identity_fops = {
649 .open = identity_open,
652 .release = single_release,
655 static int random_address_show(struct seq_file *f, void *p)
657 struct hci_dev *hdev = f->private;
660 seq_printf(f, "%pMR\n", &hdev->random_addr);
661 hci_dev_unlock(hdev);
666 static int random_address_open(struct inode *inode, struct file *file)
668 return single_open(file, random_address_show, inode->i_private);
671 static const struct file_operations random_address_fops = {
672 .open = random_address_open,
675 .release = single_release,
678 static int static_address_show(struct seq_file *f, void *p)
680 struct hci_dev *hdev = f->private;
683 seq_printf(f, "%pMR\n", &hdev->static_addr);
684 hci_dev_unlock(hdev);
689 static int static_address_open(struct inode *inode, struct file *file)
691 return single_open(file, static_address_show, inode->i_private);
694 static const struct file_operations static_address_fops = {
695 .open = static_address_open,
698 .release = single_release,
701 static ssize_t force_static_address_read(struct file *file,
702 char __user *user_buf,
703 size_t count, loff_t *ppos)
705 struct hci_dev *hdev = file->private_data;
708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
714 static ssize_t force_static_address_write(struct file *file,
715 const char __user *user_buf,
716 size_t count, loff_t *ppos)
718 struct hci_dev *hdev = file->private_data;
720 size_t buf_size = min(count, (sizeof(buf)-1));
723 if (test_bit(HCI_UP, &hdev->flags))
726 if (copy_from_user(buf, user_buf, buf_size))
729 buf[buf_size] = '\0';
730 if (strtobool(buf, &enable))
733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
741 static const struct file_operations force_static_address_fops = {
743 .read = force_static_address_read,
744 .write = force_static_address_write,
745 .llseek = default_llseek,
748 static int white_list_show(struct seq_file *f, void *ptr)
750 struct hci_dev *hdev = f->private;
751 struct bdaddr_list *b;
754 list_for_each_entry(b, &hdev->le_white_list, list)
755 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 hci_dev_unlock(hdev);
761 static int white_list_open(struct inode *inode, struct file *file)
763 return single_open(file, white_list_show, inode->i_private);
766 static const struct file_operations white_list_fops = {
767 .open = white_list_open,
770 .release = single_release,
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
775 struct hci_dev *hdev = f->private;
776 struct list_head *p, *n;
779 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 &irk->bdaddr, irk->addr_type,
783 16, irk->val, &irk->rpa);
785 hci_dev_unlock(hdev);
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
792 return single_open(file, identity_resolving_keys_show,
796 static const struct file_operations identity_resolving_keys_fops = {
797 .open = identity_resolving_keys_open,
800 .release = single_release,
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
805 struct hci_dev *hdev = f->private;
806 struct list_head *p, *n;
809 list_for_each_safe(p, n, &hdev->long_term_keys) {
810 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814 __le64_to_cpu(ltk->rand), 16, ltk->val);
816 hci_dev_unlock(hdev);
821 static int long_term_keys_open(struct inode *inode, struct file *file)
823 return single_open(file, long_term_keys_show, inode->i_private);
826 static const struct file_operations long_term_keys_fops = {
827 .open = long_term_keys_open,
830 .release = single_release,
833 static int conn_min_interval_set(void *data, u64 val)
835 struct hci_dev *hdev = data;
837 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
841 hdev->le_conn_min_interval = val;
842 hci_dev_unlock(hdev);
847 static int conn_min_interval_get(void *data, u64 *val)
849 struct hci_dev *hdev = data;
852 *val = hdev->le_conn_min_interval;
853 hci_dev_unlock(hdev);
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 conn_min_interval_set, "%llu\n");
861 static int conn_max_interval_set(void *data, u64 val)
863 struct hci_dev *hdev = data;
865 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
869 hdev->le_conn_max_interval = val;
870 hci_dev_unlock(hdev);
875 static int conn_max_interval_get(void *data, u64 *val)
877 struct hci_dev *hdev = data;
880 *val = hdev->le_conn_max_interval;
881 hci_dev_unlock(hdev);
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 conn_max_interval_set, "%llu\n");
889 static int conn_latency_set(void *data, u64 val)
891 struct hci_dev *hdev = data;
897 hdev->le_conn_latency = val;
898 hci_dev_unlock(hdev);
903 static int conn_latency_get(void *data, u64 *val)
905 struct hci_dev *hdev = data;
908 *val = hdev->le_conn_latency;
909 hci_dev_unlock(hdev);
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 conn_latency_set, "%llu\n");
917 static int supervision_timeout_set(void *data, u64 val)
919 struct hci_dev *hdev = data;
921 if (val < 0x000a || val > 0x0c80)
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
931 static int supervision_timeout_get(void *data, u64 *val)
933 struct hci_dev *hdev = data;
936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 supervision_timeout_set, "%llu\n");
945 static int adv_channel_map_set(void *data, u64 val)
947 struct hci_dev *hdev = data;
949 if (val < 0x01 || val > 0x07)
953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
959 static int adv_channel_map_get(void *data, u64 *val)
961 struct hci_dev *hdev = data;
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
973 static int device_list_show(struct seq_file *f, void *ptr)
975 struct hci_dev *hdev = f->private;
976 struct hci_conn_params *p;
979 list_for_each_entry(p, &hdev->le_conn_params, list) {
980 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
983 hci_dev_unlock(hdev);
988 static int device_list_open(struct inode *inode, struct file *file)
990 return single_open(file, device_list_show, inode->i_private);
993 static const struct file_operations device_list_fops = {
994 .open = device_list_open,
997 .release = single_release,
1000 /* ---- HCI requests ---- */
1002 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1004 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1006 if (hdev->req_status == HCI_REQ_PEND) {
1007 hdev->req_result = result;
1008 hdev->req_status = HCI_REQ_DONE;
1009 wake_up_interruptible(&hdev->req_wait_q);
1013 static void hci_req_cancel(struct hci_dev *hdev, int err)
1015 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1017 if (hdev->req_status == HCI_REQ_PEND) {
1018 hdev->req_result = err;
1019 hdev->req_status = HCI_REQ_CANCELED;
1020 wake_up_interruptible(&hdev->req_wait_q);
1024 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1027 struct hci_ev_cmd_complete *ev;
1028 struct hci_event_hdr *hdr;
1029 struct sk_buff *skb;
1033 skb = hdev->recv_evt;
1034 hdev->recv_evt = NULL;
1036 hci_dev_unlock(hdev);
1039 return ERR_PTR(-ENODATA);
1041 if (skb->len < sizeof(*hdr)) {
1042 BT_ERR("Too short HCI event");
1046 hdr = (void *) skb->data;
1047 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1050 if (hdr->evt != event)
1055 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1056 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1060 if (skb->len < sizeof(*ev)) {
1061 BT_ERR("Too short cmd_complete event");
1065 ev = (void *) skb->data;
1066 skb_pull(skb, sizeof(*ev));
1068 if (opcode == __le16_to_cpu(ev->opcode))
1071 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1072 __le16_to_cpu(ev->opcode));
1076 return ERR_PTR(-ENODATA);
1079 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1080 const void *param, u8 event, u32 timeout)
1082 DECLARE_WAITQUEUE(wait, current);
1083 struct hci_request req;
1086 BT_DBG("%s", hdev->name);
1088 hci_req_init(&req, hdev);
1090 hci_req_add_ev(&req, opcode, plen, param, event);
1092 hdev->req_status = HCI_REQ_PEND;
1094 err = hci_req_run(&req, hci_req_sync_complete);
1096 return ERR_PTR(err);
1098 add_wait_queue(&hdev->req_wait_q, &wait);
1099 set_current_state(TASK_INTERRUPTIBLE);
1101 schedule_timeout(timeout);
1103 remove_wait_queue(&hdev->req_wait_q, &wait);
1105 if (signal_pending(current))
1106 return ERR_PTR(-EINTR);
1108 switch (hdev->req_status) {
1110 err = -bt_to_errno(hdev->req_result);
1113 case HCI_REQ_CANCELED:
1114 err = -hdev->req_result;
1122 hdev->req_status = hdev->req_result = 0;
1124 BT_DBG("%s end: err %d", hdev->name, err);
1127 return ERR_PTR(err);
1129 return hci_get_cmd_complete(hdev, opcode, event);
1131 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1133 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1134 const void *param, u32 timeout)
1136 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1138 EXPORT_SYMBOL(__hci_cmd_sync);
1140 /* Execute request and wait for completion. */
1141 static int __hci_req_sync(struct hci_dev *hdev,
1142 void (*func)(struct hci_request *req,
1144 unsigned long opt, __u32 timeout)
1146 struct hci_request req;
1147 DECLARE_WAITQUEUE(wait, current);
1150 BT_DBG("%s start", hdev->name);
1152 hci_req_init(&req, hdev);
1154 hdev->req_status = HCI_REQ_PEND;
1158 err = hci_req_run(&req, hci_req_sync_complete);
1160 hdev->req_status = 0;
1162 /* ENODATA means the HCI request command queue is empty.
1163 * This can happen when a request with conditionals doesn't
1164 * trigger any commands to be sent. This is normal behavior
1165 * and should not trigger an error return.
1167 if (err == -ENODATA)
1173 add_wait_queue(&hdev->req_wait_q, &wait);
1174 set_current_state(TASK_INTERRUPTIBLE);
1176 schedule_timeout(timeout);
1178 remove_wait_queue(&hdev->req_wait_q, &wait);
1180 if (signal_pending(current))
1183 switch (hdev->req_status) {
1185 err = -bt_to_errno(hdev->req_result);
1188 case HCI_REQ_CANCELED:
1189 err = -hdev->req_result;
1197 hdev->req_status = hdev->req_result = 0;
1199 BT_DBG("%s end: err %d", hdev->name, err);
1204 static int hci_req_sync(struct hci_dev *hdev,
1205 void (*req)(struct hci_request *req,
1207 unsigned long opt, __u32 timeout)
1211 if (!test_bit(HCI_UP, &hdev->flags))
1214 /* Serialize all requests */
1216 ret = __hci_req_sync(hdev, req, opt, timeout);
1217 hci_req_unlock(hdev);
1222 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1224 BT_DBG("%s %ld", req->hdev->name, opt);
1227 set_bit(HCI_RESET, &req->hdev->flags);
1228 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1231 static void bredr_init(struct hci_request *req)
1233 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1235 /* Read Local Supported Features */
1236 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1238 /* Read Local Version */
1239 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1241 /* Read BD Address */
1242 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1245 static void amp_init(struct hci_request *req)
1247 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1249 /* Read Local Version */
1250 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1252 /* Read Local Supported Commands */
1253 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1255 /* Read Local Supported Features */
1256 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1258 /* Read Local AMP Info */
1259 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1261 /* Read Data Blk size */
1262 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1264 /* Read Flow Control Mode */
1265 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1267 /* Read Location Data */
1268 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1271 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1273 struct hci_dev *hdev = req->hdev;
1275 BT_DBG("%s %ld", hdev->name, opt);
1278 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1279 hci_reset_req(req, 0);
1281 switch (hdev->dev_type) {
1291 BT_ERR("Unknown device type %d", hdev->dev_type);
1296 static void bredr_setup(struct hci_request *req)
1298 struct hci_dev *hdev = req->hdev;
1303 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1304 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1306 /* Read Class of Device */
1307 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1309 /* Read Local Name */
1310 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1312 /* Read Voice Setting */
1313 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1315 /* Read Number of Supported IAC */
1316 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1318 /* Read Current IAC LAP */
1319 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1321 /* Clear Event Filters */
1322 flt_type = HCI_FLT_CLEAR_ALL;
1323 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1325 /* Connection accept timeout ~20 secs */
1326 param = cpu_to_le16(0x7d00);
1327 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1329 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1330 * but it does not support page scan related HCI commands.
1332 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1333 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1334 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1338 static void le_setup(struct hci_request *req)
1340 struct hci_dev *hdev = req->hdev;
1342 /* Read LE Buffer Size */
1343 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1345 /* Read LE Local Supported Features */
1346 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1348 /* Read LE Supported States */
1349 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1351 /* Read LE White List Size */
1352 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1354 /* Clear LE White List */
1355 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1357 /* LE-only controllers have LE implicitly enabled */
1358 if (!lmp_bredr_capable(hdev))
1359 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1362 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1364 if (lmp_ext_inq_capable(hdev))
1367 if (lmp_inq_rssi_capable(hdev))
1370 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1371 hdev->lmp_subver == 0x0757)
1374 if (hdev->manufacturer == 15) {
1375 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1377 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1379 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1383 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1384 hdev->lmp_subver == 0x1805)
1390 static void hci_setup_inquiry_mode(struct hci_request *req)
1394 mode = hci_get_inquiry_mode(req->hdev);
1396 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1399 static void hci_setup_event_mask(struct hci_request *req)
1401 struct hci_dev *hdev = req->hdev;
1403 /* The second byte is 0xff instead of 0x9f (two reserved bits
1404 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1405 * command otherwise.
1407 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1409 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1410 * any event mask for pre 1.2 devices.
1412 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1415 if (lmp_bredr_capable(hdev)) {
1416 events[4] |= 0x01; /* Flow Specification Complete */
1417 events[4] |= 0x02; /* Inquiry Result with RSSI */
1418 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1419 events[5] |= 0x08; /* Synchronous Connection Complete */
1420 events[5] |= 0x10; /* Synchronous Connection Changed */
1422 /* Use a different default for LE-only devices */
1423 memset(events, 0, sizeof(events));
1424 events[0] |= 0x10; /* Disconnection Complete */
1425 events[1] |= 0x08; /* Read Remote Version Information Complete */
1426 events[1] |= 0x20; /* Command Complete */
1427 events[1] |= 0x40; /* Command Status */
1428 events[1] |= 0x80; /* Hardware Error */
1429 events[2] |= 0x04; /* Number of Completed Packets */
1430 events[3] |= 0x02; /* Data Buffer Overflow */
1432 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1433 events[0] |= 0x80; /* Encryption Change */
1434 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1438 if (lmp_inq_rssi_capable(hdev))
1439 events[4] |= 0x02; /* Inquiry Result with RSSI */
1441 if (lmp_sniffsubr_capable(hdev))
1442 events[5] |= 0x20; /* Sniff Subrating */
1444 if (lmp_pause_enc_capable(hdev))
1445 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1447 if (lmp_ext_inq_capable(hdev))
1448 events[5] |= 0x40; /* Extended Inquiry Result */
1450 if (lmp_no_flush_capable(hdev))
1451 events[7] |= 0x01; /* Enhanced Flush Complete */
1453 if (lmp_lsto_capable(hdev))
1454 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1456 if (lmp_ssp_capable(hdev)) {
1457 events[6] |= 0x01; /* IO Capability Request */
1458 events[6] |= 0x02; /* IO Capability Response */
1459 events[6] |= 0x04; /* User Confirmation Request */
1460 events[6] |= 0x08; /* User Passkey Request */
1461 events[6] |= 0x10; /* Remote OOB Data Request */
1462 events[6] |= 0x20; /* Simple Pairing Complete */
1463 events[7] |= 0x04; /* User Passkey Notification */
1464 events[7] |= 0x08; /* Keypress Notification */
1465 events[7] |= 0x10; /* Remote Host Supported
1466 * Features Notification
1470 if (lmp_le_capable(hdev))
1471 events[7] |= 0x20; /* LE Meta-Event */
1473 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* hci_init2_req - stage 2 of controller initialisation (queued via
 * __hci_req_sync).  Based on the LMP feature bits read in stage 1 it
 * queues BR/EDR buffer/class reads, SSP mode, EIR, inquiry mode and
 * auth-enable commands.
 * NOTE(review): this chunk is a lossy extraction -- every line carries a
 * stray historical line number and several statements/braces (locals such
 * as `mode`/`enable`, else-branches, closing braces) are missing.  Code
 * kept byte-identical; comments only.
 */
1476 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1478 struct hci_dev *hdev = req->hdev;
1480 if (lmp_bredr_capable(hdev))
1483 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1485 if (lmp_le_capable(hdev))
1488 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1489 * local supported commands HCI command.
1491 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1492 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1494 if (lmp_ssp_capable(hdev)) {
1495 /* When SSP is available, then the host features page
1496 * should also be available as well. However some
1497 * controllers list the max_page as 0 as long as SSP
1498 * has not been enabled. To achieve proper debugging
1499 * output, force the minimum max_page to 1 at least.
1501 hdev->max_page = 0x01;
1503 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1505 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1506 sizeof(mode), &mode);
/* NOTE(review): else-branch below clears the EIR data when SSP is
 * disabled -- the `else` keyword itself was lost in extraction. */
1508 struct hci_cp_write_eir cp;
1510 memset(hdev->eir, 0, sizeof(hdev->eir));
1511 memset(&cp, 0, sizeof(cp));
1513 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1517 if (lmp_inq_rssi_capable(hdev))
1518 hci_setup_inquiry_mode(req);
1520 if (lmp_inq_tx_pwr_capable(hdev))
1521 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1523 if (lmp_ext_feat_capable(hdev)) {
1524 struct hci_cp_read_local_ext_features cp;
1527 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1531 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1533 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1538 static void hci_setup_link_policy(struct hci_request *req)
1540 struct hci_dev *hdev = req->hdev;
1541 struct hci_cp_write_def_link_policy cp;
1542 u16 link_policy = 0;
1544 if (lmp_rswitch_capable(hdev))
1545 link_policy |= HCI_LP_RSWITCH;
1546 if (lmp_hold_capable(hdev))
1547 link_policy |= HCI_LP_HOLD;
1548 if (lmp_sniff_capable(hdev))
1549 link_policy |= HCI_LP_SNIFF;
1550 if (lmp_park_capable(hdev))
1551 link_policy |= HCI_LP_PARK;
1553 cp.policy = cpu_to_le16(link_policy);
1554 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1557 static void hci_set_le_support(struct hci_request *req)
1559 struct hci_dev *hdev = req->hdev;
1560 struct hci_cp_write_le_host_supported cp;
1562 /* LE-only devices do not support explicit enablement */
1563 if (!lmp_bredr_capable(hdev))
1566 memset(&cp, 0, sizeof(cp));
1568 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1570 cp.simul = lmp_le_br_capable(hdev);
1573 if (cp.le != lmp_host_le_capable(hdev))
1574 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1578 static void hci_set_event_mask_page_2(struct hci_request *req)
1580 struct hci_dev *hdev = req->hdev;
1581 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1583 /* If Connectionless Slave Broadcast master role is supported
1584 * enable all necessary events for it.
1586 if (lmp_csb_master_capable(hdev)) {
1587 events[1] |= 0x40; /* Triggered Clock Capture */
1588 events[1] |= 0x80; /* Synchronization Train Complete */
1589 events[2] |= 0x10; /* Slave Page Response Timeout */
1590 events[2] |= 0x20; /* CSB Channel Map Change */
1593 /* If Connectionless Slave Broadcast slave role is supported
1594 * enable all necessary events for it.
1596 if (lmp_csb_slave_capable(hdev)) {
1597 events[2] |= 0x01; /* Synchronization Train Received */
1598 events[2] |= 0x02; /* CSB Receive */
1599 events[2] |= 0x04; /* CSB Timeout */
1600 events[2] |= 0x08; /* Truncated Page Complete */
1603 /* Enable Authenticated Payload Timeout Expired event if supported */
1604 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1607 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* hci_init3_req - stage 3 of controller initialisation: event mask,
 * stored-link-key deletion (where genuinely supported), link policy,
 * and the LE-specific event mask / TX power / host-support commands.
 * NOTE(review): lossy extraction -- local declarations (the LE `events`
 * array, loop variable `p`), argument continuations and closing braces
 * are missing, and every line carries a stray historical line number.
 * Code kept byte-identical; comments only.
 */
1610 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1612 struct hci_dev *hdev = req->hdev;
1615 hci_setup_event_mask(req);
1617 /* Some Broadcom based Bluetooth controllers do not support the
1618 * Delete Stored Link Key command. They are clearly indicating its
1619 * absence in the bit mask of supported commands.
1621 * Check the supported commands and only if the the command is marked
1622 * as supported send it. If not supported assume that the controller
1623 * does not have actual support for stored link keys which makes this
1624 * command redundant anyway.
1626 * Some controllers indicate that they support handling deleting
1627 * stored link keys, but they don't. The quirk lets a driver
1628 * just disable this command.
1630 if (hdev->commands[6] & 0x80 &&
1631 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1632 struct hci_cp_delete_stored_link_key cp;
1634 bacpy(&cp.bdaddr, BDADDR_ANY);
1635 cp.delete_all = 0x01;
1636 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1640 if (hdev->commands[5] & 0x10)
1641 hci_setup_link_policy(req);
1643 if (lmp_le_capable(hdev)) {
1646 memset(events, 0, sizeof(events));
1649 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1650 events[0] |= 0x10; /* LE Long Term Key Request */
1652 /* If controller supports the Connection Parameters Request
1653 * Link Layer Procedure, enable the corresponding event.
1655 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1656 events[0] |= 0x20; /* LE Remote Connection
1660 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1663 if (hdev->commands[25] & 0x40) {
1664 /* Read LE Advertising Channel TX Power */
1665 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1668 hci_set_le_support(req);
1671 /* Read features beyond page 1 if available */
1672 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1673 struct hci_cp_read_local_ext_features cp;
1676 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* hci_init4_req - final init stage: event mask page 2, local codec list,
 * synchronization train parameters and Secure Connections enablement,
 * each gated on the controller's supported-commands / LMP feature bits.
 * NOTE(review): lossy extraction -- the `support` local and closing
 * braces are missing; stray historical line numbers prefix each line.
 * Code kept byte-identical; comments only.
 */
1681 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1683 struct hci_dev *hdev = req->hdev;
1685 /* Set event mask page 2 if the HCI command for it is supported */
1686 if (hdev->commands[22] & 0x04)
1687 hci_set_event_mask_page_2(req);
1689 /* Read local codec list if the HCI command is supported */
1690 if (hdev->commands[29] & 0x20)
1691 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1693 /* Check for Synchronization Train support */
1694 if (lmp_sync_train_capable(hdev))
1695 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1697 /* Enable Secure Connections if supported and configured */
1698 if ((lmp_sc_capable(hdev) ||
1699 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1700 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1702 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1703 sizeof(support), &support);
/* __hci_init - run the four synchronous init request stages against a
 * newly opened controller, then (during the initial HCI_SETUP phase
 * only) populate the per-device debugfs tree: common entries first,
 * then BR/EDR-, SSP-, sniff- and LE-specific ones guarded by the
 * corresponding lmp_*_capable() checks.
 * NOTE(review): lossy extraction -- error checks after each
 * __hci_req_sync(), several debugfs fops arguments, the final
 * `return 0;` and closing braces are missing, and each line carries a
 * stray historical line number.  Code kept byte-identical; comments
 * only.
 */
1707 static int __hci_init(struct hci_dev *hdev)
1711 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1715 /* The Device Under Test (DUT) mode is special and available for
1716 * all controller types. So just create it early on.
1718 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1719 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1723 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1724 * BR/EDR/LE type controllers. AMP controllers only need the
1727 if (hdev->dev_type != HCI_BREDR)
1730 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1734 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1738 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1742 /* Only create debugfs entries during the initial setup
1743 * phase and not every time the controller gets powered on.
1745 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1748 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1750 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1751 &hdev->manufacturer);
1752 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1753 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1754 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1756 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1758 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1760 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1761 &conn_info_min_age_fops);
1762 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1763 &conn_info_max_age_fops);
/* BR/EDR-only debugfs entries */
1765 if (lmp_bredr_capable(hdev)) {
1766 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1767 hdev, &inquiry_cache_fops);
1768 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1769 hdev, &link_keys_fops);
1770 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1771 hdev, &dev_class_fops);
1772 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1773 hdev, &voice_setting_fops);
1776 if (lmp_ssp_capable(hdev)) {
1777 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1778 hdev, &auto_accept_delay_fops);
1779 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1780 hdev, &force_sc_support_fops);
1781 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1782 hdev, &sc_only_mode_fops);
1785 if (lmp_sniff_capable(hdev)) {
1786 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1787 hdev, &idle_timeout_fops);
1788 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1789 hdev, &sniff_min_interval_fops);
1790 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1791 hdev, &sniff_max_interval_fops);
/* LE-only debugfs entries */
1794 if (lmp_le_capable(hdev)) {
1795 debugfs_create_file("identity", 0400, hdev->debugfs,
1796 hdev, &identity_fops);
1797 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1798 hdev, &rpa_timeout_fops);
1799 debugfs_create_file("random_address", 0444, hdev->debugfs,
1800 hdev, &random_address_fops);
1801 debugfs_create_file("static_address", 0444, hdev->debugfs,
1802 hdev, &static_address_fops);
1804 /* For controllers with a public address, provide a debug
1805 * option to force the usage of the configured static
1806 * address. By default the public address is used.
1808 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1809 debugfs_create_file("force_static_address", 0644,
1810 hdev->debugfs, hdev,
1811 &force_static_address_fops);
1813 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1814 &hdev->le_white_list_size);
1815 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1817 debugfs_create_file("identity_resolving_keys", 0400,
1818 hdev->debugfs, hdev,
1819 &identity_resolving_keys_fops);
1820 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1821 hdev, &long_term_keys_fops);
1822 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1823 hdev, &conn_min_interval_fops);
1824 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1825 hdev, &conn_max_interval_fops);
1826 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1827 hdev, &conn_latency_fops);
1828 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1829 hdev, &supervision_timeout_fops);
1830 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1831 hdev, &adv_channel_map_fops);
1832 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1834 debugfs_create_u16("discov_interleaved_timeout", 0644,
1836 &hdev->discov_interleaved_timeout);
1842 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1844 struct hci_dev *hdev = req->hdev;
1846 BT_DBG("%s %ld", hdev->name, opt);
1849 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1850 hci_reset_req(req, 0);
1852 /* Read Local Version */
1853 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1855 /* Read BD Address */
1856 if (hdev->set_bdaddr)
1857 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1860 static int __hci_unconf_init(struct hci_dev *hdev)
1864 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1867 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1874 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1878 BT_DBG("%s %x", req->hdev->name, scan);
1880 /* Inquiry and Page scans */
1881 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1884 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1888 BT_DBG("%s %x", req->hdev->name, auth);
1890 /* Authentication */
1891 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1894 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1898 BT_DBG("%s %x", req->hdev->name, encrypt);
1901 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1904 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1906 __le16 policy = cpu_to_le16(opt);
1908 BT_DBG("%s %x", req->hdev->name, policy);
1910 /* Default link policy */
1911 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1914 /* Get HCI device by index.
1915 * Device is held on return. */
1916 struct hci_dev *hci_dev_get(int index)
1918 struct hci_dev *hdev = NULL, *d;
1920 BT_DBG("%d", index);
1925 read_lock(&hci_dev_list_lock);
1926 list_for_each_entry(d, &hci_dev_list, list) {
1927 if (d->id == index) {
1928 hdev = hci_dev_hold(d);
1932 read_unlock(&hci_dev_list_lock);
1936 /* ---- Inquiry support ---- */
1938 bool hci_discovery_active(struct hci_dev *hdev)
1940 struct discovery_state *discov = &hdev->discovery;
1942 switch (discov->state) {
1943 case DISCOVERY_FINDING:
1944 case DISCOVERY_RESOLVING:
1952 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1954 int old_state = hdev->discovery.state;
1956 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1958 if (old_state == state)
1961 hdev->discovery.state = state;
1964 case DISCOVERY_STOPPED:
1965 hci_update_background_scan(hdev);
1967 if (old_state != DISCOVERY_STARTING)
1968 mgmt_discovering(hdev, 0);
1970 case DISCOVERY_STARTING:
1972 case DISCOVERY_FINDING:
1973 mgmt_discovering(hdev, 1);
1975 case DISCOVERY_RESOLVING:
1977 case DISCOVERY_STOPPING:
1982 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1984 struct discovery_state *cache = &hdev->discovery;
1985 struct inquiry_entry *p, *n;
1987 list_for_each_entry_safe(p, n, &cache->all, all) {
1992 INIT_LIST_HEAD(&cache->unknown);
1993 INIT_LIST_HEAD(&cache->resolve);
1996 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1999 struct discovery_state *cache = &hdev->discovery;
2000 struct inquiry_entry *e;
2002 BT_DBG("cache %p, %pMR", cache, bdaddr);
2004 list_for_each_entry(e, &cache->all, all) {
2005 if (!bacmp(&e->data.bdaddr, bdaddr))
2012 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2015 struct discovery_state *cache = &hdev->discovery;
2016 struct inquiry_entry *e;
2018 BT_DBG("cache %p, %pMR", cache, bdaddr);
2020 list_for_each_entry(e, &cache->unknown, list) {
2021 if (!bacmp(&e->data.bdaddr, bdaddr))
2028 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2032 struct discovery_state *cache = &hdev->discovery;
2033 struct inquiry_entry *e;
2035 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2037 list_for_each_entry(e, &cache->resolve, list) {
2038 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2040 if (!bacmp(&e->data.bdaddr, bdaddr))
2047 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2048 struct inquiry_entry *ie)
2050 struct discovery_state *cache = &hdev->discovery;
2051 struct list_head *pos = &cache->resolve;
2052 struct inquiry_entry *p;
2054 list_del(&ie->list);
2056 list_for_each_entry(p, &cache->resolve, list) {
2057 if (p->name_state != NAME_PENDING &&
2058 abs(p->data.rssi) >= abs(ie->data.rssi))
2063 list_add(&ie->list, pos);
/* hci_inquiry_cache_update - merge an inquiry result into the discovery
 * cache: update the existing entry (re-sorting the resolve list when
 * the RSSI of a NAME_NEEDED entry changed) or allocate and link a new
 * one, then refresh timestamps and return the mgmt "device found"
 * flags (LEGACY_PAIRING / CONFIRM_NAME).
 * NOTE(review): lossy extraction -- the function's trailing parameters,
 * the `flags` local, goto labels/`update` path, the kzalloc failure
 * branch, `name_known` handling branches and braces are missing, and
 * each line carries a stray historical line number.  Code kept
 * byte-identical; comments only.
 */
2066 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2069 struct discovery_state *cache = &hdev->discovery;
2070 struct inquiry_entry *ie;
2073 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2075 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2077 if (!data->ssp_mode)
2078 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2080 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2082 if (!ie->data.ssp_mode)
2083 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2085 if (ie->name_state == NAME_NEEDED &&
2086 data->rssi != ie->data.rssi) {
2087 ie->data.rssi = data->rssi;
2088 hci_inquiry_cache_update_resolve(hdev, ie);
2094 /* Entry not in the cache. Add new one. */
2095 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2097 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2101 list_add(&ie->all, &cache->all);
2104 ie->name_state = NAME_KNOWN;
2106 ie->name_state = NAME_NOT_KNOWN;
2107 list_add(&ie->list, &cache->unknown);
2111 if (name_known && ie->name_state != NAME_KNOWN &&
2112 ie->name_state != NAME_PENDING) {
2113 ie->name_state = NAME_KNOWN;
2114 list_del(&ie->list);
2117 memcpy(&ie->data, data, sizeof(*data));
2118 ie->timestamp = jiffies;
2119 cache->timestamp = jiffies;
2121 if (ie->name_state == NAME_NOT_KNOWN)
2122 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2128 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2130 struct discovery_state *cache = &hdev->discovery;
2131 struct inquiry_info *info = (struct inquiry_info *) buf;
2132 struct inquiry_entry *e;
2135 list_for_each_entry(e, &cache->all, all) {
2136 struct inquiry_data *data = &e->data;
2141 bacpy(&info->bdaddr, &data->bdaddr);
2142 info->pscan_rep_mode = data->pscan_rep_mode;
2143 info->pscan_period_mode = data->pscan_period_mode;
2144 info->pscan_mode = data->pscan_mode;
2145 memcpy(info->dev_class, data->dev_class, 3);
2146 info->clock_offset = data->clock_offset;
2152 BT_DBG("cache %p, copied %d", cache, copied);
2156 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2158 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2159 struct hci_dev *hdev = req->hdev;
2160 struct hci_cp_inquiry cp;
2162 BT_DBG("%s", hdev->name);
2164 if (test_bit(HCI_INQUIRY, &hdev->flags))
2168 memcpy(&cp.lap, &ir->lap, 3);
2169 cp.length = ir->length;
2170 cp.num_rsp = ir->num_rsp;
2171 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2174 static int wait_inquiry(void *word)
2177 return signal_pending(current);
/* hci_inquiry - HCIINQUIRY ioctl backend: validate the device and its
 * state, optionally flush a stale cache, run the inquiry synchronously,
 * wait for the HCI_INQUIRY flag to clear (interruptible), then dump the
 * cache into a temporary buffer and copy results back to user space.
 * NOTE(review): lossy extraction -- error-label `goto`s, the `timeo`
 * and `buf` declarations, lock calls and braces are missing; each line
 * carries a stray historical line number.  Code kept byte-identical;
 * comments only.
 */
2180 int hci_inquiry(void __user *arg)
2182 __u8 __user *ptr = arg;
2183 struct hci_inquiry_req ir;
2184 struct hci_dev *hdev;
2185 int err = 0, do_inquiry = 0, max_rsp;
2189 if (copy_from_user(&ir, ptr, sizeof(ir)))
2192 hdev = hci_dev_get(ir.dev_id);
2196 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2201 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2206 if (hdev->dev_type != HCI_BREDR) {
2211 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2217 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2218 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2219 hci_inquiry_cache_flush(hdev);
2222 hci_dev_unlock(hdev);
2224 timeo = ir.length * msecs_to_jiffies(2000);
2227 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2232 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2233 * cleared). If it is interrupted by a signal, return -EINTR.
2235 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2236 TASK_INTERRUPTIBLE))
2240 /* for unlimited number of responses we will use buffer with
2243 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2245 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2246 * copy it to the user space.
2248 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2255 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2256 hci_dev_unlock(hdev);
2258 BT_DBG("num_rsp %d", ir.num_rsp);
2260 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2262 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* hci_dev_do_open - bring a controller up: check rfkill/address
 * preconditions (skipped during SETUP/CONFIG), call the driver open(),
 * run the driver setup() and the appropriate init sequence under
 * HCI_INIT, then either mark the device HCI_UP and notify, or unwind
 * (flush works, purge queues, driver flush/close) on failure.
 * NOTE(review): lossy extraction -- `goto done` error paths, the
 * `else`/`if (ret)` branches around init, the close() call in the
 * failure path, the `return ret;` and many braces are missing; each
 * line carries a stray historical line number.  Code kept
 * byte-identical; comments only.
 */
2275 static int hci_dev_do_open(struct hci_dev *hdev)
2279 BT_DBG("%s %p", hdev->name, hdev);
2283 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2288 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2289 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2290 /* Check for rfkill but allow the HCI setup stage to
2291 * proceed (which in itself doesn't cause any RF activity).
2293 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2298 /* Check for valid public address or a configured static
2299 * random adddress, but let the HCI setup proceed to
2300 * be able to determine if there is a public address
2303 * In case of user channel usage, it is not important
2304 * if a public address or static random address is
2307 * This check is only valid for BR/EDR controllers
2308 * since AMP controllers do not have an address.
2310 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2311 hdev->dev_type == HCI_BREDR &&
2312 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2313 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2314 ret = -EADDRNOTAVAIL;
2319 if (test_bit(HCI_UP, &hdev->flags)) {
2324 if (hdev->open(hdev)) {
2329 atomic_set(&hdev->cmd_cnt, 1);
2330 set_bit(HCI_INIT, &hdev->flags);
2332 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2334 ret = hdev->setup(hdev);
2336 /* The transport driver can set these quirks before
2337 * creating the HCI device or in its setup callback.
2339 * In case any of them is set, the controller has to
2340 * start up as unconfigured.
2342 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2343 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2344 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2346 /* For an unconfigured controller it is required to
2347 * read at least the version information provided by
2348 * the Read Local Version Information command.
2350 * If the set_bdaddr driver callback is provided, then
2351 * also the original Bluetooth public device address
2352 * will be read using the Read BD Address command.
2354 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2355 ret = __hci_unconf_init(hdev);
2358 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2359 /* If public address change is configured, ensure that
2360 * the address gets programmed. If the driver does not
2361 * support changing the public address, fail the power
2364 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2366 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2368 ret = -EADDRNOTAVAIL;
2372 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2373 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2374 ret = __hci_init(hdev);
2377 clear_bit(HCI_INIT, &hdev->flags);
2381 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2382 set_bit(HCI_UP, &hdev->flags);
2383 hci_notify(hdev, HCI_DEV_UP);
2384 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2385 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2386 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2387 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2388 hdev->dev_type == HCI_BREDR) {
2390 mgmt_powered(hdev, 1);
2391 hci_dev_unlock(hdev);
2394 /* Init failed, cleanup */
2395 flush_work(&hdev->tx_work);
2396 flush_work(&hdev->cmd_work);
2397 flush_work(&hdev->rx_work);
2399 skb_queue_purge(&hdev->cmd_q);
2400 skb_queue_purge(&hdev->rx_q);
2405 if (hdev->sent_cmd) {
2406 kfree_skb(hdev->sent_cmd);
2407 hdev->sent_cmd = NULL;
2411 hdev->flags &= BIT(HCI_RAW);
2415 hci_req_unlock(hdev);
2419 /* ---- HCI ioctl helpers ---- */
/* hci_dev_open - HCIDEVUP ioctl backend: reject unconfigured devices
 * outside the user channel, cancel pending auto-power-off work, wait
 * for any running setup to finish, set HCI_PAIRABLE for legacy
 * (non-mgmt) users, then delegate to hci_dev_do_open().
 * NOTE(review): lossy extraction -- the `err` declaration, NULL/error
 * `goto done` paths, the final hci_dev_put()/return and braces are
 * missing; each line carries a stray historical line number.  Code
 * kept byte-identical; comments only.
 */
2421 int hci_dev_open(__u16 dev)
2423 struct hci_dev *hdev;
2426 hdev = hci_dev_get(dev);
2430 /* Devices that are marked as unconfigured can only be powered
2431 * up as user channel. Trying to bring them up as normal devices
2432 * will result into a failure. Only user channel operation is
2435 * When this function is called for a user channel, the flag
2436 * HCI_USER_CHANNEL will be set first before attempting to
2439 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2440 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2445 /* We need to ensure that no other power on/off work is pending
2446 * before proceeding to call hci_dev_do_open. This is
2447 * particularly important if the setup procedure has not yet
2450 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2451 cancel_delayed_work(&hdev->power_off);
2453 /* After this call it is guaranteed that the setup procedure
2454 * has finished. This means that error conditions like RFKILL
2455 * or no valid public or static random address apply.
2457 flush_workqueue(hdev->req_workqueue);
2459 /* For controllers not using the management interface and that
2460 * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
2461 * so that pairing works for them. Once the management interface
2462 * is in use this bit will be cleared again and userspace has
2463 * to explicitly enable it.
2465 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2466 !test_bit(HCI_MGMT, &hdev->dev_flags))
2467 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2469 err = hci_dev_do_open(hdev);
2476 /* This function requires the caller holds hdev->lock */
2477 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2479 struct hci_conn_params *p;
2481 list_for_each_entry(p, &hdev->le_conn_params, list)
2482 list_del_init(&p->action);
2484 BT_DBG("All LE pending actions cleared");
/* hci_dev_do_close - bring a controller down: cancel timers and works,
 * flush the inquiry cache, connections and pending LE actions, issue a
 * final reset when required by quirk/state, drain all queues, drop the
 * last sent command, call the driver close(), clear non-persistent
 * flags and notify mgmt that power is off.
 * NOTE(review): lossy extraction -- `goto`/`return` statements, the
 * hdev->flush()/close() driver calls, the smp_clear_irks-era lines,
 * hci_notify(HCI_DEV_CLOSE)-adjacent lines and many braces are missing;
 * each line carries a stray historical line number.  Code kept
 * byte-identical; comments only.
 */
2487 static int hci_dev_do_close(struct hci_dev *hdev)
2489 BT_DBG("%s %p", hdev->name, hdev);
2491 cancel_delayed_work(&hdev->power_off);
2493 hci_req_cancel(hdev, ENODEV);
2496 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2497 cancel_delayed_work_sync(&hdev->cmd_timer);
2498 hci_req_unlock(hdev);
2502 /* Flush RX and TX works */
2503 flush_work(&hdev->tx_work);
2504 flush_work(&hdev->rx_work);
2506 if (hdev->discov_timeout > 0) {
2507 cancel_delayed_work(&hdev->discov_off);
2508 hdev->discov_timeout = 0;
2509 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2510 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2513 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2514 cancel_delayed_work(&hdev->service_cache);
2516 cancel_delayed_work_sync(&hdev->le_scan_disable);
2518 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2519 cancel_delayed_work_sync(&hdev->rpa_expired);
2522 hci_inquiry_cache_flush(hdev);
2523 hci_conn_hash_flush(hdev);
2524 hci_pend_le_actions_clear(hdev);
2525 hci_dev_unlock(hdev);
2527 hci_notify(hdev, HCI_DEV_DOWN);
2533 skb_queue_purge(&hdev->cmd_q);
2534 atomic_set(&hdev->cmd_cnt, 1);
/* Final reset only for fully configured, non-auto-off devices whose
 * driver requests reset-on-close. */
2535 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2536 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2537 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2538 set_bit(HCI_INIT, &hdev->flags);
2539 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2540 clear_bit(HCI_INIT, &hdev->flags);
2543 /* flush cmd work */
2544 flush_work(&hdev->cmd_work);
2547 skb_queue_purge(&hdev->rx_q);
2548 skb_queue_purge(&hdev->cmd_q);
2549 skb_queue_purge(&hdev->raw_q);
2551 /* Drop last sent command */
2552 if (hdev->sent_cmd) {
2553 cancel_delayed_work_sync(&hdev->cmd_timer);
2554 kfree_skb(hdev->sent_cmd);
2555 hdev->sent_cmd = NULL;
2558 kfree_skb(hdev->recv_evt);
2559 hdev->recv_evt = NULL;
2561 /* After this point our queues are empty
2562 * and no tasks are scheduled. */
2566 hdev->flags &= BIT(HCI_RAW);
2567 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2569 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2570 if (hdev->dev_type == HCI_BREDR) {
2572 mgmt_powered(hdev, 0);
2573 hci_dev_unlock(hdev);
2577 /* Controller radio is available but is currently powered down */
2578 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2580 memset(hdev->eir, 0, sizeof(hdev->eir));
2581 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2582 bacpy(&hdev->random_addr, BDADDR_ANY);
2584 hci_req_unlock(hdev);
2590 int hci_dev_close(__u16 dev)
2592 struct hci_dev *hdev;
2595 hdev = hci_dev_get(dev);
2599 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2604 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2605 cancel_delayed_work(&hdev->power_off);
2607 err = hci_dev_do_close(hdev);
2614 int hci_dev_reset(__u16 dev)
2616 struct hci_dev *hdev;
2619 hdev = hci_dev_get(dev);
2625 if (!test_bit(HCI_UP, &hdev->flags)) {
2630 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2635 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2641 skb_queue_purge(&hdev->rx_q);
2642 skb_queue_purge(&hdev->cmd_q);
2645 hci_inquiry_cache_flush(hdev);
2646 hci_conn_hash_flush(hdev);
2647 hci_dev_unlock(hdev);
2652 atomic_set(&hdev->cmd_cnt, 1);
2653 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2655 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2658 hci_req_unlock(hdev);
2663 int hci_dev_reset_stat(__u16 dev)
2665 struct hci_dev *hdev;
2668 hdev = hci_dev_get(dev);
2672 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2677 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2682 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2689 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2691 bool conn_changed, discov_changed;
2693 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2695 if ((scan & SCAN_PAGE))
2696 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2699 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2702 if ((scan & SCAN_INQUIRY)) {
2703 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2706 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2707 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2711 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2714 if (conn_changed || discov_changed) {
2715 /* In case this was disabled through mgmt */
2716 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2718 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2719 mgmt_update_adv_data(hdev);
2721 mgmt_new_settings(hdev);
/* hci_dev_cmd - dispatcher for the legacy HCISET* ioctls (auth,
 * encrypt, scan, link policy/mode, packet type, ACL/SCO MTU) after
 * validating the device state; scan changes additionally sync the
 * connectable/discoverable mgmt flags via hci_update_scan_state().
 * NOTE(review): lossy extraction -- the `switch (cmd)` header, most
 * `case`/`break` labels, timeout arguments, the `goto done` paths and
 * the final hci_dev_put()/return are missing; each line carries a
 * stray historical line number.  Code kept byte-identical; comments
 * only.
 */
2725 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2727 struct hci_dev *hdev;
2728 struct hci_dev_req dr;
2731 if (copy_from_user(&dr, arg, sizeof(dr)))
2734 hdev = hci_dev_get(dr.dev_id);
2738 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2743 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2748 if (hdev->dev_type != HCI_BREDR) {
2753 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2760 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2765 if (!lmp_encrypt_capable(hdev)) {
2770 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2771 /* Auth must be enabled first */
2772 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2778 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2783 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2786 /* Ensure that the connectable and discoverable states
2787 * get correctly modified as this was a non-mgmt change.
2790 hci_update_scan_state(hdev, dr.dev_opt);
2794 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2798 case HCISETLINKMODE:
2799 hdev->link_mode = ((__u16) dr.dev_opt) &
2800 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2804 hdev->pkt_type = (__u16) dr.dev_opt;
2808 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2809 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2813 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2814 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* hci_get_dev_list - HCIGETDEVLIST ioctl backend: read the requested
 * entry count, allocate a response buffer, fill it with (id, flags)
 * pairs for every registered device (masking HCI_UP for auto-off
 * devices) and copy the trimmed result back to user space.
 * NOTE(review): lossy extraction -- the `dev_num` declaration, the
 * `n++`/loop-exit check, `dl->dev_num = n;`, the kfree(dl) and braces
 * are missing; each line carries a stray historical line number.  Code
 * kept byte-identical; comments only.
 */
2827 int hci_get_dev_list(void __user *arg)
2829 struct hci_dev *hdev;
2830 struct hci_dev_list_req *dl;
2831 struct hci_dev_req *dr;
2832 int n = 0, size, err;
2835 if (get_user(dev_num, (__u16 __user *) arg))
2838 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2841 size = sizeof(*dl) + dev_num * sizeof(*dr);
2843 dl = kzalloc(size, GFP_KERNEL);
2849 read_lock(&hci_dev_list_lock);
2850 list_for_each_entry(hdev, &hci_dev_list, list) {
2851 unsigned long flags = hdev->flags;
2853 /* When the auto-off is configured it means the transport
2854 * is running, but in that case still indicate that the
2855 * device is actually down.
2857 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2858 flags &= ~BIT(HCI_UP);
2860 (dr + n)->dev_id = hdev->id;
2861 (dr + n)->dev_opt = flags;
2866 read_unlock(&hci_dev_list_lock);
2869 size = sizeof(*dl) + n * sizeof(*dr);
2871 err = copy_to_user(arg, dl, size);
2874 return err ? -EFAULT : 0;
/* hci_get_dev_info - HCIGETDEVINFO ioctl backend: look up the device
 * by id and fill a hci_dev_info with name, address, bus/type, flags
 * (HCI_UP masked for auto-off devices), packet types, BR/EDR or LE
 * MTUs, link policy/mode, stats and features, then copy it to user
 * space.
 * NOTE(review): lossy extraction -- the NULL check after hci_dev_get(),
 * `di.flags = flags;`, the `else` keyword before the LE MTU branch,
 * the -EFAULT/`err` handling, hci_dev_put() and braces are missing;
 * each line carries a stray historical line number.  Code kept
 * byte-identical; comments only.
 */
2877 int hci_get_dev_info(void __user *arg)
2879 struct hci_dev *hdev;
2880 struct hci_dev_info di;
2881 unsigned long flags;
2884 if (copy_from_user(&di, arg, sizeof(di)))
2887 hdev = hci_dev_get(di.dev_id);
2891 /* When the auto-off is configured it means the transport
2892 * is running, but in that case still indicate that the
2893 * device is actually down.
2895 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2896 flags = hdev->flags & ~BIT(HCI_UP);
2898 flags = hdev->flags;
2900 strcpy(di.name, hdev->name);
2901 di.bdaddr = hdev->bdaddr;
2902 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2904 di.pkt_type = hdev->pkt_type;
2905 if (lmp_bredr_capable(hdev)) {
2906 di.acl_mtu = hdev->acl_mtu;
2907 di.acl_pkts = hdev->acl_pkts;
2908 di.sco_mtu = hdev->sco_mtu;
2909 di.sco_pkts = hdev->sco_pkts;
2911 di.acl_mtu = hdev->le_mtu;
2912 di.acl_pkts = hdev->le_pkts;
2916 di.link_policy = hdev->link_policy;
2917 di.link_mode = hdev->link_mode;
2919 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2920 memcpy(&di.features, &hdev->features, sizeof(di.features));
2922 if (copy_to_user(arg, &di, sizeof(di)))
2930 /* ---- Interface to HCI drivers ---- */
2932 static int hci_rfkill_set_block(void *data, bool blocked)
2934 struct hci_dev *hdev = data;
2936 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2938 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2942 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2943 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2944 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2945 hci_dev_do_close(hdev);
2947 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2953 static const struct rfkill_ops hci_rfkill_ops = {
2954 .set_block = hci_rfkill_set_block,
2957 static void hci_power_on(struct work_struct *work)
2959 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2962 BT_DBG("%s", hdev->name);
2964 err = hci_dev_do_open(hdev);
2966 mgmt_set_powered_failed(hdev, err);
2970 /* During the HCI setup phase, a few error conditions are
2971 * ignored and they need to be checked now. If they are still
2972 * valid, it is important to turn the device back off.
2974 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2975 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2976 (hdev->dev_type == HCI_BREDR &&
2977 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2978 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2979 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2980 hci_dev_do_close(hdev);
2981 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2982 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2983 HCI_AUTO_OFF_TIMEOUT);
2986 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2987 /* For unconfigured devices, set the HCI_RAW flag
2988 * so that userspace can easily identify them.
2990 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2991 set_bit(HCI_RAW, &hdev->flags);
2993 /* For fully configured devices, this will send
2994 * the Index Added event. For unconfigured devices,
2995 * it will send Unconfigued Index Added event.
2997 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2998 * and no event will be send.
3000 mgmt_index_added(hdev);
3001 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3002 /* When the controller is now configured, then it
3003 * is important to clear the HCI_RAW flag.
3005 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3006 clear_bit(HCI_RAW, &hdev->flags);
3008 /* Powering on the controller with HCI_CONFIG set only
3009 * happens with the transition from unconfigured to
3010 * configured. This will send the Index Added event.
3012 mgmt_index_added(hdev);
3016 static void hci_power_off(struct work_struct *work)
3018 struct hci_dev *hdev = container_of(work, struct hci_dev,
3021 BT_DBG("%s", hdev->name);
3023 hci_dev_do_close(hdev);
3026 static void hci_discov_off(struct work_struct *work)
3028 struct hci_dev *hdev;
3030 hdev = container_of(work, struct hci_dev, discov_off.work);
3032 BT_DBG("%s", hdev->name);
3034 mgmt_discoverable_timeout(hdev);
3037 void hci_uuids_clear(struct hci_dev *hdev)
3039 struct bt_uuid *uuid, *tmp;
3041 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3042 list_del(&uuid->list);
3047 void hci_link_keys_clear(struct hci_dev *hdev)
3049 struct list_head *p, *n;
3051 list_for_each_safe(p, n, &hdev->link_keys) {
3052 struct link_key *key;
3054 key = list_entry(p, struct link_key, list);
3061 void hci_smp_ltks_clear(struct hci_dev *hdev)
3063 struct smp_ltk *k, *tmp;
3065 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3071 void hci_smp_irks_clear(struct hci_dev *hdev)
3073 struct smp_irk *k, *tmp;
3075 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3081 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3085 list_for_each_entry(k, &hdev->link_keys, list)
3086 if (bacmp(bdaddr, &k->bdaddr) == 0)
3092 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3093 u8 key_type, u8 old_key_type)
3096 if (key_type < 0x03)
3099 /* Debug keys are insecure so don't store them persistently */
3100 if (key_type == HCI_LK_DEBUG_COMBINATION)
3103 /* Changed combination key and there's no previous one */
3104 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3107 /* Security mode 3 case */
3111 /* Neither local nor remote side had no-bonding as requirement */
3112 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3115 /* Local side had dedicated bonding as requirement */
3116 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3119 /* Remote side had dedicated bonding as requirement */
3120 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3123 /* If none of the above criteria match, then don't store the key
3128 static u8 ltk_role(u8 type)
3130 if (type == SMP_LTK)
3131 return HCI_ROLE_MASTER;
3133 return HCI_ROLE_SLAVE;
3136 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3141 list_for_each_entry(k, &hdev->long_term_keys, list) {
3142 if (k->ediv != ediv || k->rand != rand)
3145 if (ltk_role(k->type) != role)
3154 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3155 u8 addr_type, u8 role)
3159 list_for_each_entry(k, &hdev->long_term_keys, list)
3160 if (addr_type == k->bdaddr_type &&
3161 bacmp(bdaddr, &k->bdaddr) == 0 &&
3162 ltk_role(k->type) == role)
3168 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3170 struct smp_irk *irk;
3172 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3173 if (!bacmp(&irk->rpa, rpa))
3177 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3178 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3179 bacpy(&irk->rpa, rpa);
3187 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3190 struct smp_irk *irk;
3192 /* Identity Address must be public or static random */
3193 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3196 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3197 if (addr_type == irk->addr_type &&
3198 bacmp(bdaddr, &irk->bdaddr) == 0)
3205 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3206 bdaddr_t *bdaddr, u8 *val, u8 type,
3207 u8 pin_len, bool *persistent)
3209 struct link_key *key, *old_key;
3212 old_key = hci_find_link_key(hdev, bdaddr);
3214 old_key_type = old_key->type;
3217 old_key_type = conn ? conn->key_type : 0xff;
3218 key = kzalloc(sizeof(*key), GFP_KERNEL);
3221 list_add(&key->list, &hdev->link_keys);
3224 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3226 /* Some buggy controller combinations generate a changed
3227 * combination key for legacy pairing even when there's no
3229 if (type == HCI_LK_CHANGED_COMBINATION &&
3230 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3231 type = HCI_LK_COMBINATION;
3233 conn->key_type = type;
3236 bacpy(&key->bdaddr, bdaddr);
3237 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3238 key->pin_len = pin_len;
3240 if (type == HCI_LK_CHANGED_COMBINATION)
3241 key->type = old_key_type;
3246 *persistent = hci_persistent_key(hdev, conn, type,
3252 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3253 u8 addr_type, u8 type, u8 authenticated,
3254 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3256 struct smp_ltk *key, *old_key;
3257 u8 role = ltk_role(type);
3259 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3263 key = kzalloc(sizeof(*key), GFP_KERNEL);
3266 list_add(&key->list, &hdev->long_term_keys);
3269 bacpy(&key->bdaddr, bdaddr);
3270 key->bdaddr_type = addr_type;
3271 memcpy(key->val, tk, sizeof(key->val));
3272 key->authenticated = authenticated;
3275 key->enc_size = enc_size;
3281 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3282 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3284 struct smp_irk *irk;
3286 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3288 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3292 bacpy(&irk->bdaddr, bdaddr);
3293 irk->addr_type = addr_type;
3295 list_add(&irk->list, &hdev->identity_resolving_keys);
3298 memcpy(irk->val, val, 16);
3299 bacpy(&irk->rpa, rpa);
3304 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3306 struct link_key *key;
3308 key = hci_find_link_key(hdev, bdaddr);
3312 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3314 list_del(&key->list);
3320 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3322 struct smp_ltk *k, *tmp;
3325 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3326 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3329 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3336 return removed ? 0 : -ENOENT;
3339 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3341 struct smp_irk *k, *tmp;
3343 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3344 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3347 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3354 /* HCI command timer function */
3355 static void hci_cmd_timeout(struct work_struct *work)
3357 struct hci_dev *hdev = container_of(work, struct hci_dev,
3360 if (hdev->sent_cmd) {
3361 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3362 u16 opcode = __le16_to_cpu(sent->opcode);
3364 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3366 BT_ERR("%s command tx timeout", hdev->name);
3369 atomic_set(&hdev->cmd_cnt, 1);
3370 queue_work(hdev->workqueue, &hdev->cmd_work);
3373 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3376 struct oob_data *data;
3378 list_for_each_entry(data, &hdev->remote_oob_data, list)
3379 if (bacmp(bdaddr, &data->bdaddr) == 0)
3385 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3387 struct oob_data *data;
3389 data = hci_find_remote_oob_data(hdev, bdaddr);
3393 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3395 list_del(&data->list);
3401 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3403 struct oob_data *data, *n;
3405 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3406 list_del(&data->list);
3411 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3412 u8 *hash, u8 *randomizer)
3414 struct oob_data *data;
3416 data = hci_find_remote_oob_data(hdev, bdaddr);
3418 data = kmalloc(sizeof(*data), GFP_KERNEL);
3422 bacpy(&data->bdaddr, bdaddr);
3423 list_add(&data->list, &hdev->remote_oob_data);
3426 memcpy(data->hash192, hash, sizeof(data->hash192));
3427 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3429 memset(data->hash256, 0, sizeof(data->hash256));
3430 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3432 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3437 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3438 u8 *hash192, u8 *randomizer192,
3439 u8 *hash256, u8 *randomizer256)
3441 struct oob_data *data;
3443 data = hci_find_remote_oob_data(hdev, bdaddr);
3445 data = kmalloc(sizeof(*data), GFP_KERNEL);
3449 bacpy(&data->bdaddr, bdaddr);
3450 list_add(&data->list, &hdev->remote_oob_data);
3453 memcpy(data->hash192, hash192, sizeof(data->hash192));
3454 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3456 memcpy(data->hash256, hash256, sizeof(data->hash256));
3457 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3459 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3464 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3465 bdaddr_t *bdaddr, u8 type)
3467 struct bdaddr_list *b;
3469 list_for_each_entry(b, bdaddr_list, list) {
3470 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3477 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3479 struct list_head *p, *n;
3481 list_for_each_safe(p, n, bdaddr_list) {
3482 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3489 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3491 struct bdaddr_list *entry;
3493 if (!bacmp(bdaddr, BDADDR_ANY))
3496 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3499 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3503 bacpy(&entry->bdaddr, bdaddr);
3504 entry->bdaddr_type = type;
3506 list_add(&entry->list, list);
3511 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3513 struct bdaddr_list *entry;
3515 if (!bacmp(bdaddr, BDADDR_ANY)) {
3516 hci_bdaddr_list_clear(list);
3520 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3524 list_del(&entry->list);
3530 /* This function requires the caller holds hdev->lock */
3531 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3532 bdaddr_t *addr, u8 addr_type)
3534 struct hci_conn_params *params;
3536 /* The conn params list only contains identity addresses */
3537 if (!hci_is_identity_address(addr, addr_type))
3540 list_for_each_entry(params, &hdev->le_conn_params, list) {
3541 if (bacmp(¶ms->addr, addr) == 0 &&
3542 params->addr_type == addr_type) {
3550 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3552 struct hci_conn *conn;
3554 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3558 if (conn->dst_type != type)
3561 if (conn->state != BT_CONNECTED)
3567 /* This function requires the caller holds hdev->lock */
3568 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3569 bdaddr_t *addr, u8 addr_type)
3571 struct hci_conn_params *param;
3573 /* The list only contains identity addresses */
3574 if (!hci_is_identity_address(addr, addr_type))
3577 list_for_each_entry(param, list, action) {
3578 if (bacmp(¶m->addr, addr) == 0 &&
3579 param->addr_type == addr_type)
3586 /* This function requires the caller holds hdev->lock */
3587 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3588 bdaddr_t *addr, u8 addr_type)
3590 struct hci_conn_params *params;
3592 if (!hci_is_identity_address(addr, addr_type))
3595 params = hci_conn_params_lookup(hdev, addr, addr_type);
3599 params = kzalloc(sizeof(*params), GFP_KERNEL);
3601 BT_ERR("Out of memory");
3605 bacpy(¶ms->addr, addr);
3606 params->addr_type = addr_type;
3608 list_add(¶ms->list, &hdev->le_conn_params);
3609 INIT_LIST_HEAD(¶ms->action);
3611 params->conn_min_interval = hdev->le_conn_min_interval;
3612 params->conn_max_interval = hdev->le_conn_max_interval;
3613 params->conn_latency = hdev->le_conn_latency;
3614 params->supervision_timeout = hdev->le_supv_timeout;
3615 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3617 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3622 /* This function requires the caller holds hdev->lock */
3623 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3626 struct hci_conn_params *params;
3628 params = hci_conn_params_add(hdev, addr, addr_type);
3632 if (params->auto_connect == auto_connect)
3635 list_del_init(¶ms->action);
3637 switch (auto_connect) {
3638 case HCI_AUTO_CONN_DISABLED:
3639 case HCI_AUTO_CONN_LINK_LOSS:
3640 hci_update_background_scan(hdev);
3642 case HCI_AUTO_CONN_REPORT:
3643 list_add(¶ms->action, &hdev->pend_le_reports);
3644 hci_update_background_scan(hdev);
3646 case HCI_AUTO_CONN_ALWAYS:
3647 if (!is_connected(hdev, addr, addr_type)) {
3648 list_add(¶ms->action, &hdev->pend_le_conns);
3649 hci_update_background_scan(hdev);
3654 params->auto_connect = auto_connect;
3656 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3662 /* This function requires the caller holds hdev->lock */
3663 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3665 struct hci_conn_params *params;
3667 params = hci_conn_params_lookup(hdev, addr, addr_type);
3671 list_del(¶ms->action);
3672 list_del(¶ms->list);
3675 hci_update_background_scan(hdev);
3677 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3680 /* This function requires the caller holds hdev->lock */
3681 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3683 struct hci_conn_params *params, *tmp;
3685 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3686 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3688 list_del(¶ms->list);
3692 BT_DBG("All LE disabled connection parameters were removed");
3695 /* This function requires the caller holds hdev->lock */
3696 void hci_conn_params_clear_all(struct hci_dev *hdev)
3698 struct hci_conn_params *params, *tmp;
3700 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3701 list_del(¶ms->action);
3702 list_del(¶ms->list);
3706 hci_update_background_scan(hdev);
3708 BT_DBG("All LE connection parameters were removed");
3711 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3714 BT_ERR("Failed to start inquiry: status %d", status);
3717 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3718 hci_dev_unlock(hdev);
3723 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3725 /* General inquiry access code (GIAC) */
3726 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3727 struct hci_request req;
3728 struct hci_cp_inquiry cp;
3732 BT_ERR("Failed to disable LE scanning: status %d", status);
3736 switch (hdev->discovery.type) {
3737 case DISCOV_TYPE_LE:
3739 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3740 hci_dev_unlock(hdev);
3743 case DISCOV_TYPE_INTERLEAVED:
3744 hci_req_init(&req, hdev);
3746 memset(&cp, 0, sizeof(cp));
3747 memcpy(&cp.lap, lap, sizeof(cp.lap));
3748 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3749 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3753 hci_inquiry_cache_flush(hdev);
3755 err = hci_req_run(&req, inquiry_complete);
3757 BT_ERR("Inquiry request failed: err %d", err);
3758 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3761 hci_dev_unlock(hdev);
3766 static void le_scan_disable_work(struct work_struct *work)
3768 struct hci_dev *hdev = container_of(work, struct hci_dev,
3769 le_scan_disable.work);
3770 struct hci_request req;
3773 BT_DBG("%s", hdev->name);
3775 hci_req_init(&req, hdev);
3777 hci_req_add_le_scan_disable(&req);
3779 err = hci_req_run(&req, le_scan_disable_work_complete);
3781 BT_ERR("Disable LE scanning request failed: err %d", err);
3784 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3786 struct hci_dev *hdev = req->hdev;
3788 /* If we're advertising or initiating an LE connection we can't
3789 * go ahead and change the random address at this time. This is
3790 * because the eventual initiator address used for the
3791 * subsequently created connection will be undefined (some
3792 * controllers use the new address and others the one we had
3793 * when the operation started).
3795 * In this kind of scenario skip the update and let the random
3796 * address be updated at the next cycle.
3798 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3799 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3800 BT_DBG("Deferring random address update");
3804 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3807 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3810 struct hci_dev *hdev = req->hdev;
3813 /* If privacy is enabled use a resolvable private address. If
3814 * current RPA has expired or there is something else than
3815 * the current RPA in use, then generate a new one.
3817 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3820 *own_addr_type = ADDR_LE_DEV_RANDOM;
3822 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3823 !bacmp(&hdev->random_addr, &hdev->rpa))
3826 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3828 BT_ERR("%s failed to generate new RPA", hdev->name);
3832 set_random_addr(req, &hdev->rpa);
3834 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3835 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3840 /* In case of required privacy without resolvable private address,
3841 * use an unresolvable private address. This is useful for active
3842 * scanning and non-connectable advertising.
3844 if (require_privacy) {
3847 get_random_bytes(&urpa, 6);
3848 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3850 *own_addr_type = ADDR_LE_DEV_RANDOM;
3851 set_random_addr(req, &urpa);
3855 /* If forcing static address is in use or there is no public
3856 * address use the static address as random address (but skip
3857 * the HCI command if the current random address is already the
3860 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3861 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3862 *own_addr_type = ADDR_LE_DEV_RANDOM;
3863 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3864 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3865 &hdev->static_addr);
3869 /* Neither privacy nor static address is being used so use a
3872 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3877 /* Copy the Identity Address of the controller.
3879 * If the controller has a public BD_ADDR, then by default use that one.
3880 * If this is a LE only controller without a public address, default to
3881 * the static random address.
3883 * For debugging purposes it is possible to force controllers with a
3884 * public address to use the static random address instead.
3886 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3889 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3890 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3891 bacpy(bdaddr, &hdev->static_addr);
3892 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3894 bacpy(bdaddr, &hdev->bdaddr);
3895 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3899 /* Alloc HCI device */
3900 struct hci_dev *hci_alloc_dev(void)
3902 struct hci_dev *hdev;
3904 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3908 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3909 hdev->esco_type = (ESCO_HV1);
3910 hdev->link_mode = (HCI_LM_ACCEPT);
3911 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3912 hdev->io_capability = 0x03; /* No Input No Output */
3913 hdev->manufacturer = 0xffff; /* Default to internal use */
3914 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3915 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3917 hdev->sniff_max_interval = 800;
3918 hdev->sniff_min_interval = 80;
3920 hdev->le_adv_channel_map = 0x07;
3921 hdev->le_scan_interval = 0x0060;
3922 hdev->le_scan_window = 0x0030;
3923 hdev->le_conn_min_interval = 0x0028;
3924 hdev->le_conn_max_interval = 0x0038;
3925 hdev->le_conn_latency = 0x0000;
3926 hdev->le_supv_timeout = 0x002a;
3928 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3929 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3930 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3931 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3933 mutex_init(&hdev->lock);
3934 mutex_init(&hdev->req_lock);
3936 INIT_LIST_HEAD(&hdev->mgmt_pending);
3937 INIT_LIST_HEAD(&hdev->blacklist);
3938 INIT_LIST_HEAD(&hdev->whitelist);
3939 INIT_LIST_HEAD(&hdev->uuids);
3940 INIT_LIST_HEAD(&hdev->link_keys);
3941 INIT_LIST_HEAD(&hdev->long_term_keys);
3942 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3943 INIT_LIST_HEAD(&hdev->remote_oob_data);
3944 INIT_LIST_HEAD(&hdev->le_white_list);
3945 INIT_LIST_HEAD(&hdev->le_conn_params);
3946 INIT_LIST_HEAD(&hdev->pend_le_conns);
3947 INIT_LIST_HEAD(&hdev->pend_le_reports);
3948 INIT_LIST_HEAD(&hdev->conn_hash.list);
3950 INIT_WORK(&hdev->rx_work, hci_rx_work);
3951 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3952 INIT_WORK(&hdev->tx_work, hci_tx_work);
3953 INIT_WORK(&hdev->power_on, hci_power_on);
3955 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3956 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3957 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3959 skb_queue_head_init(&hdev->rx_q);
3960 skb_queue_head_init(&hdev->cmd_q);
3961 skb_queue_head_init(&hdev->raw_q);
3963 init_waitqueue_head(&hdev->req_wait_q);
3965 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3967 hci_init_sysfs(hdev);
3968 discovery_init(hdev);
3972 EXPORT_SYMBOL(hci_alloc_dev);
3974 /* Free HCI device */
3975 void hci_free_dev(struct hci_dev *hdev)
3977 /* will free via device release */
3978 put_device(&hdev->dev);
3980 EXPORT_SYMBOL(hci_free_dev);
3982 /* Register HCI device */
3983 int hci_register_dev(struct hci_dev *hdev)
3987 if (!hdev->open || !hdev->close || !hdev->send)
3990 /* Do not allow HCI_AMP devices to register at index 0,
3991 * so the index can be used as the AMP controller ID.
3993 switch (hdev->dev_type) {
3995 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3998 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4007 sprintf(hdev->name, "hci%d", id);
4010 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4012 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4013 WQ_MEM_RECLAIM, 1, hdev->name);
4014 if (!hdev->workqueue) {
4019 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4020 WQ_MEM_RECLAIM, 1, hdev->name);
4021 if (!hdev->req_workqueue) {
4022 destroy_workqueue(hdev->workqueue);
4027 if (!IS_ERR_OR_NULL(bt_debugfs))
4028 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4030 dev_set_name(&hdev->dev, "%s", hdev->name);
4032 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4034 if (IS_ERR(hdev->tfm_aes)) {
4035 BT_ERR("Unable to create crypto context");
4036 error = PTR_ERR(hdev->tfm_aes);
4037 hdev->tfm_aes = NULL;
4041 error = device_add(&hdev->dev);
4045 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4046 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4049 if (rfkill_register(hdev->rfkill) < 0) {
4050 rfkill_destroy(hdev->rfkill);
4051 hdev->rfkill = NULL;
4055 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4056 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4058 set_bit(HCI_SETUP, &hdev->dev_flags);
4059 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4061 if (hdev->dev_type == HCI_BREDR) {
4062 /* Assume BR/EDR support until proven otherwise (such as
4063 * through reading supported features during init.
4065 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4068 write_lock(&hci_dev_list_lock);
4069 list_add(&hdev->list, &hci_dev_list);
4070 write_unlock(&hci_dev_list_lock);
4072 /* Devices that are marked for raw-only usage are unconfigured
4073 * and should not be included in normal operation.
4075 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4076 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4078 hci_notify(hdev, HCI_DEV_REG);
4081 queue_work(hdev->req_workqueue, &hdev->power_on);
4086 crypto_free_blkcipher(hdev->tfm_aes);
4088 destroy_workqueue(hdev->workqueue);
4089 destroy_workqueue(hdev->req_workqueue);
4091 ida_simple_remove(&hci_index_ida, hdev->id);
4095 EXPORT_SYMBOL(hci_register_dev);
4097 /* Unregister HCI device */
4098 void hci_unregister_dev(struct hci_dev *hdev)
4102 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4104 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4108 write_lock(&hci_dev_list_lock);
4109 list_del(&hdev->list);
4110 write_unlock(&hci_dev_list_lock);
4112 hci_dev_do_close(hdev);
4114 for (i = 0; i < NUM_REASSEMBLY; i++)
4115 kfree_skb(hdev->reassembly[i]);
4117 cancel_work_sync(&hdev->power_on);
4119 if (!test_bit(HCI_INIT, &hdev->flags) &&
4120 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4121 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4123 mgmt_index_removed(hdev);
4124 hci_dev_unlock(hdev);
4127 /* mgmt_index_removed should take care of emptying the
4129 BUG_ON(!list_empty(&hdev->mgmt_pending));
4131 hci_notify(hdev, HCI_DEV_UNREG);
4134 rfkill_unregister(hdev->rfkill);
4135 rfkill_destroy(hdev->rfkill);
4139 crypto_free_blkcipher(hdev->tfm_aes);
4141 device_del(&hdev->dev);
4143 debugfs_remove_recursive(hdev->debugfs);
4145 destroy_workqueue(hdev->workqueue);
4146 destroy_workqueue(hdev->req_workqueue);
4149 hci_bdaddr_list_clear(&hdev->blacklist);
4150 hci_bdaddr_list_clear(&hdev->whitelist);
4151 hci_uuids_clear(hdev);
4152 hci_link_keys_clear(hdev);
4153 hci_smp_ltks_clear(hdev);
4154 hci_smp_irks_clear(hdev);
4155 hci_remote_oob_data_clear(hdev);
4156 hci_bdaddr_list_clear(&hdev->le_white_list);
4157 hci_conn_params_clear_all(hdev);
4158 hci_dev_unlock(hdev);
4162 ida_simple_remove(&hci_index_ida, id);
4164 EXPORT_SYMBOL(hci_unregister_dev);
4166 /* Suspend HCI device */
4167 int hci_suspend_dev(struct hci_dev *hdev)
4169 hci_notify(hdev, HCI_DEV_SUSPEND);
4172 EXPORT_SYMBOL(hci_suspend_dev);
4174 /* Resume HCI device */
4175 int hci_resume_dev(struct hci_dev *hdev)
4177 hci_notify(hdev, HCI_DEV_RESUME);
4180 EXPORT_SYMBOL(hci_resume_dev);
4182 /* Receive frame from HCI drivers */
4183 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4185 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4186 && !test_bit(HCI_INIT, &hdev->flags))) {
4192 bt_cb(skb)->incoming = 1;
4195 __net_timestamp(skb);
4197 skb_queue_tail(&hdev->rx_q, skb);
4198 queue_work(hdev->workqueue, &hdev->rx_work);
4202 EXPORT_SYMBOL(hci_recv_frame);
4204 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4205 int count, __u8 index)
4210 struct sk_buff *skb;
4211 struct bt_skb_cb *scb;
4213 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4214 index >= NUM_REASSEMBLY)
4217 skb = hdev->reassembly[index];
4221 case HCI_ACLDATA_PKT:
4222 len = HCI_MAX_FRAME_SIZE;
4223 hlen = HCI_ACL_HDR_SIZE;
4226 len = HCI_MAX_EVENT_SIZE;
4227 hlen = HCI_EVENT_HDR_SIZE;
4229 case HCI_SCODATA_PKT:
4230 len = HCI_MAX_SCO_SIZE;
4231 hlen = HCI_SCO_HDR_SIZE;
4235 skb = bt_skb_alloc(len, GFP_ATOMIC);
4239 scb = (void *) skb->cb;
4241 scb->pkt_type = type;
4243 hdev->reassembly[index] = skb;
4247 scb = (void *) skb->cb;
4248 len = min_t(uint, scb->expect, count);
4250 memcpy(skb_put(skb, len), data, len);
4259 if (skb->len == HCI_EVENT_HDR_SIZE) {
4260 struct hci_event_hdr *h = hci_event_hdr(skb);
4261 scb->expect = h->plen;
4263 if (skb_tailroom(skb) < scb->expect) {
4265 hdev->reassembly[index] = NULL;
4271 case HCI_ACLDATA_PKT:
4272 if (skb->len == HCI_ACL_HDR_SIZE) {
4273 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4274 scb->expect = __le16_to_cpu(h->dlen);
4276 if (skb_tailroom(skb) < scb->expect) {
4278 hdev->reassembly[index] = NULL;
4284 case HCI_SCODATA_PKT:
4285 if (skb->len == HCI_SCO_HDR_SIZE) {
4286 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4287 scb->expect = h->dlen;
4289 if (skb_tailroom(skb) < scb->expect) {
4291 hdev->reassembly[index] = NULL;
4298 if (scb->expect == 0) {
4299 /* Complete frame */
4301 bt_cb(skb)->pkt_type = type;
4302 hci_recv_frame(hdev, skb);
4304 hdev->reassembly[index] = NULL;
4312 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4316 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4320 rem = hci_reassembly(hdev, type, data, count, type - 1);
4324 data += (count - rem);
4330 EXPORT_SYMBOL(hci_recv_fragment);
4332 #define STREAM_REASSEMBLY 0
4334 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4340 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4343 struct { char type; } *pkt;
4345 /* Start of the frame */
4352 type = bt_cb(skb)->pkt_type;
4354 rem = hci_reassembly(hdev, type, data, count,
4359 data += (count - rem);
4365 EXPORT_SYMBOL(hci_recv_stream_fragment);
4367 /* ---- Interface to upper protocols ---- */
4369 int hci_register_cb(struct hci_cb *cb)
4371 BT_DBG("%p name %s", cb, cb->name);
4373 write_lock(&hci_cb_list_lock);
4374 list_add(&cb->list, &hci_cb_list);
4375 write_unlock(&hci_cb_list_lock);
4379 EXPORT_SYMBOL(hci_register_cb);
4381 int hci_unregister_cb(struct hci_cb *cb)
4383 BT_DBG("%p name %s", cb, cb->name);
4385 write_lock(&hci_cb_list_lock);
4386 list_del(&cb->list);
4387 write_unlock(&hci_cb_list_lock);
4391 EXPORT_SYMBOL(hci_unregister_cb);
4393 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4397 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4400 __net_timestamp(skb);
4402 /* Send copy to monitor */
4403 hci_send_to_monitor(hdev, skb);
4405 if (atomic_read(&hdev->promisc)) {
4406 /* Send copy to the sockets */
4407 hci_send_to_sock(hdev, skb);
4410 /* Get rid of skb owner, prior to sending to the driver. */
4413 err = hdev->send(hdev, skb);
4415 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4420 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4422 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: tag the last queued command with the completion
 * callback, splice the request's commands onto hdev->cmd_q atomically, and
 * kick the command worker.  Empty or error-flagged requests are rejected.
 */
4427 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4429 struct hci_dev *hdev = req->hdev;
4430 struct sk_buff *skb;
4431 unsigned long flags;
4433 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4435 /* If an error occurred during request building, remove all HCI
4436 * commands queued on the HCI request queue.
4439 skb_queue_purge(&req->cmd_q);
4443 /* Do not allow empty requests */
4444 if (skb_queue_empty(&req->cmd_q))
/* Completion callback lives on the last command of the request. */
4447 skb = skb_peek_tail(&req->cmd_q);
4448 bt_cb(skb)->req.complete = complete;
/* Splice under the cmd_q lock so the whole request is enqueued atomically. */
4450 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4451 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4452 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4454 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Return true while a synchronous request is awaiting completion. */
4459 bool hci_req_pending(struct hci_dev *hdev)
4461 return (hdev->req_status == HCI_REQ_PEND);
/* Allocate and fill an skb carrying one HCI command: header (opcode +
 * parameter length) followed by the parameter payload, tagged as a
 * command packet.  Returns NULL on allocation failure (allocation is
 * GFP_ATOMIC since callers may run in non-sleeping context).
 */
4464 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4465 u32 plen, const void *param)
4467 int len = HCI_COMMAND_HDR_SIZE + plen;
4468 struct hci_command_hdr *hdr;
4469 struct sk_buff *skb;
4471 skb = bt_skb_alloc(len, GFP_ATOMIC);
4475 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* Opcode goes on the wire little-endian per the HCI specification. */
4476 hdr->opcode = cpu_to_le16(opcode);
4480 memcpy(skb_put(skb, plen), param, plen);
4482 BT_DBG("skb len %d", skb->len);
4484 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4489 /* Send HCI command */
/* Build a stand-alone HCI command, mark it as a single-command request
 * (req.start), queue it on hdev->cmd_q and wake the command worker.
 */
4490 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4493 struct sk_buff *skb;
4495 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4497 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4499 BT_ERR("%s no memory for command", hdev->name);
4503 /* Stand-alone HCI commands must be flagged as
4504 * single-command requests.
4506 bt_cb(skb)->req.start = true;
4508 skb_queue_tail(&hdev->cmd_q, skb);
4509 queue_work(hdev->workqueue, &hdev->cmd_work);
4514 /* Queue a command to an asynchronous HCI request */
/* Append a command to a request under construction.  The first command
 * of the request gets req.start = true; an optional expected event code
 * is recorded so completion can be matched against a specific event.
 */
4515 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4516 const void *param, u8 event)
4518 struct hci_dev *hdev = req->hdev;
4519 struct sk_buff *skb;
4521 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4523 /* If an error occurred during request building, there is no point in
4524 * queueing the HCI command. We can simply return.
4529 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4531 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4532 hdev->name, opcode);
/* An empty request queue means this is the request's first command. */
4537 if (skb_queue_empty(&req->cmd_q))
4538 bt_cb(skb)->req.start = true;
4540 bt_cb(skb)->req.event = event;
4542 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command with no specific expected event. */
4545 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4548 hci_req_add_ev(req, opcode, plen, param, 0);
4551 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last command sent
 * (hdev->sent_cmd), or NULL when there is no sent command or its opcode
 * does not match the one requested.
 */
4552 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4554 struct hci_command_hdr *hdr;
4556 if (!hdev->sent_cmd)
4559 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire (little-endian) byte order. */
4561 if (hdr->opcode != cpu_to_le16(opcode))
4564 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4566 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Push an ACL data header in front of the payload: packed handle+flags
 * and payload length, both little-endian.
 */
4570 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4572 struct hci_acl_hdr *hdr;
4575 skb_push(skb, HCI_ACL_HDR_SIZE);
4576 skb_reset_transport_header(skb);
4577 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
/* Handle and packet-boundary/broadcast flags share one 16-bit field. */
4578 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4579 hdr->dlen = cpu_to_le16(len);
/* Queue an ACL skb (and any frag_list continuation fragments) on the
 * channel's data queue.  The first fragment keeps the caller's flags;
 * continuations have ACL_START cleared.  The whole fragment chain is
 * queued under the queue lock so it stays contiguous.
 */
4582 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4583 struct sk_buff *skb, __u16 flags)
4585 struct hci_conn *conn = chan->conn;
4586 struct hci_dev *hdev = conn->hdev;
4587 struct sk_buff *list;
4589 skb->len = skb_headlen(skb);
4592 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* AMP controllers address by channel handle, BR/EDR by connection handle. */
4594 switch (hdev->dev_type) {
4596 hci_add_acl_hdr(skb, conn->handle, flags);
4599 hci_add_acl_hdr(skb, chan->handle, flags);
4602 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4606 list = skb_shinfo(skb)->frag_list;
4608 /* Non fragmented */
4609 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4611 skb_queue_tail(queue, skb);
4614 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; fragments are queued individually below. */
4616 skb_shinfo(skb)->frag_list = NULL;
4618 /* Queue all fragments atomically */
4619 spin_lock(&queue->lock);
4621 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
4623 flags &= ~ACL_START;
4626 skb = list; list = list->next;
4628 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4629 hci_add_acl_hdr(skb, conn->handle, flags);
4631 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4633 __skb_queue_tail(queue, skb);
4636 spin_unlock(&queue->lock);
/* Queue ACL data on the channel and schedule the TX worker to drain it. */
4640 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4642 struct hci_dev *hdev = chan->conn->hdev;
4644 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4646 hci_queue_acl(chan, &chan->data_q, skb, flags);
4648 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle little-endian, dlen as-is since it is a
 * single byte field) to the skb, queue it on the connection's data queue
 * and schedule the TX worker.
 */
4652 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4654 struct hci_dev *hdev = conn->hdev;
4655 struct hci_sco_hdr hdr;
4657 BT_DBG("%s len %d", hdev->name, skb->len);
4659 hdr.handle = cpu_to_le16(conn->handle);
4660 hdr.dlen = skb->len;
4662 skb_push(skb, HCI_SCO_HDR_SIZE);
4663 skb_reset_transport_header(skb);
4664 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4666 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4668 skb_queue_tail(&conn->data_q, skb);
4669 queue_work(hdev->workqueue, &hdev->tx_work);
4672 /* ---- HCI TX task (outgoing data) ---- */
4674 /* HCI Connection scheduler */
/* Connection scheduler: pick the connection of the given link type with
 * queued data and the fewest in-flight packets (fairness), and compute a
 * transmit quota from the controller's remaining buffer credits.
 */
4675 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4678 struct hci_conn_hash *h = &hdev->conn_hash;
4679 struct hci_conn *conn = NULL, *c;
4680 unsigned int num = 0, min = ~0;
4682 /* We don't have to lock device here. Connections are always
4683 * added and removed with TX task disabled. */
4687 list_for_each_entry_rcu(c, &h->list, list) {
4688 if (c->type != type || skb_queue_empty(&c->data_q))
4691 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the eligible connection with the least unacked traffic. */
4696 if (c->sent < min) {
/* Early exit once every connection of this type has been examined. */
4701 if (hci_conn_num(hdev, type) == num)
/* Pick the credit pool matching the winning connection's link type. */
4710 switch (conn->type) {
4712 cnt = hdev->acl_cnt;
4716 cnt = hdev->sco_cnt;
/* LE falls back to the ACL pool when no dedicated LE buffers exist. */
4719 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4723 BT_ERR("Unknown link type");
4731 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handling: disconnect every connection of the given type that
 * still has unacknowledged packets (the controller stopped returning
 * buffer credits).
 */
4735 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4737 struct hci_conn_hash *h = &hdev->conn_hash;
4740 BT_ERR("%s link tx timeout", hdev->name);
4744 /* Kill stalled connections */
4745 list_for_each_entry_rcu(c, &h->list, list) {
4746 if (c->type == type && c->sent) {
4747 BT_ERR("%s killing stalled connection %pMR",
4748 hdev->name, &c->dst);
4749 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: among channels of the given link type with queued
 * data, prefer the highest head-of-queue skb priority, breaking ties by
 * fewest packets in flight on the owning connection; then compute a quota
 * from the matching controller credit pool.
 */
4756 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4759 struct hci_conn_hash *h = &hdev->conn_hash;
4760 struct hci_chan *chan = NULL;
4761 unsigned int num = 0, min = ~0, cur_prio = 0;
4762 struct hci_conn *conn;
4763 int cnt, q, conn_num = 0;
4765 BT_DBG("%s", hdev->name);
4769 list_for_each_entry_rcu(conn, &h->list, list) {
4770 struct hci_chan *tmp;
4772 if (conn->type != type)
4775 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4780 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4781 struct sk_buff *skb;
4783 if (skb_queue_empty(&tmp->data_q))
/* Priority of the channel is that of its head-of-queue skb. */
4786 skb = skb_peek(&tmp->data_q);
4787 if (skb->priority < cur_prio)
/* A strictly higher priority resets the fairness comparison. */
4790 if (skb->priority > cur_prio) {
4793 cur_prio = skb->priority;
4798 if (conn->sent < min) {
4804 if (hci_conn_num(hdev, type) == conn_num)
4813 switch (chan->conn->type) {
4815 cnt = hdev->acl_cnt;
/* AMP controllers account in data blocks rather than packets. */
4818 cnt = hdev->block_cnt;
4822 cnt = hdev->sco_cnt;
4825 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4829 BT_ERR("Unknown link type");
4834 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation: after traffic was sent, bump the head-of-queue skb of
 * starved channels toward HCI_PRIO_MAX - 1 so low-priority channels
 * eventually win the priority comparison in hci_chan_sent().
 */
4838 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4840 struct hci_conn_hash *h = &hdev->conn_hash;
4841 struct hci_conn *conn;
4844 BT_DBG("%s", hdev->name);
4848 list_for_each_entry_rcu(conn, &h->list, list) {
4849 struct hci_chan *chan;
4851 if (conn->type != type)
4854 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4859 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4860 struct sk_buff *skb;
4867 if (skb_queue_empty(&chan->data_q))
4870 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling -- nothing to do. */
4871 if (skb->priority >= HCI_PRIO_MAX - 1)
4874 skb->priority = HCI_PRIO_MAX - 1;
4876 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4880 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks this ACL packet occupies (payload
 * length divided by block size, rounded up).
 */
4888 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4890 /* Calculate count of blocks used by this packet */
4891 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If all ACL credits are exhausted and nothing was sent for longer than
 * HCI_ACL_TX_TIMEOUT, treat the ACL link as stalled.  Skipped for
 * unconfigured controllers.
 */
4894 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4896 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4897 /* ACL tx timeout must be longer than maximum
4898 * link supervision timeout (40.9 seconds) */
4899 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4900 HCI_ACL_TX_TIMEOUT))
4901 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while credits remain, pick the best channel
 * via hci_chan_sent() and drain up to its quota, stopping early if the
 * head-of-queue priority drops.  Recalculate priorities if anything was
 * sent.  Fixed: HTML-entity corruption had turned "&quote" into ""e".
 */
4905 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4907 unsigned int cnt = hdev->acl_cnt;
4908 struct hci_chan *chan;
4909 struct sk_buff *skb;
4912 __check_timeout(hdev, cnt);
4914 while (hdev->acl_cnt &&
4915 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4916 u32 priority = (skb_peek(&chan->data_q))->priority;
4917 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4918 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4919 skb->len, skb->priority);
4921 /* Stop if priority has changed */
4922 if (skb->priority < priority)
4925 skb = skb_dequeue(&chan->data_q);
4927 hci_conn_enter_active_mode(chan->conn,
4928 bt_cb(skb)->force_active);
4930 hci_send_frame(hdev, skb);
4931 hdev->acl_last_tx = jiffies;
/* Credits were consumed: promote starved channels for the next round. */
4939 if (cnt != hdev->acl_cnt)
4940 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (AMP flow control): like the packet-based
 * variant, but credits are counted in data blocks via __get_blocks(), and
 * sending stops when a packet would need more blocks than remain.
 * Fixed: HTML-entity corruption had turned "&quote" into ""e".
 */
4943 static void hci_sched_acl_blk(struct hci_dev *hdev)
4945 unsigned int cnt = hdev->block_cnt;
4946 struct hci_chan *chan;
4947 struct sk_buff *skb;
4951 __check_timeout(hdev, cnt);
4953 BT_DBG("%s", hdev->name);
/* On an AMP controller the data-bearing link type is AMP_LINK. */
4955 if (hdev->dev_type == HCI_AMP)
4960 while (hdev->block_cnt > 0 &&
4961 (chan = hci_chan_sent(hdev, type, &quote))) {
4962 u32 priority = (skb_peek(&chan->data_q))->priority;
4963 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4966 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4967 skb->len, skb->priority);
4969 /* Stop if priority has changed */
4970 if (skb->priority < priority)
4973 skb = skb_dequeue(&chan->data_q);
4975 blocks = __get_blocks(hdev, skb);
/* Not enough block credits for this packet -- stop here. */
4976 if (blocks > hdev->block_cnt)
4979 hci_conn_enter_active_mode(chan->conn,
4980 bt_cb(skb)->force_active);
4982 hci_send_frame(hdev, skb);
4983 hdev->acl_last_tx = jiffies;
4985 hdev->block_cnt -= blocks;
/* Accounting is in blocks, not packets, on block-based controllers. */
4988 chan->sent += blocks;
4989 chan->conn->sent += blocks;
4993 if (cnt != hdev->block_cnt)
4994 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant according
 * to the controller's flow-control mode; bail out early when there are no
 * links of the type this controller carries.
 */
4997 static void hci_sched_acl(struct hci_dev *hdev)
4999 BT_DBG("%s", hdev->name);
5001 /* No ACL link over BR/EDR controller */
5002 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5005 /* No AMP link over AMP controller */
5006 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5009 switch (hdev->flow_ctl_mode) {
5010 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5011 hci_sched_acl_pkt(hdev);
5014 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5015 hci_sched_acl_blk(hdev);
/* SCO scheduler: round-robin over SCO connections via hci_low_sent(),
 * draining each connection's queue up to its quota.  Fixed: HTML-entity
 * corruption had turned "&quote" into ""e".
 */
5021 static void hci_sched_sco(struct hci_dev *hdev)
5023 struct hci_conn *conn;
5024 struct sk_buff *skb;
5027 BT_DBG("%s", hdev->name);
5029 if (!hci_conn_num(hdev, SCO_LINK))
5032 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5033 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5034 BT_DBG("skb %p len %d", skb, skb->len);
5035 hci_send_frame(hdev, skb);
/* Guard against the unsigned sent counter wrapping. */
5038 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco() but for
 * ESCO_LINK connections; shares the SCO credit pool (hdev->sco_cnt).
 */
5044 static void hci_sched_esco(struct hci_dev *hdev)
5046 struct hci_conn *conn;
5047 struct sk_buff *skb;
5050 BT_DBG("%s", hdev->name);
5052 if (!hci_conn_num(hdev, ESCO_LINK))
5055 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5057 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5058 BT_DBG("skb %p len %d", skb, skb->len);
5059 hci_send_frame(hdev, skb);
/* Guard against the unsigned sent counter wrapping. */
5062 if (conn->sent == ~0)
/* LE scheduler: uses the dedicated LE credit pool when the controller
 * reports one (le_pkts), otherwise shares the ACL pool.  Applies its own
 * 45-second stall timeout, then drains channels like the ACL scheduler.
 * Fixed: HTML-entity corruption had turned "&quote" into ""e".
 */
5068 static void hci_sched_le(struct hci_dev *hdev)
5070 struct hci_chan *chan;
5071 struct sk_buff *skb;
5072 int quote, cnt, tmp;
5074 BT_DBG("%s", hdev->name);
5076 if (!hci_conn_num(hdev, LE_LINK))
5079 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5080 /* LE tx timeout must be longer than maximum
5081 * link supervision timeout (40.9 seconds) */
5082 if (!hdev->le_cnt && hdev->le_pkts &&
5083 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5084 hci_link_tx_to(hdev, LE_LINK);
/* No LE buffers reported -> LE traffic consumes ACL credits. */
5087 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5089 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5090 u32 priority = (skb_peek(&chan->data_q))->priority;
5091 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5092 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5093 skb->len, skb->priority);
5095 /* Stop if priority has changed */
5096 if (skb->priority < priority)
5099 skb = skb_dequeue(&chan->data_q);
5101 hci_send_frame(hdev, skb);
5102 hdev->le_last_tx = jiffies;
/* Write back the remaining credits to whichever pool was used. */
5113 hdev->acl_cnt = cnt;
5116 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run every per-link-type scheduler (unless the device is in
 * user-channel mode, where the kernel stack must not touch the queues),
 * then flush any raw packets queued by userspace.
 */
5119 static void hci_tx_work(struct work_struct *work)
5121 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5122 struct sk_buff *skb;
5124 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5125 hdev->sco_cnt, hdev->le_cnt);
5127 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5128 /* Schedule queues and send stuff to HCI driver */
5129 hci_sched_acl(hdev);
5130 hci_sched_sco(hdev);
5131 hci_sched_esco(hdev);
5135 /* Send next queued raw (unknown type) packet */
5136 while ((skb = skb_dequeue(&hdev->raw_q)))
5137 hci_send_frame(hdev, skb);
5140 /* ----- HCI RX task (incoming data processing) ----- */
5142 /* ACL data packet */
/* RX path for ACL data: unpack handle and flags from the header, look up
 * the connection, kick it out of sniff mode and hand the payload to
 * L2CAP; unknown handles are logged and the skb dropped.
 */
5143 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5145 struct hci_acl_hdr *hdr = (void *) skb->data;
5146 struct hci_conn *conn;
5147 __u16 handle, flags;
5149 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* Handle field packs both the connection handle and the PB/BC flags. */
5151 handle = __le16_to_cpu(hdr->handle);
5152 flags = hci_flags(handle);
5153 handle = hci_handle(handle);
5155 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5158 hdev->stat.acl_rx++;
5161 conn = hci_conn_hash_lookup_handle(hdev, handle);
5162 hci_dev_unlock(hdev);
5165 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5167 /* Send to upper protocol */
5168 l2cap_recv_acldata(conn, skb, flags);
5171 BT_ERR("%s ACL packet for unknown connection handle %d",
5172 hdev->name, handle);
5178 /* SCO data packet */
/* RX path for SCO data: strip the header, look up the connection by
 * handle and pass the payload to the SCO layer; unknown handles are
 * logged and the skb dropped.
 */
5179 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5181 struct hci_sco_hdr *hdr = (void *) skb->data;
5182 struct hci_conn *conn;
5185 skb_pull(skb, HCI_SCO_HDR_SIZE);
5187 handle = __le16_to_cpu(hdr->handle);
5189 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5191 hdev->stat.sco_rx++;
5194 conn = hci_conn_hash_lookup_handle(hdev, handle);
5195 hci_dev_unlock(hdev);
5198 /* Send to upper protocol */
5199 sco_recv_scodata(conn, skb);
5202 BT_ERR("%s SCO packet for unknown connection handle %d",
5203 hdev->name, handle);
/* A request is complete when the next queued command starts a new
 * request (req.start set on the head of hdev->cmd_q).
 */
5209 static bool hci_req_is_complete(struct hci_dev *hdev)
5211 struct sk_buff *skb;
5213 skb = skb_peek(&hdev->cmd_q);
5217 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q.  Used
 * when a spontaneous controller reset orphaned the in-flight command;
 * HCI_OP_RESET itself is never resent.
 */
5220 static void hci_resend_last(struct hci_dev *hdev)
5222 struct hci_command_hdr *sent;
5223 struct sk_buff *skb;
5226 if (!hdev->sent_cmd)
5229 sent = (void *) hdev->sent_cmd->data;
5230 opcode = __le16_to_cpu(sent->opcode);
5231 if (opcode == HCI_OP_RESET)
5234 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
/* Head of the queue so it goes out before anything queued meanwhile. */
5238 skb_queue_head(&hdev->cmd_q, skb);
5239 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes: decide whether the whole request it
 * belongs to is finished, locate the request's completion callback
 * (either on sent_cmd or on a queued command), flush the remaining
 * commands of a failed request, and invoke the callback once.
 */
5242 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5244 hci_req_complete_t req_complete = NULL;
5245 struct sk_buff *skb;
5246 unsigned long flags;
5248 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5250 /* If the completed command doesn't match the last one that was
5251 * sent we need to do special handling of it.
5253 if (!hci_sent_cmd_data(hdev, opcode)) {
5254 /* Some CSR based controllers generate a spontaneous
5255 * reset complete event during init and any pending
5256 * command will never be completed. In such a case we
5257 * need to resend whatever was the last sent
5260 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5261 hci_resend_last(hdev);
5266 /* If the command succeeded and there's still more commands in
5267 * this request the request is not yet complete.
5269 if (!status && !hci_req_is_complete(hdev))
5272 /* If this was the last command in a request the complete
5273 * callback would be found in hdev->sent_cmd instead of the
5274 * command queue (hdev->cmd_q).
5276 if (hdev->sent_cmd) {
5277 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5280 /* We must set the complete callback to NULL to
5281 * avoid calling the callback more than once if
5282 * this function gets called again.
5284 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5290 /* Remove all pending commands belonging to this request */
5291 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5292 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Hit the start of the next request: put it back and stop flushing. */
5293 if (bt_cb(skb)->req.start) {
5294 __skb_queue_head(&hdev->cmd_q, skb);
5298 req_complete = bt_cb(skb)->req.complete;
5301 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5305 req_complete(hdev, status);
/* RX worker: drain hdev->rx_q, mirroring each packet to the monitor (and
 * promiscuous sockets), then dispatch by packet type.  In user-channel
 * mode the kernel stack does not process packets; during HCI_INIT, data
 * packets are dropped and only events are handled.
 */
5308 static void hci_rx_work(struct work_struct *work)
5310 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5311 struct sk_buff *skb;
5313 BT_DBG("%s", hdev->name);
5315 while ((skb = skb_dequeue(&hdev->rx_q))) {
5316 /* Send copy to monitor */
5317 hci_send_to_monitor(hdev, skb);
5319 if (atomic_read(&hdev->promisc)) {
5320 /* Send copy to the sockets */
5321 hci_send_to_sock(hdev, skb);
5324 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5329 if (test_bit(HCI_INIT, &hdev->flags)) {
5330 /* Don't process data packets in these states. */
5331 switch (bt_cb(skb)->pkt_type) {
5332 case HCI_ACLDATA_PKT:
5333 case HCI_SCODATA_PKT:
5340 switch (bt_cb(skb)->pkt_type) {
5342 BT_DBG("%s Event packet", hdev->name);
5343 hci_event_packet(hdev, skb);
5346 case HCI_ACLDATA_PKT:
5347 BT_DBG("%s ACL data packet", hdev->name);
5348 hci_acldata_packet(hdev, skb);
5351 case HCI_SCODATA_PKT:
5352 BT_DBG("%s SCO data packet", hdev->name);
5353 hci_scodata_packet(hdev, skb);
/* Command worker: when a command credit is available, dequeue the next
 * command, keep a clone in hdev->sent_cmd (for completion matching and
 * possible resend), send it and arm the command timeout.  On clone
 * failure the command is re-queued and the worker rescheduled.
 */
5363 static void hci_cmd_work(struct work_struct *work)
5365 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5366 struct sk_buff *skb;
5368 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5369 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5371 /* Send queued commands */
5372 if (atomic_read(&hdev->cmd_cnt)) {
5373 skb = skb_dequeue(&hdev->cmd_q);
/* Drop the previous sent_cmd before storing the new one. */
5377 kfree_skb(hdev->sent_cmd);
5379 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5380 if (hdev->sent_cmd) {
5381 atomic_dec(&hdev->cmd_cnt);
5382 hci_send_frame(hdev, skb);
/* A reset in flight cancels the watchdog instead of re-arming it. */
5383 if (test_bit(HCI_RESET, &hdev->flags))
5384 cancel_delayed_work(&hdev->cmd_timer);
5386 schedule_delayed_work(&hdev->cmd_timer,
/* Clone failed: put the command back and try again later. */
5389 skb_queue_head(&hdev->cmd_q, skb);
5390 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Append an LE Set Scan Enable (disable) command to the given request. */
5395 void hci_req_add_le_scan_disable(struct hci_request *req)
5397 struct hci_cp_le_set_scan_enable cp;
5399 memset(&cp, 0, sizeof(cp));
5400 cp.enable = LE_SCAN_DISABLE;
5401 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Append the command sequence that starts LE passive scanning: pick an
 * own-address type, set scan parameters (passive, configured interval and
 * window) and enable scanning with duplicate filtering.
 * Fixed: HTML-entity corruption had turned "&param_cp" into "¶m_cp".
 */
5404 void hci_req_add_le_passive_scan(struct hci_request *req)
5406 struct hci_cp_le_set_scan_param param_cp;
5407 struct hci_cp_le_set_scan_enable enable_cp;
5408 struct hci_dev *hdev = req->hdev;
5411 /* Set require_privacy to false since no SCAN_REQ are send
5412 * during passive scanning. Not using an unresolvable address
5413 * here is important so that peer devices using direct
5414 * advertising with our address will be correctly reported
5415 * by the controller.
5417 if (hci_update_random_address(req, false, &own_addr_type))
5420 memset(&param_cp, 0, sizeof(param_cp));
5421 param_cp.type = LE_SCAN_PASSIVE;
5422 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5423 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5424 param_cp.own_address_type = own_addr_type;
5425 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5428 memset(&enable_cp, 0, sizeof(enable_cp));
5429 enable_cp.enable = LE_SCAN_ENABLE;
5430 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5431 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Completion callback for the background-scan request: only logs a
 * failure status.
 */
5435 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5438 BT_DBG("HCI request failed to update background scanning: "
5439 "status 0x%2.2x", status);
5442 /* This function controls the background scanning based on hdev->pend_le_conns
5443 * list. If there are pending LE connections we start the background scanning,
5444 * otherwise we stop it.
5446 * This function requires the caller holds hdev->lock.
5448 void hci_update_background_scan(struct hci_dev *hdev)
5450 struct hci_request req;
5451 struct hci_conn *conn;
/* Skip entirely while the device is down, initializing, being set up or
 * configured, auto-off, or unregistering.
 */
5454 if (!test_bit(HCI_UP, &hdev->flags) ||
5455 test_bit(HCI_INIT, &hdev->flags) ||
5456 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5457 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5458 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5459 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5462 /* No point in doing scanning if LE support hasn't been enabled */
5463 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5466 /* If discovery is active don't interfere with it */
5467 if (hdev->discovery.state != DISCOVERY_STOPPED)
5470 hci_req_init(&req, hdev);
5472 if (list_empty(&hdev->pend_le_conns) &&
5473 list_empty(&hdev->pend_le_reports)) {
5474 /* If there is no pending LE connections or devices
5475 * to be scanned for, we should stop the background
5479 /* If controller is not scanning we are done. */
5480 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5483 hci_req_add_le_scan_disable(&req);
5485 BT_DBG("%s stopping background scanning", hdev->name);
5487 /* If there is at least one pending LE connection, we should
5488 * keep the background scan running.
5491 /* If controller is connecting, we should not start scanning
5492 * since some controllers are not able to scan and connect at
5495 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5499 /* If controller is currently scanning, we stop it to ensure we
5500 * don't miss any advertising (due to duplicates filter).
5502 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5503 hci_req_add_le_scan_disable(&req);
5505 hci_req_add_le_passive_scan(&req);
5507 BT_DBG("%s starting background scanning", hdev->name);
5510 err = hci_req_run(&req, update_background_scan_complete);
5512 BT_ERR("Failed to run HCI request: err %d", err);