2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ---- HCI notifications ---- */
/* Forward a device event (register/unregister, up/down) to the HCI
 * socket layer so monitor/user-channel sockets see device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
64 /* ---- HCI debugfs entries ---- */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
84 size_t buf_size = min(count, (sizeof(buf)-1));
88 if (!test_bit(HCI_UP, &hdev->flags))
91 if (copy_from_user(buf, user_buf, buf_size))
95 if (strtobool(buf, &enable))
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
108 hci_req_unlock(hdev);
113 err = -bt_to_errno(skb->data[0]);
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
124 static const struct file_operations dut_mode_fops = {
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
162 static const struct file_operations features_fops = {
163 .open = features_open,
166 .release = single_release,
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
191 .release = single_release,
194 static int whitelist_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bdaddr_list *b;
200 list_for_each_entry(b, &hdev->whitelist, list)
201 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
202 hci_dev_unlock(hdev);
207 static int whitelist_open(struct inode *inode, struct file *file)
209 return single_open(file, whitelist_show, inode->i_private);
212 static const struct file_operations whitelist_fops = {
213 .open = whitelist_open,
216 .release = single_release,
219 static int uuids_show(struct seq_file *f, void *p)
221 struct hci_dev *hdev = f->private;
222 struct bt_uuid *uuid;
225 list_for_each_entry(uuid, &hdev->uuids, list) {
228 /* The Bluetooth UUID values are stored in big endian,
229 * but with reversed byte order. So convert them into
230 * the right order for the %pUb modifier.
232 for (i = 0; i < 16; i++)
233 val[i] = uuid->uuid[15 - i];
235 seq_printf(f, "%pUb\n", val);
237 hci_dev_unlock(hdev);
242 static int uuids_open(struct inode *inode, struct file *file)
244 return single_open(file, uuids_show, inode->i_private);
247 static const struct file_operations uuids_fops = {
251 .release = single_release,
254 static int inquiry_cache_show(struct seq_file *f, void *p)
256 struct hci_dev *hdev = f->private;
257 struct discovery_state *cache = &hdev->discovery;
258 struct inquiry_entry *e;
262 list_for_each_entry(e, &cache->all, all) {
263 struct inquiry_data *data = &e->data;
264 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
266 data->pscan_rep_mode, data->pscan_period_mode,
267 data->pscan_mode, data->dev_class[2],
268 data->dev_class[1], data->dev_class[0],
269 __le16_to_cpu(data->clock_offset),
270 data->rssi, data->ssp_mode, e->timestamp);
273 hci_dev_unlock(hdev);
278 static int inquiry_cache_open(struct inode *inode, struct file *file)
280 return single_open(file, inquiry_cache_show, inode->i_private);
283 static const struct file_operations inquiry_cache_fops = {
284 .open = inquiry_cache_open,
287 .release = single_release,
290 static int link_keys_show(struct seq_file *f, void *ptr)
292 struct hci_dev *hdev = f->private;
293 struct list_head *p, *n;
296 list_for_each_safe(p, n, &hdev->link_keys) {
297 struct link_key *key = list_entry(p, struct link_key, list);
298 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
299 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
301 hci_dev_unlock(hdev);
306 static int link_keys_open(struct inode *inode, struct file *file)
308 return single_open(file, link_keys_show, inode->i_private);
311 static const struct file_operations link_keys_fops = {
312 .open = link_keys_open,
315 .release = single_release,
318 static int dev_class_show(struct seq_file *f, void *ptr)
320 struct hci_dev *hdev = f->private;
323 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
324 hdev->dev_class[1], hdev->dev_class[0]);
325 hci_dev_unlock(hdev);
330 static int dev_class_open(struct inode *inode, struct file *file)
332 return single_open(file, dev_class_show, inode->i_private);
335 static const struct file_operations dev_class_fops = {
336 .open = dev_class_open,
339 .release = single_release,
342 static int voice_setting_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
347 *val = hdev->voice_setting;
348 hci_dev_unlock(hdev);
353 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
354 NULL, "0x%4.4llx\n");
356 static int auto_accept_delay_set(void *data, u64 val)
358 struct hci_dev *hdev = data;
361 hdev->auto_accept_delay = val;
362 hci_dev_unlock(hdev);
367 static int auto_accept_delay_get(void *data, u64 *val)
369 struct hci_dev *hdev = data;
372 *val = hdev->auto_accept_delay;
373 hci_dev_unlock(hdev);
378 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
379 auto_accept_delay_set, "%llu\n");
381 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
382 size_t count, loff_t *ppos)
384 struct hci_dev *hdev = file->private_data;
387 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
390 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
393 static ssize_t force_sc_support_write(struct file *file,
394 const char __user *user_buf,
395 size_t count, loff_t *ppos)
397 struct hci_dev *hdev = file->private_data;
399 size_t buf_size = min(count, (sizeof(buf)-1));
402 if (test_bit(HCI_UP, &hdev->flags))
405 if (copy_from_user(buf, user_buf, buf_size))
408 buf[buf_size] = '\0';
409 if (strtobool(buf, &enable))
412 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
415 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
420 static const struct file_operations force_sc_support_fops = {
422 .read = force_sc_support_read,
423 .write = force_sc_support_write,
424 .llseek = default_llseek,
427 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
428 size_t count, loff_t *ppos)
430 struct hci_dev *hdev = file->private_data;
433 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
436 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
439 static const struct file_operations sc_only_mode_fops = {
441 .read = sc_only_mode_read,
442 .llseek = default_llseek,
445 static int idle_timeout_set(void *data, u64 val)
447 struct hci_dev *hdev = data;
449 if (val != 0 && (val < 500 || val > 3600000))
453 hdev->idle_timeout = val;
454 hci_dev_unlock(hdev);
459 static int idle_timeout_get(void *data, u64 *val)
461 struct hci_dev *hdev = data;
464 *val = hdev->idle_timeout;
465 hci_dev_unlock(hdev);
470 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
471 idle_timeout_set, "%llu\n");
473 static int rpa_timeout_set(void *data, u64 val)
475 struct hci_dev *hdev = data;
477 /* Require the RPA timeout to be at least 30 seconds and at most
480 if (val < 30 || val > (60 * 60 * 24))
484 hdev->rpa_timeout = val;
485 hci_dev_unlock(hdev);
490 static int rpa_timeout_get(void *data, u64 *val)
492 struct hci_dev *hdev = data;
495 *val = hdev->rpa_timeout;
496 hci_dev_unlock(hdev);
501 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
502 rpa_timeout_set, "%llu\n");
504 static int sniff_min_interval_set(void *data, u64 val)
506 struct hci_dev *hdev = data;
508 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
512 hdev->sniff_min_interval = val;
513 hci_dev_unlock(hdev);
518 static int sniff_min_interval_get(void *data, u64 *val)
520 struct hci_dev *hdev = data;
523 *val = hdev->sniff_min_interval;
524 hci_dev_unlock(hdev);
529 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
530 sniff_min_interval_set, "%llu\n");
532 static int sniff_max_interval_set(void *data, u64 val)
534 struct hci_dev *hdev = data;
536 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
540 hdev->sniff_max_interval = val;
541 hci_dev_unlock(hdev);
546 static int sniff_max_interval_get(void *data, u64 *val)
548 struct hci_dev *hdev = data;
551 *val = hdev->sniff_max_interval;
552 hci_dev_unlock(hdev);
557 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
558 sniff_max_interval_set, "%llu\n");
560 static int conn_info_min_age_set(void *data, u64 val)
562 struct hci_dev *hdev = data;
564 if (val == 0 || val > hdev->conn_info_max_age)
568 hdev->conn_info_min_age = val;
569 hci_dev_unlock(hdev);
574 static int conn_info_min_age_get(void *data, u64 *val)
576 struct hci_dev *hdev = data;
579 *val = hdev->conn_info_min_age;
580 hci_dev_unlock(hdev);
585 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
586 conn_info_min_age_set, "%llu\n");
588 static int conn_info_max_age_set(void *data, u64 val)
590 struct hci_dev *hdev = data;
592 if (val == 0 || val < hdev->conn_info_min_age)
596 hdev->conn_info_max_age = val;
597 hci_dev_unlock(hdev);
602 static int conn_info_max_age_get(void *data, u64 *val)
604 struct hci_dev *hdev = data;
607 *val = hdev->conn_info_max_age;
608 hci_dev_unlock(hdev);
613 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
614 conn_info_max_age_set, "%llu\n");
616 static int identity_show(struct seq_file *f, void *p)
618 struct hci_dev *hdev = f->private;
624 hci_copy_identity_address(hdev, &addr, &addr_type);
626 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
627 16, hdev->irk, &hdev->rpa);
629 hci_dev_unlock(hdev);
634 static int identity_open(struct inode *inode, struct file *file)
636 return single_open(file, identity_show, inode->i_private);
639 static const struct file_operations identity_fops = {
640 .open = identity_open,
643 .release = single_release,
646 static int random_address_show(struct seq_file *f, void *p)
648 struct hci_dev *hdev = f->private;
651 seq_printf(f, "%pMR\n", &hdev->random_addr);
652 hci_dev_unlock(hdev);
657 static int random_address_open(struct inode *inode, struct file *file)
659 return single_open(file, random_address_show, inode->i_private);
662 static const struct file_operations random_address_fops = {
663 .open = random_address_open,
666 .release = single_release,
669 static int static_address_show(struct seq_file *f, void *p)
671 struct hci_dev *hdev = f->private;
674 seq_printf(f, "%pMR\n", &hdev->static_addr);
675 hci_dev_unlock(hdev);
680 static int static_address_open(struct inode *inode, struct file *file)
682 return single_open(file, static_address_show, inode->i_private);
685 static const struct file_operations static_address_fops = {
686 .open = static_address_open,
689 .release = single_release,
692 static ssize_t force_static_address_read(struct file *file,
693 char __user *user_buf,
694 size_t count, loff_t *ppos)
696 struct hci_dev *hdev = file->private_data;
699 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
702 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
705 static ssize_t force_static_address_write(struct file *file,
706 const char __user *user_buf,
707 size_t count, loff_t *ppos)
709 struct hci_dev *hdev = file->private_data;
711 size_t buf_size = min(count, (sizeof(buf)-1));
714 if (test_bit(HCI_UP, &hdev->flags))
717 if (copy_from_user(buf, user_buf, buf_size))
720 buf[buf_size] = '\0';
721 if (strtobool(buf, &enable))
724 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
727 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
732 static const struct file_operations force_static_address_fops = {
734 .read = force_static_address_read,
735 .write = force_static_address_write,
736 .llseek = default_llseek,
739 static int white_list_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct bdaddr_list *b;
745 list_for_each_entry(b, &hdev->le_white_list, list)
746 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
747 hci_dev_unlock(hdev);
752 static int white_list_open(struct inode *inode, struct file *file)
754 return single_open(file, white_list_show, inode->i_private);
757 static const struct file_operations white_list_fops = {
758 .open = white_list_open,
761 .release = single_release,
764 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
766 struct hci_dev *hdev = f->private;
767 struct list_head *p, *n;
770 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
771 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
772 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
773 &irk->bdaddr, irk->addr_type,
774 16, irk->val, &irk->rpa);
776 hci_dev_unlock(hdev);
781 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
783 return single_open(file, identity_resolving_keys_show,
787 static const struct file_operations identity_resolving_keys_fops = {
788 .open = identity_resolving_keys_open,
791 .release = single_release,
794 static int long_term_keys_show(struct seq_file *f, void *ptr)
796 struct hci_dev *hdev = f->private;
797 struct list_head *p, *n;
800 list_for_each_safe(p, n, &hdev->long_term_keys) {
801 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
802 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
803 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
804 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
805 __le64_to_cpu(ltk->rand), 16, ltk->val);
807 hci_dev_unlock(hdev);
812 static int long_term_keys_open(struct inode *inode, struct file *file)
814 return single_open(file, long_term_keys_show, inode->i_private);
817 static const struct file_operations long_term_keys_fops = {
818 .open = long_term_keys_open,
821 .release = single_release,
824 static int conn_min_interval_set(void *data, u64 val)
826 struct hci_dev *hdev = data;
828 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
832 hdev->le_conn_min_interval = val;
833 hci_dev_unlock(hdev);
838 static int conn_min_interval_get(void *data, u64 *val)
840 struct hci_dev *hdev = data;
843 *val = hdev->le_conn_min_interval;
844 hci_dev_unlock(hdev);
849 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
850 conn_min_interval_set, "%llu\n");
852 static int conn_max_interval_set(void *data, u64 val)
854 struct hci_dev *hdev = data;
856 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
860 hdev->le_conn_max_interval = val;
861 hci_dev_unlock(hdev);
866 static int conn_max_interval_get(void *data, u64 *val)
868 struct hci_dev *hdev = data;
871 *val = hdev->le_conn_max_interval;
872 hci_dev_unlock(hdev);
877 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
878 conn_max_interval_set, "%llu\n");
880 static int conn_latency_set(void *data, u64 val)
882 struct hci_dev *hdev = data;
888 hdev->le_conn_latency = val;
889 hci_dev_unlock(hdev);
894 static int conn_latency_get(void *data, u64 *val)
896 struct hci_dev *hdev = data;
899 *val = hdev->le_conn_latency;
900 hci_dev_unlock(hdev);
905 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
906 conn_latency_set, "%llu\n");
908 static int supervision_timeout_set(void *data, u64 val)
910 struct hci_dev *hdev = data;
912 if (val < 0x000a || val > 0x0c80)
916 hdev->le_supv_timeout = val;
917 hci_dev_unlock(hdev);
922 static int supervision_timeout_get(void *data, u64 *val)
924 struct hci_dev *hdev = data;
927 *val = hdev->le_supv_timeout;
928 hci_dev_unlock(hdev);
933 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
934 supervision_timeout_set, "%llu\n");
936 static int adv_channel_map_set(void *data, u64 val)
938 struct hci_dev *hdev = data;
940 if (val < 0x01 || val > 0x07)
944 hdev->le_adv_channel_map = val;
945 hci_dev_unlock(hdev);
950 static int adv_channel_map_get(void *data, u64 *val)
952 struct hci_dev *hdev = data;
955 *val = hdev->le_adv_channel_map;
956 hci_dev_unlock(hdev);
961 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
962 adv_channel_map_set, "%llu\n");
964 static int device_list_show(struct seq_file *f, void *ptr)
966 struct hci_dev *hdev = f->private;
967 struct hci_conn_params *p;
970 list_for_each_entry(p, &hdev->le_conn_params, list) {
971 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
974 hci_dev_unlock(hdev);
979 static int device_list_open(struct inode *inode, struct file *file)
981 return single_open(file, device_list_show, inode->i_private);
984 static const struct file_operations device_list_fops = {
985 .open = device_list_open,
988 .release = single_release,
991 /* ---- HCI requests ---- */
993 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
995 BT_DBG("%s result 0x%2.2x", hdev->name, result);
997 if (hdev->req_status == HCI_REQ_PEND) {
998 hdev->req_result = result;
999 hdev->req_status = HCI_REQ_DONE;
1000 wake_up_interruptible(&hdev->req_wait_q);
1004 static void hci_req_cancel(struct hci_dev *hdev, int err)
1006 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1008 if (hdev->req_status == HCI_REQ_PEND) {
1009 hdev->req_result = err;
1010 hdev->req_status = HCI_REQ_CANCELED;
1011 wake_up_interruptible(&hdev->req_wait_q);
1015 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1018 struct hci_ev_cmd_complete *ev;
1019 struct hci_event_hdr *hdr;
1020 struct sk_buff *skb;
1024 skb = hdev->recv_evt;
1025 hdev->recv_evt = NULL;
1027 hci_dev_unlock(hdev);
1030 return ERR_PTR(-ENODATA);
1032 if (skb->len < sizeof(*hdr)) {
1033 BT_ERR("Too short HCI event");
1037 hdr = (void *) skb->data;
1038 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1041 if (hdr->evt != event)
1046 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1047 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1051 if (skb->len < sizeof(*ev)) {
1052 BT_ERR("Too short cmd_complete event");
1056 ev = (void *) skb->data;
1057 skb_pull(skb, sizeof(*ev));
1059 if (opcode == __le16_to_cpu(ev->opcode))
1062 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1063 __le16_to_cpu(ev->opcode));
1067 return ERR_PTR(-ENODATA);
1070 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1071 const void *param, u8 event, u32 timeout)
1073 DECLARE_WAITQUEUE(wait, current);
1074 struct hci_request req;
1077 BT_DBG("%s", hdev->name);
1079 hci_req_init(&req, hdev);
1081 hci_req_add_ev(&req, opcode, plen, param, event);
1083 hdev->req_status = HCI_REQ_PEND;
1085 err = hci_req_run(&req, hci_req_sync_complete);
1087 return ERR_PTR(err);
1089 add_wait_queue(&hdev->req_wait_q, &wait);
1090 set_current_state(TASK_INTERRUPTIBLE);
1092 schedule_timeout(timeout);
1094 remove_wait_queue(&hdev->req_wait_q, &wait);
1096 if (signal_pending(current))
1097 return ERR_PTR(-EINTR);
1099 switch (hdev->req_status) {
1101 err = -bt_to_errno(hdev->req_result);
1104 case HCI_REQ_CANCELED:
1105 err = -hdev->req_result;
1113 hdev->req_status = hdev->req_result = 0;
1115 BT_DBG("%s end: err %d", hdev->name, err);
1118 return ERR_PTR(err);
1120 return hci_get_cmd_complete(hdev, opcode, event);
1122 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1124 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1125 const void *param, u32 timeout)
1127 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1129 EXPORT_SYMBOL(__hci_cmd_sync);
1131 /* Execute request and wait for completion. */
1132 static int __hci_req_sync(struct hci_dev *hdev,
1133 void (*func)(struct hci_request *req,
1135 unsigned long opt, __u32 timeout)
1137 struct hci_request req;
1138 DECLARE_WAITQUEUE(wait, current);
1141 BT_DBG("%s start", hdev->name);
1143 hci_req_init(&req, hdev);
1145 hdev->req_status = HCI_REQ_PEND;
1149 err = hci_req_run(&req, hci_req_sync_complete);
1151 hdev->req_status = 0;
1153 /* ENODATA means the HCI request command queue is empty.
1154 * This can happen when a request with conditionals doesn't
1155 * trigger any commands to be sent. This is normal behavior
1156 * and should not trigger an error return.
1158 if (err == -ENODATA)
1164 add_wait_queue(&hdev->req_wait_q, &wait);
1165 set_current_state(TASK_INTERRUPTIBLE);
1167 schedule_timeout(timeout);
1169 remove_wait_queue(&hdev->req_wait_q, &wait);
1171 if (signal_pending(current))
1174 switch (hdev->req_status) {
1176 err = -bt_to_errno(hdev->req_result);
1179 case HCI_REQ_CANCELED:
1180 err = -hdev->req_result;
1188 hdev->req_status = hdev->req_result = 0;
1190 BT_DBG("%s end: err %d", hdev->name, err);
1195 static int hci_req_sync(struct hci_dev *hdev,
1196 void (*req)(struct hci_request *req,
1198 unsigned long opt, __u32 timeout)
1202 if (!test_bit(HCI_UP, &hdev->flags))
1205 /* Serialize all requests */
1207 ret = __hci_req_sync(hdev, req, opt, timeout);
1208 hci_req_unlock(hdev);
1213 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1215 BT_DBG("%s %ld", req->hdev->name, opt);
1218 set_bit(HCI_RESET, &req->hdev->flags);
1219 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1222 static void bredr_init(struct hci_request *req)
1224 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1226 /* Read Local Supported Features */
1227 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1229 /* Read Local Version */
1230 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1232 /* Read BD Address */
1233 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1236 static void amp_init(struct hci_request *req)
1238 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1240 /* Read Local Version */
1241 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1243 /* Read Local Supported Commands */
1244 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1246 /* Read Local Supported Features */
1247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1249 /* Read Local AMP Info */
1250 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1252 /* Read Data Blk size */
1253 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1255 /* Read Flow Control Mode */
1256 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1258 /* Read Location Data */
1259 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1262 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1264 struct hci_dev *hdev = req->hdev;
1266 BT_DBG("%s %ld", hdev->name, opt);
1269 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1270 hci_reset_req(req, 0);
1272 switch (hdev->dev_type) {
1282 BT_ERR("Unknown device type %d", hdev->dev_type);
1287 static void bredr_setup(struct hci_request *req)
1289 struct hci_dev *hdev = req->hdev;
1294 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1295 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1297 /* Read Class of Device */
1298 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1300 /* Read Local Name */
1301 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1303 /* Read Voice Setting */
1304 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1306 /* Read Number of Supported IAC */
1307 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1309 /* Read Current IAC LAP */
1310 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1312 /* Clear Event Filters */
1313 flt_type = HCI_FLT_CLEAR_ALL;
1314 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1316 /* Connection accept timeout ~20 secs */
1317 param = cpu_to_le16(0x7d00);
1318 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1320 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1321 * but it does not support page scan related HCI commands.
1323 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1324 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1325 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1329 static void le_setup(struct hci_request *req)
1331 struct hci_dev *hdev = req->hdev;
1333 /* Read LE Buffer Size */
1334 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1336 /* Read LE Local Supported Features */
1337 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1339 /* Read LE Supported States */
1340 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1342 /* Read LE Advertising Channel TX Power */
1343 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1345 /* Read LE White List Size */
1346 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1348 /* Clear LE White List */
1349 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1351 /* LE-only controllers have LE implicitly enabled */
1352 if (!lmp_bredr_capable(hdev))
1353 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1356 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1358 if (lmp_ext_inq_capable(hdev))
1361 if (lmp_inq_rssi_capable(hdev))
1364 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1365 hdev->lmp_subver == 0x0757)
1368 if (hdev->manufacturer == 15) {
1369 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1371 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1373 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1377 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1378 hdev->lmp_subver == 0x1805)
1384 static void hci_setup_inquiry_mode(struct hci_request *req)
1388 mode = hci_get_inquiry_mode(req->hdev);
1390 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1393 static void hci_setup_event_mask(struct hci_request *req)
1395 struct hci_dev *hdev = req->hdev;
1397 /* The second byte is 0xff instead of 0x9f (two reserved bits
1398 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1399 * command otherwise.
1401 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1403 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1404 * any event mask for pre 1.2 devices.
1406 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1409 if (lmp_bredr_capable(hdev)) {
1410 events[4] |= 0x01; /* Flow Specification Complete */
1411 events[4] |= 0x02; /* Inquiry Result with RSSI */
1412 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1413 events[5] |= 0x08; /* Synchronous Connection Complete */
1414 events[5] |= 0x10; /* Synchronous Connection Changed */
1416 /* Use a different default for LE-only devices */
1417 memset(events, 0, sizeof(events));
1418 events[0] |= 0x10; /* Disconnection Complete */
1419 events[0] |= 0x80; /* Encryption Change */
1420 events[1] |= 0x08; /* Read Remote Version Information Complete */
1421 events[1] |= 0x20; /* Command Complete */
1422 events[1] |= 0x40; /* Command Status */
1423 events[1] |= 0x80; /* Hardware Error */
1424 events[2] |= 0x04; /* Number of Completed Packets */
1425 events[3] |= 0x02; /* Data Buffer Overflow */
1426 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1429 if (lmp_inq_rssi_capable(hdev))
1430 events[4] |= 0x02; /* Inquiry Result with RSSI */
1432 if (lmp_sniffsubr_capable(hdev))
1433 events[5] |= 0x20; /* Sniff Subrating */
1435 if (lmp_pause_enc_capable(hdev))
1436 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1438 if (lmp_ext_inq_capable(hdev))
1439 events[5] |= 0x40; /* Extended Inquiry Result */
1441 if (lmp_no_flush_capable(hdev))
1442 events[7] |= 0x01; /* Enhanced Flush Complete */
1444 if (lmp_lsto_capable(hdev))
1445 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1447 if (lmp_ssp_capable(hdev)) {
1448 events[6] |= 0x01; /* IO Capability Request */
1449 events[6] |= 0x02; /* IO Capability Response */
1450 events[6] |= 0x04; /* User Confirmation Request */
1451 events[6] |= 0x08; /* User Passkey Request */
1452 events[6] |= 0x10; /* Remote OOB Data Request */
1453 events[6] |= 0x20; /* Simple Pairing Complete */
1454 events[7] |= 0x04; /* User Passkey Notification */
1455 events[7] |= 0x08; /* Keypress Notification */
1456 events[7] |= 0x10; /* Remote Host Supported
1457 * Features Notification
1461 if (lmp_le_capable(hdev))
1462 events[7] |= 0x20; /* LE Meta-Event */
1464 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 controller init: queue event-mask setup plus BR/EDR-specific
 * commands (SSP mode or EIR clear, inquiry mode, inquiry TX power,
 * extended features, authentication enable) based on LMP capabilities.
 * NOTE(review): this numbered dump elides brace/blank/declaration lines
 * (embedded line numbers are non-contiguous); code left byte-identical.
 */
1467 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1469 struct hci_dev *hdev = req->hdev;
1471 if (lmp_bredr_capable(hdev))
1474 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1476 if (lmp_le_capable(hdev))
1479 hci_setup_event_mask(req);
1481 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1482 * local supported commands HCI command.
1484 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1485 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1487 if (lmp_ssp_capable(hdev)) {
1488 /* When SSP is available, then the host features page
1489 * should also be available as well. However some
1490 * controllers list the max_page as 0 as long as SSP
1491 * has not been enabled. To achieve proper debugging
1492 * output, force the minimum max_page to 1 at least.
1494 hdev->max_page = 0x01;
1496 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
/* SSP enabled: write SSP mode (the `mode` declaration is elided here). */
1498 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1499 sizeof(mode), &mode);
/* SSP capable but not enabled: clear the cached EIR and write an empty one. */
1501 struct hci_cp_write_eir cp;
1503 memset(hdev->eir, 0, sizeof(hdev->eir));
1504 memset(&cp, 0, sizeof(cp));
1506 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1510 if (lmp_inq_rssi_capable(hdev))
1511 hci_setup_inquiry_mode(req);
1513 if (lmp_inq_tx_pwr_capable(hdev))
1514 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1516 if (lmp_ext_feat_capable(hdev)) {
1517 struct hci_cp_read_local_ext_features cp;
1520 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1524 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1526 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link-policy bitmask from the controller's LMP
 * capabilities (role switch, hold, sniff, park) and queue the
 * Write Default Link Policy command.
 */
1531 static void hci_setup_link_policy(struct hci_request *req)
1533 struct hci_dev *hdev = req->hdev;
1534 struct hci_cp_write_def_link_policy cp;
1535 u16 link_policy = 0;
1537 if (lmp_rswitch_capable(hdev))
1538 link_policy |= HCI_LP_RSWITCH;
1539 if (lmp_hold_capable(hdev))
1540 link_policy |= HCI_LP_HOLD;
1541 if (lmp_sniff_capable(hdev))
1542 link_policy |= HCI_LP_SNIFF;
1543 if (lmp_park_capable(hdev))
1544 link_policy |= HCI_LP_PARK;
/* Wire format is little-endian. */
1546 cp.policy = cpu_to_le16(link_policy);
1547 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported when the host's desired LE setting
 * differs from what the controller currently reports. Skipped for
 * LE-only controllers (no explicit enablement there).
 */
1550 static void hci_set_le_support(struct hci_request *req)
1552 struct hci_dev *hdev = req->hdev;
1553 struct hci_cp_write_le_host_supported cp;
1555 /* LE-only devices do not support explicit enablement */
1556 if (!lmp_bredr_capable(hdev))
1559 memset(&cp, 0, sizeof(cp));
1561 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
/* NOTE(review): the line setting cp.le is elided in this dump. */
1563 cp.simul = lmp_le_br_capable(hdev);
1566 if (cp.le != lmp_host_le_capable(hdev))
1567 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Compose the second page of the HCI event mask: Connectionless Slave
 * Broadcast (master and slave role) events and Authenticated Payload
 * Timeout Expired, each gated on the matching capability bit.
 */
1571 static void hci_set_event_mask_page_2(struct hci_request *req)
1573 struct hci_dev *hdev = req->hdev;
1574 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1576 /* If Connectionless Slave Broadcast master role is supported
1577 * enable all necessary events for it.
1579 if (lmp_csb_master_capable(hdev)) {
1580 events[1] |= 0x40; /* Triggered Clock Capture */
1581 events[1] |= 0x80; /* Synchronization Train Complete */
1582 events[2] |= 0x10; /* Slave Page Response Timeout */
1583 events[2] |= 0x20; /* CSB Channel Map Change */
1586 /* If Connectionless Slave Broadcast slave role is supported
1587 * enable all necessary events for it.
1589 if (lmp_csb_slave_capable(hdev)) {
1590 events[2] |= 0x01; /* Synchronization Train Received */
1591 events[2] |= 0x02; /* CSB Receive */
1592 events[2] |= 0x04; /* CSB Timeout */
1593 events[2] |= 0x08; /* Truncated Page Complete */
1596 /* Enable Authenticated Payload Timeout Expired event if supported */
1597 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
/* NOTE(review): the events[] bit set for this case is elided in the dump. */
1600 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 controller init: conditionally delete stored link keys, set
 * up the default link policy, program the LE event mask / host support,
 * and read extended feature pages beyond page 1.
 */
1603 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1605 struct hci_dev *hdev = req->hdev;
1608 /* Some Broadcom based Bluetooth controllers do not support the
1609 * Delete Stored Link Key command. They are clearly indicating its
1610 * absence in the bit mask of supported commands.
1612 * Check the supported commands and only if the the command is marked
1613 * as supported send it. If not supported assume that the controller
1614 * does not have actual support for stored link keys which makes this
1615 * command redundant anyway.
1617 * Some controllers indicate that they support handling deleting
1618 * stored link keys, but they don't. The quirk lets a driver
1619 * just disable this command.
/* commands[6] bit 0x80 == Delete Stored Link Key supported. */
1621 if (hdev->commands[6] & 0x80 &&
1622 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1623 struct hci_cp_delete_stored_link_key cp;
1625 bacpy(&cp.bdaddr, BDADDR_ANY);
1626 cp.delete_all = 0x01;
1627 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 0x10 == Write Default Link Policy supported. */
1631 if (hdev->commands[5] & 0x10)
1632 hci_setup_link_policy(req);
1634 if (lmp_le_capable(hdev)) {
1637 memset(events, 0, sizeof(events));
1640 /* If controller supports the Connection Parameters Request
1641 * Link Layer Procedure, enable the corresponding event.
1643 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1644 events[0] |= 0x20; /* LE Remote Connection
1648 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1651 hci_set_le_support(req);
1654 /* Read features beyond page 1 if available */
1655 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1656 struct hci_cp_read_local_ext_features cp;
1659 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 controller init: event mask page 2 (if the command is
 * supported), synchronization train parameters, and Secure Connections
 * enablement when both capable/forced and configured.
 */
1664 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1666 struct hci_dev *hdev = req->hdev;
1668 /* Set event mask page 2 if the HCI command for it is supported */
1669 if (hdev->commands[22] & 0x04)
1670 hci_set_event_mask_page_2(req);
1672 /* Check for Synchronization Train support */
1673 if (lmp_sync_train_capable(hdev))
1674 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1676 /* Enable Secure Connections if supported and configured */
1677 if ((lmp_sc_capable(hdev) ||
1678 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1679 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
/* NOTE(review): `support` declaration is elided in this dump. */
1681 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1682 sizeof(support), &support);
/* Run the full synchronous init sequence (init1..init4) for a new
 * controller and, during the initial HCI_SETUP phase only, create the
 * per-device debugfs entries appropriate to its capabilities
 * (BR/EDR, SSP, sniff, LE). AMP controllers stop after init1.
 */
1686 static int __hci_init(struct hci_dev *hdev)
1690 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1694 /* The Device Under Test (DUT) mode is special and available for
1695 * all controller types. So just create it early on.
1697 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1702 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1703 * BR/EDR/LE type controllers. AMP controllers only need the
1706 if (hdev->dev_type != HCI_BREDR)
1709 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1713 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1717 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1721 /* Only create debugfs entries during the initial setup
1722 * phase and not every time the controller gets powered on.
1724 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Generic entries available for every controller type. */
1727 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1729 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730 &hdev->manufacturer);
1731 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1733 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1735 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1737 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1739 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740 &conn_info_min_age_fops);
1741 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742 &conn_info_max_age_fops);
/* BR/EDR-only entries. */
1744 if (lmp_bredr_capable(hdev)) {
1745 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746 hdev, &inquiry_cache_fops);
1747 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748 hdev, &link_keys_fops);
1749 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750 hdev, &dev_class_fops);
1751 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752 hdev, &voice_setting_fops);
1755 if (lmp_ssp_capable(hdev)) {
1756 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757 hdev, &auto_accept_delay_fops);
1758 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759 hdev, &force_sc_support_fops);
1760 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761 hdev, &sc_only_mode_fops);
1764 if (lmp_sniff_capable(hdev)) {
1765 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766 hdev, &idle_timeout_fops);
1767 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768 hdev, &sniff_min_interval_fops);
1769 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770 hdev, &sniff_max_interval_fops);
/* LE-only entries. */
1773 if (lmp_le_capable(hdev)) {
1774 debugfs_create_file("identity", 0400, hdev->debugfs,
1775 hdev, &identity_fops);
1776 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777 hdev, &rpa_timeout_fops);
1778 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779 hdev, &random_address_fops);
1780 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781 hdev, &static_address_fops);
1783 /* For controllers with a public address, provide a debug
1784 * option to force the usage of the configured static
1785 * address. By default the public address is used.
1787 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788 debugfs_create_file("force_static_address", 0644,
1789 hdev->debugfs, hdev,
1790 &force_static_address_fops);
1792 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793 &hdev->le_white_list_size);
1794 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1796 debugfs_create_file("identity_resolving_keys", 0400,
1797 hdev->debugfs, hdev,
1798 &identity_resolving_keys_fops);
1799 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800 hdev, &long_term_keys_fops);
1801 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802 hdev, &conn_min_interval_fops);
1803 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804 hdev, &conn_max_interval_fops);
1805 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806 hdev, &conn_latency_fops);
1807 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808 hdev, &supervision_timeout_fops);
1809 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810 hdev, &adv_channel_map_fops);
1811 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1813 debugfs_create_u16("discov_interleaved_timeout", 0644,
1815 &hdev->discov_interleaved_timeout);
/* Minimal init request for unconfigured controllers: optional reset,
 * Read Local Version, and (when the driver can change the address)
 * Read BD Address.
 */
1821 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1823 struct hci_dev *hdev = req->hdev;
1825 BT_DBG("%s %ld", hdev->name, opt);
1828 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829 hci_reset_req(req, 0);
1831 /* Read Local Version */
1832 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1834 /* Read BD Address */
1835 if (hdev->set_bdaddr)
1836 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run the minimal hci_init0_req sequence synchronously; raw-device
 * quirked controllers are skipped entirely.
 */
1839 static int __hci_unconf_init(struct hci_dev *hdev)
1843 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1846 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request builder: write the scan-enable setting (inquiry/page scan).
 * NOTE(review): the `scan` local derived from opt is elided in this dump.
 */
1853 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1857 BT_DBG("%s %x", req->hdev->name, scan);
1859 /* Inquiry and Page scans */
1860 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: write the authentication-enable setting.
 * NOTE(review): the `auth` local derived from opt is elided in this dump.
 */
1863 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1867 BT_DBG("%s %x", req->hdev->name, auth);
1869 /* Authentication */
1870 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: write the encryption-mode setting.
 * NOTE(review): the `encrypt` local derived from opt is elided in this dump.
 */
1873 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1877 BT_DBG("%s %x", req->hdev->name, encrypt);
1880 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: write the default link policy taken from opt
 * (converted to wire-order little-endian).
 */
1883 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1885 __le16 policy = cpu_to_le16(opt);
1887 BT_DBG("%s %x", req->hdev->name, policy);
1889 /* Default link policy */
1890 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1893 /* Get HCI device by index.
1894 * Device is held on return. */
/* Walk hci_dev_list under the read lock; on a matching id take a
 * reference with hci_dev_hold(). Returns NULL when no device matches.
 */
1895 struct hci_dev *hci_dev_get(int index)
1897 struct hci_dev *hdev = NULL, *d;
1899 BT_DBG("%d", index);
1904 read_lock(&hci_dev_list_lock);
1905 list_for_each_entry(d, &hci_dev_list, list) {
1906 if (d->id == index) {
1907 hdev = hci_dev_hold(d);
1911 read_unlock(&hci_dev_list_lock);
1915 /* ---- Inquiry support ---- */
/* True while discovery is in the FINDING or RESOLVING state. */
1917 bool hci_discovery_active(struct hci_dev *hdev)
1919 struct discovery_state *discov = &hdev->discovery;
1921 switch (discov->state) {
1922 case DISCOVERY_FINDING:
1923 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" notifications; STOPPED also re-evaluates the
 * background scan. No-op when the state does not change.
 */
1931 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1933 int old_state = hdev->discovery.state;
1935 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1937 if (old_state == state)
1940 hdev->discovery.state = state;
1943 case DISCOVERY_STOPPED:
1944 hci_update_background_scan(hdev);
/* STARTING never reported discovery=1, so do not report 0 for it. */
1946 if (old_state != DISCOVERY_STARTING)
1947 mgmt_discovering(hdev, 0);
1949 case DISCOVERY_STARTING:
1951 case DISCOVERY_FINDING:
1952 mgmt_discovering(hdev, 1);
1954 case DISCOVERY_RESOLVING:
1956 case DISCOVERY_STOPPING:
/* Drop every entry from the inquiry cache and reset the unknown /
 * resolve sub-lists.
 */
1961 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1963 struct discovery_state *cache = &hdev->discovery;
1964 struct inquiry_entry *p, *n;
1966 list_for_each_entry_safe(p, n, &cache->all, all) {
1971 INIT_LIST_HEAD(&cache->unknown);
1972 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry-cache entry by Bluetooth address on the "all" list. */
1975 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1978 struct discovery_state *cache = &hdev->discovery;
1979 struct inquiry_entry *e;
1981 BT_DBG("cache %p, %pMR", cache, bdaddr);
1983 list_for_each_entry(e, &cache->all, all) {
1984 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry by address on the "unknown name" sub-list. */
1991 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1994 struct discovery_state *cache = &hdev->discovery;
1995 struct inquiry_entry *e;
1997 BT_DBG("cache %p, %pMR", cache, bdaddr);
1999 list_for_each_entry(e, &cache->unknown, list) {
2000 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the "resolve" sub-list: BDADDR_ANY matches the first
 * entry in the requested name_state, otherwise match by address.
 */
2007 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2011 struct discovery_state *cache = &hdev->discovery;
2012 struct inquiry_entry *e;
2014 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2016 list_for_each_entry(e, &cache->resolve, list) {
2017 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2019 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the resolve list keeping it ordered by
 * descending RSSI magnitude, skipping entries already NAME_PENDING.
 */
2026 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2027 struct inquiry_entry *ie)
2029 struct discovery_state *cache = &hdev->discovery;
2030 struct list_head *pos = &cache->resolve;
2031 struct inquiry_entry *p;
2033 list_del(&ie->list);
2035 list_for_each_entry(p, &cache->resolve, list) {
2036 if (p->name_state != NAME_PENDING &&
2037 abs(p->data.rssi) >= abs(ie->data.rssi))
2042 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for a discovered device and
 * compute mgmt device-found flags (legacy pairing, confirm-name).
 * Existing entries get their RSSI/name-state updated; new ones are
 * allocated GFP_ATOMIC and linked onto the all/unknown lists.
 */
2045 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_entry *ie;
2052 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored OOB data for this peer. */
2054 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2056 if (!data->ssp_mode)
2057 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2059 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2061 if (!ie->data.ssp_mode)
2062 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI change re-sorts the entry in the resolve queue. */
2064 if (ie->name_state == NAME_NEEDED &&
2065 data->rssi != ie->data.rssi) {
2066 ie->data.rssi = data->rssi;
2067 hci_inquiry_cache_update_resolve(hdev, ie);
2073 /* Entry not in the cache. Add new one. */
2074 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2076 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2080 list_add(&ie->all, &cache->all);
2083 ie->name_state = NAME_KNOWN;
2085 ie->name_state = NAME_NOT_KNOWN;
2086 list_add(&ie->list, &cache->unknown);
2090 if (name_known && ie->name_state != NAME_KNOWN &&
2091 ie->name_state != NAME_PENDING) {
2092 ie->name_state = NAME_KNOWN;
2093 list_del(&ie->list);
2096 memcpy(&ie->data, data, sizeof(*data));
2097 ie->timestamp = jiffies;
2098 cache->timestamp = jiffies;
2100 if (ie->name_state == NAME_NOT_KNOWN)
2101 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Serialize up to `num` cache entries into `buf` as an array of
 * struct inquiry_info records; returns the count copied (via the
 * elided tail of the function).
 */
2107 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2109 struct discovery_state *cache = &hdev->discovery;
2110 struct inquiry_info *info = (struct inquiry_info *) buf;
2111 struct inquiry_entry *e;
2114 list_for_each_entry(e, &cache->all, all) {
2115 struct inquiry_data *data = &e->data;
2120 bacpy(&info->bdaddr, &data->bdaddr);
2121 info->pscan_rep_mode = data->pscan_rep_mode;
2122 info->pscan_period_mode = data->pscan_period_mode;
2123 info->pscan_mode = data->pscan_mode;
2124 memcpy(info->dev_class, data->dev_class, 3);
2125 info->clock_offset = data->clock_offset;
2131 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder: queue an HCI Inquiry using the LAP/length/num_rsp
 * parameters passed through opt; skipped when an inquiry is already
 * running (HCI_INQUIRY flag set).
 */
2135 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2137 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2138 struct hci_dev *hdev = req->hdev;
2139 struct hci_cp_inquiry cp;
2141 BT_DBG("%s", hdev->name);
2143 if (test_bit(HCI_INQUIRY, &hdev->flags))
2147 memcpy(&cp.lap, &ir->lap, 3);
2148 cp.length = ir->length;
2149 cp.num_rsp = ir->num_rsp;
2150 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: abort the wait only if a signal is pending. */
2153 static int wait_inquiry(void *word)
2156 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate device state, optionally flush a
 * stale cache and run a fresh inquiry, wait for completion
 * (interruptible), then copy the cached results back to userspace.
 */
2159 int hci_inquiry(void __user *arg)
2161 __u8 __user *ptr = arg;
2162 struct hci_inquiry_req ir;
2163 struct hci_dev *hdev;
2164 int err = 0, do_inquiry = 0, max_rsp;
2168 if (copy_from_user(&ir, ptr, sizeof(ir)))
2171 hdev = hci_dev_get(ir.dev_id);
/* Inquiry via ioctl is rejected for user-channel, unconfigured,
 * non-BR/EDR, and BR/EDR-disabled devices. */
2175 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2180 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2185 if (hdev->dev_type != HCI_BREDR) {
2190 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2196 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2197 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2198 hci_inquiry_cache_flush(hdev);
2201 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units per spec; 2000ms is a safe upper bound. */
2203 timeo = ir.length * msecs_to_jiffies(2000);
2206 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2211 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212 * cleared). If it is interrupted by a signal, return -EINTR.
2214 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215 TASK_INTERRUPTIBLE))
2219 /* for unlimited number of responses we will use buffer with
2222 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2224 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2225 * copy it to the user space.
2227 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2234 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2235 hci_dev_unlock(hdev);
2237 BT_DBG("num_rsp %d", ir.num_rsp);
2239 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2241 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path: validate preconditions (not unregistering, not
 * rfkilled, address available), open the transport, run the driver
 * setup / unconfigured init / full init as appropriate, and either set
 * HCI_UP and notify, or tear everything back down on failure.
 */
2254 static int hci_dev_do_open(struct hci_dev *hdev)
2258 BT_DBG("%s %p", hdev->name, hdev);
2262 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2267 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2269 /* Check for rfkill but allow the HCI setup stage to
2270 * proceed (which in itself doesn't cause any RF activity).
2272 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2277 /* Check for valid public address or a configured static
2278 * random adddress, but let the HCI setup proceed to
2279 * be able to determine if there is a public address
2282 * In case of user channel usage, it is not important
2283 * if a public address or static random address is
2286 * This check is only valid for BR/EDR controllers
2287 * since AMP controllers do not have an address.
2289 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290 hdev->dev_type == HCI_BREDR &&
2291 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293 ret = -EADDRNOTAVAIL;
2298 if (test_bit(HCI_UP, &hdev->flags)) {
2303 if (hdev->open(hdev)) {
2308 atomic_set(&hdev->cmd_cnt, 1);
2309 set_bit(HCI_INIT, &hdev->flags);
2311 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2313 ret = hdev->setup(hdev);
2315 /* The transport driver can set these quirks before
2316 * creating the HCI device or in its setup callback.
2318 * In case any of them is set, the controller has to
2319 * start up as unconfigured.
2321 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2325 /* For an unconfigured controller it is required to
2326 * read at least the version information provided by
2327 * the Read Local Version Information command.
2329 * If the set_bdaddr driver callback is provided, then
2330 * also the original Bluetooth public device address
2331 * will be read using the Read BD Address command.
2333 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334 ret = __hci_unconf_init(hdev);
2337 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338 /* If public address change is configured, ensure that
2339 * the address gets programmed. If the driver does not
2340 * support changing the public address, fail the power
2343 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2345 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2347 ret = -EADDRNOTAVAIL;
2351 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2352 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2353 ret = __hci_init(hdev);
2356 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify, and report power-on to mgmt. */
2360 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2361 set_bit(HCI_UP, &hdev->flags);
2362 hci_notify(hdev, HCI_DEV_UP);
2363 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2367 hdev->dev_type == HCI_BREDR) {
2369 mgmt_powered(hdev, 1);
2370 hci_dev_unlock(hdev);
2373 /* Init failed, cleanup */
2374 flush_work(&hdev->tx_work);
2375 flush_work(&hdev->cmd_work);
2376 flush_work(&hdev->rx_work);
2378 skb_queue_purge(&hdev->cmd_q);
2379 skb_queue_purge(&hdev->rx_q);
2384 if (hdev->sent_cmd) {
2385 kfree_skb(hdev->sent_cmd);
2386 hdev->sent_cmd = NULL;
/* Keep only HCI_RAW across the failed open. */
2390 hdev->flags &= BIT(HCI_RAW);
2394 hci_req_unlock(hdev);
2398 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: resolve the device, reject normal opens
 * of unconfigured controllers, cancel a pending auto-power-off, wait
 * for any in-flight setup work, then delegate to hci_dev_do_open().
 */
2400 int hci_dev_open(__u16 dev)
2402 struct hci_dev *hdev;
2405 hdev = hci_dev_get(dev);
2409 /* Devices that are marked as unconfigured can only be powered
2410 * up as user channel. Trying to bring them up as normal devices
2411 * will result into a failure. Only user channel operation is
2414 * When this function is called for a user channel, the flag
2415 * HCI_USER_CHANNEL will be set first before attempting to
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2424 /* We need to ensure that no other power on/off work is pending
2425 * before proceeding to call hci_dev_do_open. This is
2426 * particularly important if the setup procedure has not yet
2429 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430 cancel_delayed_work(&hdev->power_off);
2432 /* After this call it is guaranteed that the setup procedure
2433 * has finished. This means that error conditions like RFKILL
2434 * or no valid public or static random address apply.
2436 flush_workqueue(hdev->req_workqueue);
2438 err = hci_dev_do_open(hdev);
2445 /* This function requires the caller holds hdev->lock */
/* Unlink every pending action from the LE connection-parameter list. */
2446 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2448 struct hci_conn_params *p;
2450 list_for_each_entry(p, &hdev->le_conn_params, list)
2451 list_del_init(&p->action);
2453 BT_DBG("All LE pending actions cleared");
/* Core power-off path: cancel timers and pending work, flush queues,
 * drop discovery/connection state, optionally reset the controller
 * (quirk-gated), drop the last sent command, notify mgmt of power-off,
 * and clear all non-persistent flags.
 */
2456 static int hci_dev_do_close(struct hci_dev *hdev)
2458 BT_DBG("%s %p", hdev->name, hdev);
2460 cancel_delayed_work(&hdev->power_off);
2462 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and return. */
2465 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2466 cancel_delayed_work_sync(&hdev->cmd_timer);
2467 hci_req_unlock(hdev);
2471 /* Flush RX and TX works */
2472 flush_work(&hdev->tx_work);
2473 flush_work(&hdev->rx_work);
2475 if (hdev->discov_timeout > 0) {
2476 cancel_delayed_work(&hdev->discov_off);
2477 hdev->discov_timeout = 0;
2478 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2479 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2482 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2483 cancel_delayed_work(&hdev->service_cache);
2485 cancel_delayed_work_sync(&hdev->le_scan_disable);
2487 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488 cancel_delayed_work_sync(&hdev->rpa_expired);
2491 hci_inquiry_cache_flush(hdev);
2492 hci_conn_hash_flush(hdev);
2493 hci_pend_le_actions_clear(hdev);
2494 hci_dev_unlock(hdev);
2496 hci_notify(hdev, HCI_DEV_DOWN);
2502 skb_queue_purge(&hdev->cmd_q);
2503 atomic_set(&hdev->cmd_cnt, 1);
/* Send a final HCI Reset only when the quirk asks for it and the
 * controller is neither auto-off nor unconfigured. */
2504 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2506 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2507 set_bit(HCI_INIT, &hdev->flags);
2508 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2509 clear_bit(HCI_INIT, &hdev->flags);
2512 /* flush cmd work */
2513 flush_work(&hdev->cmd_work);
2516 skb_queue_purge(&hdev->rx_q);
2517 skb_queue_purge(&hdev->cmd_q);
2518 skb_queue_purge(&hdev->raw_q);
2520 /* Drop last sent command */
2521 if (hdev->sent_cmd) {
2522 cancel_delayed_work_sync(&hdev->cmd_timer);
2523 kfree_skb(hdev->sent_cmd);
2524 hdev->sent_cmd = NULL;
2527 kfree_skb(hdev->recv_evt);
2528 hdev->recv_evt = NULL;
2530 /* After this point our queues are empty
2531 * and no tasks are scheduled. */
/* Keep only HCI_RAW; wipe all non-persistent dev_flags. */
2535 hdev->flags &= BIT(HCI_RAW);
2536 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2538 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539 if (hdev->dev_type == HCI_BREDR) {
2541 mgmt_powered(hdev, 0);
2542 hci_dev_unlock(hdev);
2546 /* Controller radio is available but is currently powered down */
2547 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2549 memset(hdev->eir, 0, sizeof(hdev->eir));
2550 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2551 bacpy(&hdev->random_addr, BDADDR_ANY);
2553 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: reject user-channel devices, cancel a
 * pending auto-power-off, then delegate to hci_dev_do_close().
 */
2559 int hci_dev_close(__u16 dev)
2561 struct hci_dev *hdev;
2564 hdev = hci_dev_get(dev);
2568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574 cancel_delayed_work(&hdev->power_off);
2576 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: on an up, configured, non-user-channel device,
 * purge queues, flush the inquiry cache and connections, zero the flow
 * counters, and issue a synchronous HCI Reset.
 */
2583 int hci_dev_reset(__u16 dev)
2585 struct hci_dev *hdev;
2588 hdev = hci_dev_get(dev);
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2599 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2604 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2610 skb_queue_purge(&hdev->rx_q);
2611 skb_queue_purge(&hdev->cmd_q);
2614 hci_inquiry_cache_flush(hdev);
2615 hci_conn_hash_flush(hdev);
2616 hci_dev_unlock(hdev);
2621 atomic_set(&hdev->cmd_cnt, 1);
2622 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2624 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2627 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device statistics counters (rejected
 * for user-channel and unconfigured devices).
 */
2632 int hci_dev_reset_stat(__u16 dev)
2634 struct hci_dev *hdev;
2637 hdev = hci_dev_get(dev);
2641 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2651 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: validates the target device, then
 * per command either runs a synchronous HCI request (auth, encrypt,
 * scan, link policy) or directly updates cached settings (link mode,
 * packet type, ACL/SCO MTU).
 */
2658 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2660 struct hci_dev *hdev;
2661 struct hci_dev_req dr;
2664 if (copy_from_user(&dr, arg, sizeof(dr)))
2667 hdev = hci_dev_get(dr.dev_id);
2671 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2676 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2681 if (hdev->dev_type != HCI_BREDR) {
2686 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2693 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2698 if (!lmp_encrypt_capable(hdev)) {
2703 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2704 /* Auth must be enabled first */
2705 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2711 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2716 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2719 /* Ensure that the connectable state gets correctly
2720 * notified if the whitelist is in use.
2722 if (!err && !list_empty(&hdev->whitelist)) {
2725 if ((dr.dev_opt & SCAN_PAGE))
2726 changed = !test_and_set_bit(HCI_CONNECTABLE,
2729 changed = test_and_set_bit(HCI_CONNECTABLE,
2733 mgmt_new_settings(hdev);
2738 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2742 case HCISETLINKMODE:
2743 hdev->link_mode = ((__u16) dr.dev_opt) &
2744 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2748 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits and packet count in the low. */
2752 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2753 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2757 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2758 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: copy the requested maximum from userspace,
 * allocate a bounded buffer, snapshot id/flags for every registered
 * device under the list read lock, and copy the result back.
 */
2771 int hci_get_dev_list(void __user *arg)
2773 struct hci_dev *hdev;
2774 struct hci_dev_list_req *dl;
2775 struct hci_dev_req *dr;
2776 int n = 0, size, err;
2779 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the allocation to two pages' worth of entries. */
2782 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2785 size = sizeof(*dl) + dev_num * sizeof(*dr);
2787 dl = kzalloc(size, GFP_KERNEL);
2793 read_lock(&hci_dev_list_lock);
2794 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Listing a device cancels a scheduled auto-power-off. */
2795 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2796 cancel_delayed_work(&hdev->power_off);
2798 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2799 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2801 (dr + n)->dev_id = hdev->id;
2802 (dr + n)->dev_opt = hdev->flags;
2807 read_unlock(&hci_dev_list_lock);
2810 size = sizeof(*dl) + n * sizeof(*dr);
2812 err = copy_to_user(arg, dl, size);
2815 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot (name,
 * address, type, flags, MTUs — LE values substitute when the device is
 * not BR/EDR capable, link policy/mode, stats, features) and copy it
 * back to userspace.
 */
2818 int hci_get_dev_info(void __user *arg)
2820 struct hci_dev *hdev;
2821 struct hci_dev_info di;
2824 if (copy_from_user(&di, arg, sizeof(di)))
2827 hdev = hci_dev_get(di.dev_id);
2831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2832 cancel_delayed_work_sync(&hdev->power_off);
2834 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2835 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2837 strcpy(di.name, hdev->name);
2838 di.bdaddr = hdev->bdaddr;
/* Low nibble: bus type; bits 4-5: device type. */
2839 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2840 di.flags = hdev->flags;
2841 di.pkt_type = hdev->pkt_type;
2842 if (lmp_bredr_capable(hdev)) {
2843 di.acl_mtu = hdev->acl_mtu;
2844 di.acl_pkts = hdev->acl_pkts;
2845 di.sco_mtu = hdev->sco_mtu;
2846 di.sco_pkts = hdev->sco_pkts;
2848 di.acl_mtu = hdev->le_mtu;
2849 di.acl_pkts = hdev->le_pkts;
2853 di.link_policy = hdev->link_policy;
2854 di.link_mode = hdev->link_mode;
2856 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2857 memcpy(&di.features, &hdev->features, sizeof(di.features));
2859 if (copy_to_user(arg, &di, sizeof(di)))
2867 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the RFKILLED flag and power the device down
 * when it gets blocked (except during setup/config, and never for
 * user-channel devices).
 */
2869 static int hci_rfkill_set_block(void *data, bool blocked)
2871 struct hci_dev *hdev = data;
2873 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2875 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2879 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2880 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2881 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2882 hci_dev_do_close(hdev);
2884 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table; only block/unblock is implemented. */
2890 static const struct rfkill_ops hci_rfkill_ops = {
2891 .set_block = hci_rfkill_set_block,
/* Deferred power-on worker: open the device, re-check conditions that
 * were deliberately ignored during setup (rfkill, unconfigured, no
 * address) and power back off if any still hold; otherwise schedule
 * auto-off and emit the appropriate mgmt index event on leaving the
 * SETUP or CONFIG phase.
 */
2894 static void hci_power_on(struct work_struct *work)
2896 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2899 BT_DBG("%s", hdev->name);
2901 err = hci_dev_do_open(hdev);
2903 mgmt_set_powered_failed(hdev, err);
2907 /* During the HCI setup phase, a few error conditions are
2908 * ignored and they need to be checked now. If they are still
2909 * valid, it is important to turn the device back off.
2911 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2912 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2913 (hdev->dev_type == HCI_BREDR &&
2914 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2915 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2916 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2917 hci_dev_do_close(hdev);
2918 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2919 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2920 HCI_AUTO_OFF_TIMEOUT);
2923 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2924 /* For unconfigured devices, set the HCI_RAW flag
2925 * so that userspace can easily identify them.
2927 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2928 set_bit(HCI_RAW, &hdev->flags);
2930 /* For fully configured devices, this will send
2931 * the Index Added event. For unconfigured devices,
2932 * it will send Unconfigued Index Added event.
2934 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2935 * and no event will be send.
2937 mgmt_index_added(hdev);
2938 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2939 /* When the controller is now configured, then it
2940 * is important to clear the HCI_RAW flag.
2942 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2943 clear_bit(HCI_RAW, &hdev->flags);
2945 /* Powering on the controller with HCI_CONFIG set only
2946 * happens with the transition from unconfigured to
2947 * configured. This will send the Index Added event.
2949 mgmt_index_added(hdev);
/* Deferred power-off work: simply closes the device. */
static void hci_power_off(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
BT_DBG("%s", hdev->name);
hci_dev_do_close(hdev);

/* Delayed work that ends a time-limited discoverable mode; the actual
 * state change is handled by the management layer.
 */
static void hci_discov_off(struct work_struct *work)
struct hci_dev *hdev;
hdev = container_of(work, struct hci_dev, discov_off.work);
BT_DBG("%s", hdev->name);
mgmt_discoverable_timeout(hdev);
/* Remove and free every stored service UUID for this controller. */
void hci_uuids_clear(struct hci_dev *hdev)
struct bt_uuid *uuid, *tmp;
list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
list_del(&uuid->list);

/* Remove and free all stored BR/EDR link keys. */
void hci_link_keys_clear(struct hci_dev *hdev)
struct list_head *p, *n;
list_for_each_safe(p, n, &hdev->link_keys) {
struct link_key *key;
key = list_entry(p, struct link_key, list);

/* Remove and free all stored SMP Long Term Keys. */
void hci_smp_ltks_clear(struct hci_dev *hdev)
struct smp_ltk *k, *tmp;
list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {

/* Remove and free all stored SMP Identity Resolving Keys. */
void hci_smp_irks_clear(struct hci_dev *hdev)
struct smp_irk *k, *tmp;
list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up the stored BR/EDR link key for @bdaddr, if any. */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
list_for_each_entry(k, &hdev->link_keys, list)
if (bacmp(bdaddr, &k->bdaddr) == 0)

/* Decide whether a newly generated link key should be stored
 * persistently, based on the key type and both sides'
 * authentication requirements from the pairing.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
u8 key_type, u8 old_key_type)
/* Key types below 0x03 are legacy (pre-SSP) keys */
if (key_type < 0x03)
/* Debug keys are insecure so don't store them persistently */
if (key_type == HCI_LK_DEBUG_COMBINATION)
/* Changed combination key and there's no previous one */
if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
/* Security mode 3 case */
/* Neither local nor remote side had no-bonding as requirement */
if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
/* Local side had dedicated bonding as requirement */
if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
/* Remote side had dedicated bonding as requirement */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
/* If none of the above criteria match, then don't store the key
 */

/* An LTK of type SMP_LTK is the master-role key; other types are not. */
static bool ltk_type_master(u8 type)
return (type == SMP_LTK);
/* Find an LTK matching the given EDiv/Rand pair and role (master flag). */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
list_for_each_entry(k, &hdev->long_term_keys, list) {
if (k->ediv != ediv || k->rand != rand)
/* Skip keys stored for the opposite role */
if (ltk_type_master(k->type) != master)

/* Find an LTK by peer identity address, address type and role. */
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type, bool master)
list_for_each_entry(k, &hdev->long_term_keys, list)
if (addr_type == k->bdaddr_type &&
bacmp(bdaddr, &k->bdaddr) == 0 &&
ltk_type_master(k->type) == master)

/* Resolve a Resolvable Private Address to a stored IRK. First try the
 * cached RPA of each IRK, then fall back to the (more expensive) AES
 * resolution, caching the RPA on a match.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
struct smp_irk *irk;
list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
if (!bacmp(&irk->rpa, rpa))
list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
bacpy(&irk->rpa, rpa);

/* Find a stored IRK by the peer's identity address. */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
struct smp_irk *irk;
/* Identity Address must be public or static random */
if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
if (addr_type == irk->addr_type &&
bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) the BR/EDR link key for @bdaddr. On return,
 * *persistent (if non-NULL) tells the caller whether the key should be
 * kept across power cycles (see hci_persistent_key()).
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
bdaddr_t *bdaddr, u8 *val, u8 type,
u8 pin_len, bool *persistent)
struct link_key *key, *old_key;
/* Reuse an existing entry for this address when there is one */
old_key = hci_find_link_key(hdev, bdaddr);
old_key_type = old_key->type;
/* 0xff means "no previous key type known" */
old_key_type = conn ? conn->key_type : 0xff;
key = kzalloc(sizeof(*key), GFP_KERNEL);
list_add(&key->list, &hdev->link_keys);
BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
/* Some buggy controller combinations generate a changed
 * combination key for legacy pairing even when there's no
 * previous key; treat such a key as a regular combination key.
 */
if (type == HCI_LK_CHANGED_COMBINATION &&
(!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
type = HCI_LK_COMBINATION;
conn->key_type = type;
bacpy(&key->bdaddr, bdaddr);
memcpy(key->val, val, HCI_LINK_KEY_SIZE);
key->pin_len = pin_len;
/* Keep the previous type so a changed-combination event does not
 * upgrade/downgrade the stored key's classification.
 */
if (type == HCI_LK_CHANGED_COMBINATION)
key->type = old_key_type;
*persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP Long Term Key for the given identity
 * address, role and properties.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type, u8 type, u8 authenticated,
u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
struct smp_ltk *key, *old_key;
bool master = ltk_type_master(type);
/* Replace an existing key for the same address/role if present */
old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
key = kzalloc(sizeof(*key), GFP_KERNEL);
list_add(&key->list, &hdev->long_term_keys);
bacpy(&key->bdaddr, bdaddr);
key->bdaddr_type = addr_type;
memcpy(key->val, tk, sizeof(key->val));
key->authenticated = authenticated;
key->enc_size = enc_size;

/* Store (or update) an Identity Resolving Key for @bdaddr, together
 * with the last RPA it was seen to resolve (may be BDADDR_ANY).
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type, u8 val[16], bdaddr_t *rpa)
struct smp_irk *irk;
/* Reuse the existing entry for this identity address if any */
irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
irk = kzalloc(sizeof(*irk), GFP_KERNEL);
bacpy(&irk->bdaddr, bdaddr);
irk->addr_type = addr_type;
list_add(&irk->list, &hdev->identity_resolving_keys);
memcpy(irk->val, val, 16);
bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for @bdaddr; -ENOENT when absent. */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
struct link_key *key;
key = hci_find_link_key(hdev, bdaddr);
BT_DBG("%s removing %pMR", hdev->name, bdaddr);
list_del(&key->list);

/* Delete all LTKs stored for the given address/address-type;
 * returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
struct smp_ltk *k, *tmp;
list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
BT_DBG("%s removing %pMR", hdev->name, bdaddr);
return removed ? 0 : -ENOENT;

/* Delete all IRKs stored for the given identity address/type. */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
struct smp_irk *k, *tmp;
list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
BT_DBG("%s removing %pMR", hdev->name, bdaddr);
/* HCI command timer function: fires when the controller failed to answer
 * an HCI command in time. Logs the offending opcode (when the sent
 * command is still around) and unblocks the command queue.
 */
static void hci_cmd_timeout(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
if (hdev->sent_cmd) {
struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
u16 opcode = __le16_to_cpu(sent->opcode);
BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
BT_ERR("%s command tx timeout", hdev->name);
/* Allow one command to be sent again and re-kick the cmd worker */
atomic_set(&hdev->cmd_cnt, 1);
queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up cached Out-Of-Band pairing data for @bdaddr, if any. */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
struct oob_data *data;
list_for_each_entry(data, &hdev->remote_oob_data, list)
if (bacmp(bdaddr, &data->bdaddr) == 0)

/* Delete cached OOB data for @bdaddr; -ENOENT when none stored. */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
struct oob_data *data;
data = hci_find_remote_oob_data(hdev, bdaddr);
BT_DBG("%s removing %pMR", hdev->name, bdaddr);
list_del(&data->list);

/* Drop all cached remote OOB data entries. */
void hci_remote_oob_data_clear(struct hci_dev *hdev)
struct oob_data *data, *n;
list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
list_del(&data->list);

/* Store P-192 OOB data (hash + randomizer) for @bdaddr, replacing any
 * existing entry. The P-256 fields are cleared since only legacy data
 * was provided.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *hash, u8 *randomizer)
struct oob_data *data;
data = hci_find_remote_oob_data(hdev, bdaddr);
data = kmalloc(sizeof(*data), GFP_KERNEL);
bacpy(&data->bdaddr, bdaddr);
list_add(&data->list, &hdev->remote_oob_data);
memcpy(data->hash192, hash, sizeof(data->hash192));
memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
memset(data->hash256, 0, sizeof(data->hash256));
memset(data->randomizer256, 0, sizeof(data->randomizer256));
BT_DBG("%s for %pMR", hdev->name, bdaddr);

/* Store extended OOB data: both P-192 and P-256 hash/randomizer pairs. */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *hash192, u8 *randomizer192,
u8 *hash256, u8 *randomizer256)
struct oob_data *data;
data = hci_find_remote_oob_data(hdev, bdaddr);
data = kmalloc(sizeof(*data), GFP_KERNEL);
bacpy(&data->bdaddr, bdaddr);
list_add(&data->list, &hdev->remote_oob_data);
memcpy(data->hash192, hash192, sizeof(data->hash192));
memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
memcpy(data->hash256, hash256, sizeof(data->hash256));
memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find the entry matching (bdaddr, type) in a generic address list
 * (black/white lists); NULL when not present.
 */
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
bdaddr_t *bdaddr, u8 type)
struct bdaddr_list *b;
list_for_each_entry(b, bdaddr_list, list) {
if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

/* Remove and free every entry of an address list. */
void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
struct list_head *p, *n;
list_for_each_safe(p, n, bdaddr_list) {
struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

/* Add (bdaddr, type) to an address list. BDADDR_ANY is rejected and
 * duplicates are not allowed.
 */
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
struct bdaddr_list *entry;
if (!bacmp(bdaddr, BDADDR_ANY))
if (hci_bdaddr_list_lookup(list, bdaddr, type))
entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
bacpy(&entry->bdaddr, bdaddr);
entry->bdaddr_type = type;
list_add(&entry->list, list);

/* Remove (bdaddr, type) from an address list; BDADDR_ANY flushes the
 * whole list instead of matching a single entry.
 */
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
struct bdaddr_list *entry;
if (!bacmp(bdaddr, BDADDR_ANY)) {
hci_bdaddr_list_clear(list);
entry = hci_bdaddr_list_lookup(list, bdaddr, type);
list_del(&entry->list);
/* This function requires the caller holds hdev->lock */
/* Find the LE connection parameters stored for an identity address. */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type)
struct hci_conn_params *params;
/* The conn params list only contains identity addresses */
if (!hci_is_identity_address(addr, addr_type))
list_for_each_entry(params, &hdev->le_conn_params, list) {
if (bacmp(&params->addr, addr) == 0 &&
params->addr_type == addr_type) {

/* Check whether there is an established LE connection to the given
 * address/address-type.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
struct hci_conn *conn;
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
if (conn->dst_type != type)
if (conn->state != BT_CONNECTED)

/* This function requires the caller holds hdev->lock */
/* Look up conn params on one of the pending-action lists (pend_le_conns
 * or pend_le_reports); note iteration is over the ->action linkage.
 */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr, u8 addr_type)
struct hci_conn_params *param;
/* The list only contains identity addresses */
if (!hci_is_identity_address(addr, addr_type))
list_for_each_entry(param, list, action) {
if (bacmp(&param->addr, addr) == 0 &&
param->addr_type == addr_type)
/* This function requires the caller holds hdev->lock */
/* Get-or-create the LE connection parameter entry for an identity
 * address, initializing a new entry with the controller defaults and
 * auto-connect disabled.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type)
struct hci_conn_params *params;
if (!hci_is_identity_address(addr, addr_type))
/* Return the existing entry when one is already stored */
params = hci_conn_params_lookup(hdev, addr, addr_type);
params = kzalloc(sizeof(*params), GFP_KERNEL);
BT_ERR("Out of memory");
bacpy(&params->addr, addr);
params->addr_type = addr_type;
list_add(&params->list, &hdev->le_conn_params);
INIT_LIST_HEAD(&params->action);
/* Start from the controller-wide default connection parameters */
params->conn_min_interval = hdev->le_conn_min_interval;
params->conn_max_interval = hdev->le_conn_max_interval;
params->conn_latency = hdev->le_conn_latency;
params->supervision_timeout = hdev->le_supv_timeout;
params->auto_connect = HCI_AUTO_CONN_DISABLED;
BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for an address, moving the entry between
 * the pending-connection/report action lists and refreshing the
 * background scan as needed.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
struct hci_conn_params *params;
params = hci_conn_params_add(hdev, addr, addr_type);
if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on */
list_del_init(&params->action);
switch (auto_connect) {
case HCI_AUTO_CONN_DISABLED:
case HCI_AUTO_CONN_LINK_LOSS:
/* No pending action; just re-evaluate the scan state */
hci_update_background_scan(hdev);
case HCI_AUTO_CONN_REPORT:
list_add(&params->action, &hdev->pend_le_reports);
hci_update_background_scan(hdev);
case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection when not already connected */
if (!is_connected(hdev, addr, addr_type)) {
list_add(&params->action, &hdev->pend_le_conns);
hci_update_background_scan(hdev);
params->auto_connect = auto_connect;
BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* This function requires the caller holds hdev->lock */
/* Remove the stored LE connection parameters for one address and
 * re-evaluate the background scan.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
struct hci_conn_params *params;
params = hci_conn_params_lookup(hdev, addr, addr_type);
list_del(&params->action);
list_del(&params->list);
hci_update_background_scan(hdev);
BT_DBG("addr %pMR (type %u)", addr, addr_type);

/* This function requires the caller holds hdev->lock */
/* Drop only the entries whose auto-connect policy is disabled. */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
struct hci_conn_params *params, *tmp;
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
list_del(&params->list);
BT_DBG("All LE disabled connection parameters were removed");

/* This function requires the caller holds hdev->lock */
/* Drop every stored LE connection parameter entry (e.g. on unregister)
 * and re-evaluate the background scan.
 */
void hci_conn_params_clear_all(struct hci_dev *hdev)
struct hci_conn_params *params, *tmp;
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
list_del(&params->action);
list_del(&params->list);
hci_update_background_scan(hdev);
BT_DBG("All LE connection parameters were removed");
/* Request-complete callback for the inquiry started below: on failure,
 * reset the discovery state machine.
 */
static void inquiry_complete(struct hci_dev *hdev, u8 status)
BT_ERR("Failed to start inquiry: status %d", status);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);

/* Runs after LE scanning was disabled. For LE-only discovery we are
 * done; for interleaved discovery, continue with a BR/EDR inquiry.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
/* General inquiry access code (GIAC) */
u8 lap[3] = { 0x33, 0x8b, 0x9e };
struct hci_request req;
struct hci_cp_inquiry cp;
BT_ERR("Failed to disable LE scanning: status %d", status);
switch (hdev->discovery.type) {
case DISCOV_TYPE_LE:
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
case DISCOV_TYPE_INTERLEAVED:
hci_req_init(&req, hdev);
memset(&cp, 0, sizeof(cp));
memcpy(&cp.lap, lap, sizeof(cp.lap));
cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Start the inquiry with a fresh result cache */
hci_inquiry_cache_flush(hdev);
err = hci_req_run(&req, inquiry_complete);
BT_ERR("Inquiry request failed: err %d", err);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);

/* Delayed work that stops a running LE scan when its duration expires. */
static void le_scan_disable_work(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
le_scan_disable.work);
struct hci_request req;
BT_DBG("%s", hdev->name);
hci_req_init(&req, hdev);
hci_req_add_le_scan_disable(&req);
err = hci_req_run(&req, le_scan_disable_work_complete);
BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an HCI command on @req that programs @rpa as the controller's
 * random address, unless doing so right now would be unsafe.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
struct hci_dev *hdev = req->hdev;
/* If we're advertising or initiating an LE connection we can't
 * go ahead and change the random address at this time. This is
 * because the eventual initiator address used for the
 * subsequently created connection will be undefined (some
 * controllers use the new address and others the one we had
 * when the operation started).
 *
 * In this kind of scenario skip the update and let the random
 * address be updated at the next cycle.
 */
if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
BT_DBG("Deferring random address update");
hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Choose and (when necessary) program the own address to use for an LE
 * operation, reporting the resulting address type via *own_addr_type.
 * Order of preference: RPA (privacy enabled), unresolvable private
 * address (privacy required), static random address, public address.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
struct hci_dev *hdev = req->hdev;
/* If privacy is enabled use a resolvable private address. If
 * current RPA has expired or there is something else than
 * the current RPA in use, then generate a new one.
 */
if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
*own_addr_type = ADDR_LE_DEV_RANDOM;
/* Keep the current RPA while it is unexpired and in use */
if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
!bacmp(&hdev->random_addr, &hdev->rpa))
err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
BT_ERR("%s failed to generate new RPA", hdev->name);
set_random_addr(req, &hdev->rpa);
/* Schedule regeneration of the RPA after the timeout */
to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
/* In case of required privacy without resolvable private address,
 * use an unresolvable private address. This is useful for active
 * scanning and non-connectable advertising.
 */
if (require_privacy) {
get_random_bytes(&urpa, 6);
urpa.b[5] &= 0x3f; /* Clear two most significant bits */
*own_addr_type = ADDR_LE_DEV_RANDOM;
set_random_addr(req, &urpa);
/* If forcing static address is in use or there is no public
 * address use the static address as random address (but skip
 * the HCI command if the current random address is already the
 * static one).
 */
if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
!bacmp(&hdev->bdaddr, BDADDR_ANY)) {
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (bacmp(&hdev->static_addr, &hdev->random_addr))
hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
&hdev->static_addr);
/* Neither privacy nor static address is being used so use a
 * public address.
 */
*own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * The resulting address and its type are written to *bdaddr and
 * *bdaddr_type respectively.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
!bacmp(&hdev->bdaddr, BDADDR_ANY)) {
bacpy(bdaddr, &hdev->static_addr);
*bdaddr_type = ADDR_LE_DEV_RANDOM;
bacpy(bdaddr, &hdev->bdaddr);
*bdaddr_type = ADDR_LE_DEV_PUBLIC;
/* Alloc HCI device */
/* Allocate and initialize a new hci_dev: default parameters, all the
 * per-device lists, work items and queues. The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
struct hci_dev *hdev;
hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Baseline BR/EDR capabilities and defaults */
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->num_iac = 0x01; /* One IAC support is mandatory */
hdev->io_capability = 0x03; /* No Input No Output */
hdev->manufacturer = 0xffff; /* Default to internal use */
hdev->inq_tx_power = HCI_TX_POWER_INVALID;
hdev->adv_tx_power = HCI_TX_POWER_INVALID;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
/* Default LE scan/connection parameters (controller units —
 * NOTE(review): presumably the spec's 0.625ms/1.25ms units; confirm
 * against the HCI command definitions).
 */
hdev->le_adv_channel_map = 0x07;
hdev->le_scan_interval = 0x0060;
hdev->le_scan_window = 0x0030;
hdev->le_conn_min_interval = 0x0028;
hdev->le_conn_max_interval = 0x0038;
hdev->le_conn_latency = 0x0000;
hdev->le_supv_timeout = 0x002a;
hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);
/* Per-device lists: keys, filters, LE connection state, etc. */
INIT_LIST_HEAD(&hdev->mgmt_pending);
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->whitelist);
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->long_term_keys);
INIT_LIST_HEAD(&hdev->identity_resolving_keys);
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->le_white_list);
INIT_LIST_HEAD(&hdev->le_conn_params);
INIT_LIST_HEAD(&hdev->pend_le_conns);
INIT_LIST_HEAD(&hdev->pend_le_reports);
INIT_LIST_HEAD(&hdev->conn_hash.list);
/* Work items driving RX/TX/command processing and power management */
INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
init_waitqueue_head(&hdev->req_wait_q);
INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
hci_init_sysfs(hdev);
discovery_init(hdev);
/* Free HCI device */
/* Drops the device reference; the memory is released by the device
 * core's release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
/* will free via device release */
put_device(&hdev->dev);
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
/* Register an allocated hci_dev with the stack: assign an index, create
 * workqueues and debugfs, set up crypto, sysfs device, rfkill, and
 * finally queue the initial power-on. Returns the index or a negative
 * errno (error paths unwind in reverse order of setup).
 */
int hci_register_dev(struct hci_dev *hdev)
/* Transport callbacks are mandatory */
if (!hdev->open || !hdev->close || !hdev->send)
/* Do not allow HCI_AMP devices to register at index 0,
 * so the index can be used as the AMP controller ID.
 */
switch (hdev->dev_type) {
id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
sprintf(hdev->name, "hci%d", id);
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Single-threaded, high-priority workqueues for this device */
hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, hdev->name);
if (!hdev->workqueue) {
hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, hdev->name);
if (!hdev->req_workqueue) {
destroy_workqueue(hdev->workqueue);
if (!IS_ERR_OR_NULL(bt_debugfs))
hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
dev_set_name(&hdev->dev, "%s", hdev->name);
/* AES context used for RPA resolution/generation */
hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
if (IS_ERR(hdev->tfm_aes)) {
BT_ERR("Unable to create crypto context");
error = PTR_ERR(hdev->tfm_aes);
hdev->tfm_aes = NULL;
error = device_add(&hdev->dev);
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill is optional: keep going without it on failure */
if (rfkill_register(hdev->rfkill) < 0) {
rfkill_destroy(hdev->rfkill);
hdev->rfkill = NULL;
if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
set_bit(HCI_RFKILLED, &hdev->dev_flags);
set_bit(HCI_SETUP, &hdev->dev_flags);
set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
if (hdev->dev_type == HCI_BREDR) {
/* Assume BR/EDR support until proven otherwise (such as
 * through reading supported features during init.
 */
set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
write_lock(&hci_dev_list_lock);
list_add(&hdev->list, &hci_dev_list);
write_unlock(&hci_dev_list_lock);
/* Devices that are marked for raw-only usage are unconfigured
 * and should not be included in normal operation.
 */
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
hci_notify(hdev, HCI_DEV_REG);
queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwinding: release resources in reverse order */
crypto_free_blkcipher(hdev->tfm_aes);
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
ida_simple_remove(&hci_index_ida, hdev->id);
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
/* Tear down a registered device: take it off the global list, close it,
 * notify mgmt/userspace, free all stored per-device state and release
 * the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
set_bit(HCI_UNREGISTER, &hdev->dev_flags);
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
hci_dev_do_close(hdev);
/* Free any partially reassembled packets */
for (i = 0; i < NUM_REASSEMBLY; i++)
kfree_skb(hdev->reassembly[i]);
cancel_work_sync(&hdev->power_on);
/* Only announce removal if mgmt saw the device as operational */
if (!test_bit(HCI_INIT, &hdev->flags) &&
!test_bit(HCI_SETUP, &hdev->dev_flags) &&
!test_bit(HCI_CONFIG, &hdev->dev_flags)) {
mgmt_index_removed(hdev);
hci_dev_unlock(hdev);
/* mgmt_index_removed should take care of emptying the
 * pending command list.
 */
BUG_ON(!list_empty(&hdev->mgmt_pending));
hci_notify(hdev, HCI_DEV_UNREG);
rfkill_unregister(hdev->rfkill);
rfkill_destroy(hdev->rfkill);
crypto_free_blkcipher(hdev->tfm_aes);
device_del(&hdev->dev);
debugfs_remove_recursive(hdev->debugfs);
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
/* Drop every piece of per-device stored state */
hci_bdaddr_list_clear(&hdev->blacklist);
hci_bdaddr_list_clear(&hdev->whitelist);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
hci_smp_ltks_clear(hdev);
hci_smp_irks_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_bdaddr_list_clear(&hdev->le_white_list);
hci_conn_params_clear_all(hdev);
hci_dev_unlock(hdev);
ida_simple_remove(&hci_index_ida, id);
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
/* Notify listeners (HCI sockets) that the device is suspending. */
int hci_suspend_dev(struct hci_dev *hdev)
hci_notify(hdev, HCI_DEV_SUSPEND);
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
/* Notify listeners (HCI sockets) that the device has resumed. */
int hci_resume_dev(struct hci_dev *hdev)
hci_notify(hdev, HCI_DEV_RESUME);
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
/* Entry point for drivers delivering a complete frame: timestamp it,
 * mark it incoming and hand it to the RX worker. Frames are only
 * accepted while the device is up or initializing.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
&& !test_bit(HCI_INIT, &hdev->flags))) {
/* Incoming skb */
bt_cb(skb)->incoming = 1;
/* Time stamp */
__net_timestamp(skb);
skb_queue_tail(&hdev->rx_q, skb);
queue_work(hdev->workqueue, &hdev->rx_work);
EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble an HCI packet of the given @type from driver
 * fragments, using hdev->reassembly[index] as scratch state. Once the
 * header is complete, scb->expect tracks the remaining payload; a fully
 * reassembled packet is passed to hci_recv_frame(). Returns the number
 * of input bytes not yet consumed, or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
int count, __u8 index)
struct sk_buff *skb;
struct bt_skb_cb *scb;
if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
index >= NUM_REASSEMBLY)
skb = hdev->reassembly[index];
/* No packet in progress: allocate one sized for the packet type */
case HCI_ACLDATA_PKT:
len = HCI_MAX_FRAME_SIZE;
hlen = HCI_ACL_HDR_SIZE;
len = HCI_MAX_EVENT_SIZE;
hlen = HCI_EVENT_HDR_SIZE;
case HCI_SCODATA_PKT:
len = HCI_MAX_SCO_SIZE;
hlen = HCI_SCO_HDR_SIZE;
skb = bt_skb_alloc(len, GFP_ATOMIC);
scb = (void *) skb->cb;
scb->pkt_type = type;
hdev->reassembly[index] = skb;
/* Copy as much as is expected from the available input */
scb = (void *) skb->cb;
len = min_t(uint, scb->expect, count);
memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length from it */
if (skb->len == HCI_EVENT_HDR_SIZE) {
struct hci_event_hdr *h = hci_event_hdr(skb);
scb->expect = h->plen;
/* Malformed: payload would not fit the allocation */
if (skb_tailroom(skb) < scb->expect) {
hdev->reassembly[index] = NULL;
case HCI_ACLDATA_PKT:
if (skb->len == HCI_ACL_HDR_SIZE) {
struct hci_acl_hdr *h = hci_acl_hdr(skb);
scb->expect = __le16_to_cpu(h->dlen);
if (skb_tailroom(skb) < scb->expect) {
hdev->reassembly[index] = NULL;
case HCI_SCODATA_PKT:
if (skb->len == HCI_SCO_HDR_SIZE) {
struct hci_sco_hdr *h = hci_sco_hdr(skb);
scb->expect = h->dlen;
if (skb_tailroom(skb) < scb->expect) {
hdev->reassembly[index] = NULL;
if (scb->expect == 0) {
/* Complete frame */
bt_cb(skb)->pkt_type = type;
hci_recv_frame(hdev, skb);
hdev->reassembly[index] = NULL;

/* Driver entry point for fragmented input with a known packet type;
 * loops feeding hci_reassembly() until all input is consumed.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
rem = hci_reassembly(hdev, type, data, count, type - 1);
data += (count - rem);
EXPORT_SYMBOL(hci_recv_fragment);
/* Reassembly slot used for byte-stream (typeless) driver input */
#define STREAM_REASSEMBLY 0

/* Driver entry point for raw byte-stream input where the packet type
 * indicator is the first byte of each packet (e.g. UART transports).
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
struct { char type; } *pkt;
/* Start of the frame */
/* Continue an in-progress packet with its recorded type */
type = bt_cb(skb)->pkt_type;
rem = hci_reassembly(hdev, type, data, count,
data += (count - rem);
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback (e.g. L2CAP, SCO) on the global
 * callback list.
 */
int hci_register_cb(struct hci_cb *cb)
BT_DBG("%p name %s", cb, cb->name);
write_lock(&hci_cb_list_lock);
list_add(&cb->list, &hci_cb_list);
write_unlock(&hci_cb_list_lock);
EXPORT_SYMBOL(hci_register_cb);

/* Remove a previously registered upper-protocol callback. */
int hci_unregister_cb(struct hci_cb *cb)
BT_DBG("%p name %s", cb, cb->name);
write_lock(&hci_cb_list_lock);
list_del(&cb->list);
write_unlock(&hci_cb_list_lock);
EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver, after timestamping it and
 * mirroring it to the monitor (and, in promiscuous mode, HCI sockets).
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
/* Time stamp */
__net_timestamp(skb);
/* Send copy to monitor */
hci_send_to_monitor(hdev, skb);
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
hci_send_to_sock(hdev, skb);
/* Get rid of skb owner, prior to sending to the driver. */
err = hdev->send(hdev, skb);
BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* Initialize an HCI request: empty command queue bound to @hdev. */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
skb_queue_head_init(&req->cmd_q);

/* Submit a built request: attach the completion callback to the last
 * command, splice the request onto the device command queue and kick
 * the command worker.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
unsigned long flags;
BT_DBG("length %u", skb_queue_len(&req->cmd_q));
/* If an error occurred during request building, remove all HCI
 * commands queued on the HCI request queue.
 */
skb_queue_purge(&req->cmd_q);
/* Do not allow empty requests */
if (skb_queue_empty(&req->cmd_q))
/* Completion fires when the final command's response arrives */
skb = skb_peek_tail(&req->cmd_q);
bt_cb(skb)->req.complete = complete;
spin_lock_irqsave(&hdev->cmd_q.lock, flags);
skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command: header (opcode + length)
 * followed by @plen bytes of @param. Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
u32 plen, const void *param)
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
struct sk_buff *skb;
skb = bt_skb_alloc(len, GFP_ATOMIC);
hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(opcode);	/* wire format is little endian */
memcpy(skb_put(skb, plen), param, plen);
BT_DBG("skb len %d", skb->len);
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
/* Send HCI command */
/* Queue a single stand-alone HCI command for transmission. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
struct sk_buff *skb;
BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
skb = hci_prepare_cmd(hdev, opcode, plen, param);
BT_ERR("%s no memory for command", hdev->name);
/* Stand-alone HCI commands must be flagged as
 * single-command requests.
 */
bt_cb(skb)->req.start = true;
skb_queue_tail(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
/* Queue a command to an asynchronous HCI request */
/* As hci_req_add() but also records the event expected to complete this
 * command when it differs from the usual Command Complete/Status.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
const void *param, u8 event)
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
/* If an error occurred during request building, there is no point in
 * queueing the HCI command. We can simply return.
 */
skb = hci_prepare_cmd(hdev, opcode, plen, param);
BT_ERR("%s no memory for command (opcode 0x%4.4x)",
hdev->name, opcode);
/* First command of the request marks the request start */
if (skb_queue_empty(&req->cmd_q))
bt_cb(skb)->req.start = true;
bt_cb(skb)->req.event = event;
skb_queue_tail(&req->cmd_q, skb);

/* Queue a command completed by the default command events. */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
hci_req_add_ev(req, opcode, plen, param, 0);
4480 /* Get data from the previously sent command */
4481 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4483 struct hci_command_hdr *hdr;
4485 if (!hdev->sent_cmd)
4488 hdr = (void *) hdev->sent_cmd->data;
4490 if (hdr->opcode != cpu_to_le16(opcode))
4493 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4495 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4499 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4501 struct hci_acl_hdr *hdr;
4504 skb_push(skb, HCI_ACL_HDR_SIZE);
4505 skb_reset_transport_header(skb);
4506 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4507 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4508 hdr->dlen = cpu_to_le16(len);
4511 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4512 struct sk_buff *skb, __u16 flags)
4514 struct hci_conn *conn = chan->conn;
4515 struct hci_dev *hdev = conn->hdev;
4516 struct sk_buff *list;
4518 skb->len = skb_headlen(skb);
4521 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4523 switch (hdev->dev_type) {
4525 hci_add_acl_hdr(skb, conn->handle, flags);
4528 hci_add_acl_hdr(skb, chan->handle, flags);
4531 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4535 list = skb_shinfo(skb)->frag_list;
4537 /* Non fragmented */
4538 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4540 skb_queue_tail(queue, skb);
4543 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4545 skb_shinfo(skb)->frag_list = NULL;
4547 /* Queue all fragments atomically */
4548 spin_lock(&queue->lock);
4550 __skb_queue_tail(queue, skb);
4552 flags &= ~ACL_START;
4555 skb = list; list = list->next;
4557 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4558 hci_add_acl_hdr(skb, conn->handle, flags);
4560 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4562 __skb_queue_tail(queue, skb);
4565 spin_unlock(&queue->lock);
4569 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4571 struct hci_dev *hdev = chan->conn->hdev;
4573 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4575 hci_queue_acl(chan, &chan->data_q, skb, flags);
4577 queue_work(hdev->workqueue, &hdev->tx_work);
4581 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4583 struct hci_dev *hdev = conn->hdev;
4584 struct hci_sco_hdr hdr;
4586 BT_DBG("%s len %d", hdev->name, skb->len);
4588 hdr.handle = cpu_to_le16(conn->handle);
4589 hdr.dlen = skb->len;
4591 skb_push(skb, HCI_SCO_HDR_SIZE);
4592 skb_reset_transport_header(skb);
4593 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4595 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4597 skb_queue_tail(&conn->data_q, skb);
4598 queue_work(hdev->workqueue, &hdev->tx_work);
4601 /* ---- HCI TX task (outgoing data) ---- */
4603 /* HCI Connection scheduler */
4604 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4607 struct hci_conn_hash *h = &hdev->conn_hash;
4608 struct hci_conn *conn = NULL, *c;
4609 unsigned int num = 0, min = ~0;
4611 /* We don't have to lock device here. Connections are always
4612 * added and removed with TX task disabled. */
4616 list_for_each_entry_rcu(c, &h->list, list) {
4617 if (c->type != type || skb_queue_empty(&c->data_q))
4620 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4625 if (c->sent < min) {
4630 if (hci_conn_num(hdev, type) == num)
4639 switch (conn->type) {
4641 cnt = hdev->acl_cnt;
4645 cnt = hdev->sco_cnt;
4648 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4652 BT_ERR("Unknown link type");
4660 BT_DBG("conn %p quote %d", conn, *quote);
4664 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4666 struct hci_conn_hash *h = &hdev->conn_hash;
4669 BT_ERR("%s link tx timeout", hdev->name);
4673 /* Kill stalled connections */
4674 list_for_each_entry_rcu(c, &h->list, list) {
4675 if (c->type == type && c->sent) {
4676 BT_ERR("%s killing stalled connection %pMR",
4677 hdev->name, &c->dst);
4678 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4685 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4688 struct hci_conn_hash *h = &hdev->conn_hash;
4689 struct hci_chan *chan = NULL;
4690 unsigned int num = 0, min = ~0, cur_prio = 0;
4691 struct hci_conn *conn;
4692 int cnt, q, conn_num = 0;
4694 BT_DBG("%s", hdev->name);
4698 list_for_each_entry_rcu(conn, &h->list, list) {
4699 struct hci_chan *tmp;
4701 if (conn->type != type)
4704 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4709 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4710 struct sk_buff *skb;
4712 if (skb_queue_empty(&tmp->data_q))
4715 skb = skb_peek(&tmp->data_q);
4716 if (skb->priority < cur_prio)
4719 if (skb->priority > cur_prio) {
4722 cur_prio = skb->priority;
4727 if (conn->sent < min) {
4733 if (hci_conn_num(hdev, type) == conn_num)
4742 switch (chan->conn->type) {
4744 cnt = hdev->acl_cnt;
4747 cnt = hdev->block_cnt;
4751 cnt = hdev->sco_cnt;
4754 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4758 BT_ERR("Unknown link type");
4763 BT_DBG("chan %p quote %d", chan, *quote);
4767 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4769 struct hci_conn_hash *h = &hdev->conn_hash;
4770 struct hci_conn *conn;
4773 BT_DBG("%s", hdev->name);
4777 list_for_each_entry_rcu(conn, &h->list, list) {
4778 struct hci_chan *chan;
4780 if (conn->type != type)
4783 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4788 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4789 struct sk_buff *skb;
4796 if (skb_queue_empty(&chan->data_q))
4799 skb = skb_peek(&chan->data_q);
4800 if (skb->priority >= HCI_PRIO_MAX - 1)
4803 skb->priority = HCI_PRIO_MAX - 1;
4805 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4809 if (hci_conn_num(hdev, type) == num)
4817 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4819 /* Calculate count of blocks used by this packet */
4820 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4823 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4825 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4826 /* ACL tx timeout must be longer than maximum
4827 * link supervision timeout (40.9 seconds) */
4828 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4829 HCI_ACL_TX_TIMEOUT))
4830 hci_link_tx_to(hdev, ACL_LINK);
4834 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4836 unsigned int cnt = hdev->acl_cnt;
4837 struct hci_chan *chan;
4838 struct sk_buff *skb;
4841 __check_timeout(hdev, cnt);
4843 while (hdev->acl_cnt &&
4844 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4845 u32 priority = (skb_peek(&chan->data_q))->priority;
4846 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4847 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4848 skb->len, skb->priority);
4850 /* Stop if priority has changed */
4851 if (skb->priority < priority)
4854 skb = skb_dequeue(&chan->data_q);
4856 hci_conn_enter_active_mode(chan->conn,
4857 bt_cb(skb)->force_active);
4859 hci_send_frame(hdev, skb);
4860 hdev->acl_last_tx = jiffies;
4868 if (cnt != hdev->acl_cnt)
4869 hci_prio_recalculate(hdev, ACL_LINK);
4872 static void hci_sched_acl_blk(struct hci_dev *hdev)
4874 unsigned int cnt = hdev->block_cnt;
4875 struct hci_chan *chan;
4876 struct sk_buff *skb;
4880 __check_timeout(hdev, cnt);
4882 BT_DBG("%s", hdev->name);
4884 if (hdev->dev_type == HCI_AMP)
4889 while (hdev->block_cnt > 0 &&
4890 (chan = hci_chan_sent(hdev, type, "e))) {
4891 u32 priority = (skb_peek(&chan->data_q))->priority;
4892 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4895 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4896 skb->len, skb->priority);
4898 /* Stop if priority has changed */
4899 if (skb->priority < priority)
4902 skb = skb_dequeue(&chan->data_q);
4904 blocks = __get_blocks(hdev, skb);
4905 if (blocks > hdev->block_cnt)
4908 hci_conn_enter_active_mode(chan->conn,
4909 bt_cb(skb)->force_active);
4911 hci_send_frame(hdev, skb);
4912 hdev->acl_last_tx = jiffies;
4914 hdev->block_cnt -= blocks;
4917 chan->sent += blocks;
4918 chan->conn->sent += blocks;
4922 if (cnt != hdev->block_cnt)
4923 hci_prio_recalculate(hdev, type);
4926 static void hci_sched_acl(struct hci_dev *hdev)
4928 BT_DBG("%s", hdev->name);
4930 /* No ACL link over BR/EDR controller */
4931 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4934 /* No AMP link over AMP controller */
4935 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4938 switch (hdev->flow_ctl_mode) {
4939 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4940 hci_sched_acl_pkt(hdev);
4943 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4944 hci_sched_acl_blk(hdev);
4950 static void hci_sched_sco(struct hci_dev *hdev)
4952 struct hci_conn *conn;
4953 struct sk_buff *skb;
4956 BT_DBG("%s", hdev->name);
4958 if (!hci_conn_num(hdev, SCO_LINK))
4961 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4962 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4963 BT_DBG("skb %p len %d", skb, skb->len);
4964 hci_send_frame(hdev, skb);
4967 if (conn->sent == ~0)
4973 static void hci_sched_esco(struct hci_dev *hdev)
4975 struct hci_conn *conn;
4976 struct sk_buff *skb;
4979 BT_DBG("%s", hdev->name);
4981 if (!hci_conn_num(hdev, ESCO_LINK))
4984 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4986 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4987 BT_DBG("skb %p len %d", skb, skb->len);
4988 hci_send_frame(hdev, skb);
4991 if (conn->sent == ~0)
4997 static void hci_sched_le(struct hci_dev *hdev)
4999 struct hci_chan *chan;
5000 struct sk_buff *skb;
5001 int quote, cnt, tmp;
5003 BT_DBG("%s", hdev->name);
5005 if (!hci_conn_num(hdev, LE_LINK))
5008 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5009 /* LE tx timeout must be longer than maximum
5010 * link supervision timeout (40.9 seconds) */
5011 if (!hdev->le_cnt && hdev->le_pkts &&
5012 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5013 hci_link_tx_to(hdev, LE_LINK);
5016 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5018 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
5019 u32 priority = (skb_peek(&chan->data_q))->priority;
5020 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5021 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5022 skb->len, skb->priority);
5024 /* Stop if priority has changed */
5025 if (skb->priority < priority)
5028 skb = skb_dequeue(&chan->data_q);
5030 hci_send_frame(hdev, skb);
5031 hdev->le_last_tx = jiffies;
5042 hdev->acl_cnt = cnt;
5045 hci_prio_recalculate(hdev, LE_LINK);
5048 static void hci_tx_work(struct work_struct *work)
5050 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5051 struct sk_buff *skb;
5053 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5054 hdev->sco_cnt, hdev->le_cnt);
5056 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5057 /* Schedule queues and send stuff to HCI driver */
5058 hci_sched_acl(hdev);
5059 hci_sched_sco(hdev);
5060 hci_sched_esco(hdev);
5064 /* Send next queued raw (unknown type) packet */
5065 while ((skb = skb_dequeue(&hdev->raw_q)))
5066 hci_send_frame(hdev, skb);
5069 /* ----- HCI RX task (incoming data processing) ----- */
5071 /* ACL data packet */
5072 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5074 struct hci_acl_hdr *hdr = (void *) skb->data;
5075 struct hci_conn *conn;
5076 __u16 handle, flags;
5078 skb_pull(skb, HCI_ACL_HDR_SIZE);
5080 handle = __le16_to_cpu(hdr->handle);
5081 flags = hci_flags(handle);
5082 handle = hci_handle(handle);
5084 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5087 hdev->stat.acl_rx++;
5090 conn = hci_conn_hash_lookup_handle(hdev, handle);
5091 hci_dev_unlock(hdev);
5094 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5096 /* Send to upper protocol */
5097 l2cap_recv_acldata(conn, skb, flags);
5100 BT_ERR("%s ACL packet for unknown connection handle %d",
5101 hdev->name, handle);
5107 /* SCO data packet */
5108 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5110 struct hci_sco_hdr *hdr = (void *) skb->data;
5111 struct hci_conn *conn;
5114 skb_pull(skb, HCI_SCO_HDR_SIZE);
5116 handle = __le16_to_cpu(hdr->handle);
5118 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5120 hdev->stat.sco_rx++;
5123 conn = hci_conn_hash_lookup_handle(hdev, handle);
5124 hci_dev_unlock(hdev);
5127 /* Send to upper protocol */
5128 sco_recv_scodata(conn, skb);
5131 BT_ERR("%s SCO packet for unknown connection handle %d",
5132 hdev->name, handle);
5138 static bool hci_req_is_complete(struct hci_dev *hdev)
5140 struct sk_buff *skb;
5142 skb = skb_peek(&hdev->cmd_q);
5146 return bt_cb(skb)->req.start;
5149 static void hci_resend_last(struct hci_dev *hdev)
5151 struct hci_command_hdr *sent;
5152 struct sk_buff *skb;
5155 if (!hdev->sent_cmd)
5158 sent = (void *) hdev->sent_cmd->data;
5159 opcode = __le16_to_cpu(sent->opcode);
5160 if (opcode == HCI_OP_RESET)
5163 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5167 skb_queue_head(&hdev->cmd_q, skb);
5168 queue_work(hdev->workqueue, &hdev->cmd_work);
5171 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5173 hci_req_complete_t req_complete = NULL;
5174 struct sk_buff *skb;
5175 unsigned long flags;
5177 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5179 /* If the completed command doesn't match the last one that was
5180 * sent we need to do special handling of it.
5182 if (!hci_sent_cmd_data(hdev, opcode)) {
5183 /* Some CSR based controllers generate a spontaneous
5184 * reset complete event during init and any pending
5185 * command will never be completed. In such a case we
5186 * need to resend whatever was the last sent
5189 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5190 hci_resend_last(hdev);
5195 /* If the command succeeded and there's still more commands in
5196 * this request the request is not yet complete.
5198 if (!status && !hci_req_is_complete(hdev))
5201 /* If this was the last command in a request the complete
5202 * callback would be found in hdev->sent_cmd instead of the
5203 * command queue (hdev->cmd_q).
5205 if (hdev->sent_cmd) {
5206 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5209 /* We must set the complete callback to NULL to
5210 * avoid calling the callback more than once if
5211 * this function gets called again.
5213 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5219 /* Remove all pending commands belonging to this request */
5220 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5221 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5222 if (bt_cb(skb)->req.start) {
5223 __skb_queue_head(&hdev->cmd_q, skb);
5227 req_complete = bt_cb(skb)->req.complete;
5230 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5234 req_complete(hdev, status);
5237 static void hci_rx_work(struct work_struct *work)
5239 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5240 struct sk_buff *skb;
5242 BT_DBG("%s", hdev->name);
5244 while ((skb = skb_dequeue(&hdev->rx_q))) {
5245 /* Send copy to monitor */
5246 hci_send_to_monitor(hdev, skb);
5248 if (atomic_read(&hdev->promisc)) {
5249 /* Send copy to the sockets */
5250 hci_send_to_sock(hdev, skb);
5253 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5258 if (test_bit(HCI_INIT, &hdev->flags)) {
5259 /* Don't process data packets in this states. */
5260 switch (bt_cb(skb)->pkt_type) {
5261 case HCI_ACLDATA_PKT:
5262 case HCI_SCODATA_PKT:
5269 switch (bt_cb(skb)->pkt_type) {
5271 BT_DBG("%s Event packet", hdev->name);
5272 hci_event_packet(hdev, skb);
5275 case HCI_ACLDATA_PKT:
5276 BT_DBG("%s ACL data packet", hdev->name);
5277 hci_acldata_packet(hdev, skb);
5280 case HCI_SCODATA_PKT:
5281 BT_DBG("%s SCO data packet", hdev->name);
5282 hci_scodata_packet(hdev, skb);
5292 static void hci_cmd_work(struct work_struct *work)
5294 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5295 struct sk_buff *skb;
5297 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5298 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5300 /* Send queued commands */
5301 if (atomic_read(&hdev->cmd_cnt)) {
5302 skb = skb_dequeue(&hdev->cmd_q);
5306 kfree_skb(hdev->sent_cmd);
5308 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5309 if (hdev->sent_cmd) {
5310 atomic_dec(&hdev->cmd_cnt);
5311 hci_send_frame(hdev, skb);
5312 if (test_bit(HCI_RESET, &hdev->flags))
5313 cancel_delayed_work(&hdev->cmd_timer);
5315 schedule_delayed_work(&hdev->cmd_timer,
5318 skb_queue_head(&hdev->cmd_q, skb);
5319 queue_work(hdev->workqueue, &hdev->cmd_work);
5324 void hci_req_add_le_scan_disable(struct hci_request *req)
5326 struct hci_cp_le_set_scan_enable cp;
5328 memset(&cp, 0, sizeof(cp));
5329 cp.enable = LE_SCAN_DISABLE;
5330 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5333 void hci_req_add_le_passive_scan(struct hci_request *req)
5335 struct hci_cp_le_set_scan_param param_cp;
5336 struct hci_cp_le_set_scan_enable enable_cp;
5337 struct hci_dev *hdev = req->hdev;
5340 /* Set require_privacy to false since no SCAN_REQ are send
5341 * during passive scanning. Not using an unresolvable address
5342 * here is important so that peer devices using direct
5343 * advertising with our address will be correctly reported
5344 * by the controller.
5346 if (hci_update_random_address(req, false, &own_addr_type))
5349 memset(¶m_cp, 0, sizeof(param_cp));
5350 param_cp.type = LE_SCAN_PASSIVE;
5351 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5352 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5353 param_cp.own_address_type = own_addr_type;
5354 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5357 memset(&enable_cp, 0, sizeof(enable_cp));
5358 enable_cp.enable = LE_SCAN_ENABLE;
5359 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5360 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5364 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5367 BT_DBG("HCI request failed to update background scanning: "
5368 "status 0x%2.2x", status);
5371 /* This function controls the background scanning based on hdev->pend_le_conns
5372 * list. If there are pending LE connection we start the background scanning,
5373 * otherwise we stop it.
5375 * This function requires the caller holds hdev->lock.
5377 void hci_update_background_scan(struct hci_dev *hdev)
5379 struct hci_request req;
5380 struct hci_conn *conn;
5383 if (!test_bit(HCI_UP, &hdev->flags) ||
5384 test_bit(HCI_INIT, &hdev->flags) ||
5385 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5386 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5387 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5388 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5391 /* No point in doing scanning if LE support hasn't been enabled */
5392 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5395 /* If discovery is active don't interfere with it */
5396 if (hdev->discovery.state != DISCOVERY_STOPPED)
5399 hci_req_init(&req, hdev);
5401 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5402 list_empty(&hdev->pend_le_conns) &&
5403 list_empty(&hdev->pend_le_reports)) {
5404 /* If there is no pending LE connections or devices
5405 * to be scanned for, we should stop the background
5409 /* If controller is not scanning we are done. */
5410 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5413 hci_req_add_le_scan_disable(&req);
5415 BT_DBG("%s stopping background scanning", hdev->name);
5417 /* If there is at least one pending LE connection, we should
5418 * keep the background scan running.
5421 /* If controller is connecting, we should not start scanning
5422 * since some controllers are not able to scan and connect at
5425 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5429 /* If controller is currently scanning, we stop it to ensure we
5430 * don't miss any advertising (due to duplicates filter).
5432 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5433 hci_req_add_le_scan_disable(&req);
5435 hci_req_add_le_passive_scan(&req);
5437 BT_DBG("%s starting background scanning", hdev->name);
5440 err = hci_req_run(&req, update_background_scan_complete);
5442 BT_ERR("Failed to run HCI request: err %d", err);