2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_RWLOCK(hci_cb_list_lock);
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
59 /* ----- HCI requests ----- */
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
68 /* ---- HCI notifications ---- */
/* Forward a device state-change event to the HCI socket layer so that
 * monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75 /* ---- HCI debugfs entries ---- */
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
83 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
92 struct hci_dev *hdev = file->private_data;
95 size_t buf_size = min(count, (sizeof(buf)-1));
99 if (!test_bit(HCI_UP, &hdev->flags))
102 if (copy_from_user(buf, user_buf, buf_size))
105 buf[buf_size] = '\0';
106 if (strtobool(buf, &enable))
109 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
114 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
117 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
119 hci_req_unlock(hdev);
124 err = -bt_to_errno(skb->data[0]);
130 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
135 static const struct file_operations dut_mode_fops = {
137 .read = dut_mode_read,
138 .write = dut_mode_write,
139 .llseek = default_llseek,
142 static int features_show(struct seq_file *f, void *ptr)
144 struct hci_dev *hdev = f->private;
148 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
149 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
150 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
151 hdev->features[p][0], hdev->features[p][1],
152 hdev->features[p][2], hdev->features[p][3],
153 hdev->features[p][4], hdev->features[p][5],
154 hdev->features[p][6], hdev->features[p][7]);
156 if (lmp_le_capable(hdev))
157 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
158 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
159 hdev->le_features[0], hdev->le_features[1],
160 hdev->le_features[2], hdev->le_features[3],
161 hdev->le_features[4], hdev->le_features[5],
162 hdev->le_features[6], hdev->le_features[7]);
163 hci_dev_unlock(hdev);
168 static int features_open(struct inode *inode, struct file *file)
170 return single_open(file, features_show, inode->i_private);
173 static const struct file_operations features_fops = {
174 .open = features_open,
177 .release = single_release,
180 static int blacklist_show(struct seq_file *f, void *p)
182 struct hci_dev *hdev = f->private;
183 struct bdaddr_list *b;
186 list_for_each_entry(b, &hdev->blacklist, list)
187 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
188 hci_dev_unlock(hdev);
193 static int blacklist_open(struct inode *inode, struct file *file)
195 return single_open(file, blacklist_show, inode->i_private);
198 static const struct file_operations blacklist_fops = {
199 .open = blacklist_open,
202 .release = single_release,
205 static int uuids_show(struct seq_file *f, void *p)
207 struct hci_dev *hdev = f->private;
208 struct bt_uuid *uuid;
211 list_for_each_entry(uuid, &hdev->uuids, list) {
214 /* The Bluetooth UUID values are stored in big endian,
215 * but with reversed byte order. So convert them into
216 * the right order for the %pUb modifier.
218 for (i = 0; i < 16; i++)
219 val[i] = uuid->uuid[15 - i];
221 seq_printf(f, "%pUb\n", val);
223 hci_dev_unlock(hdev);
228 static int uuids_open(struct inode *inode, struct file *file)
230 return single_open(file, uuids_show, inode->i_private);
233 static const struct file_operations uuids_fops = {
237 .release = single_release,
240 static int inquiry_cache_show(struct seq_file *f, void *p)
242 struct hci_dev *hdev = f->private;
243 struct discovery_state *cache = &hdev->discovery;
244 struct inquiry_entry *e;
248 list_for_each_entry(e, &cache->all, all) {
249 struct inquiry_data *data = &e->data;
250 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
252 data->pscan_rep_mode, data->pscan_period_mode,
253 data->pscan_mode, data->dev_class[2],
254 data->dev_class[1], data->dev_class[0],
255 __le16_to_cpu(data->clock_offset),
256 data->rssi, data->ssp_mode, e->timestamp);
259 hci_dev_unlock(hdev);
264 static int inquiry_cache_open(struct inode *inode, struct file *file)
266 return single_open(file, inquiry_cache_show, inode->i_private);
269 static const struct file_operations inquiry_cache_fops = {
270 .open = inquiry_cache_open,
273 .release = single_release,
276 static int link_keys_show(struct seq_file *f, void *ptr)
278 struct hci_dev *hdev = f->private;
279 struct link_key *key;
282 list_for_each_entry_rcu(key, &hdev->link_keys, list)
283 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
284 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
290 static int link_keys_open(struct inode *inode, struct file *file)
292 return single_open(file, link_keys_show, inode->i_private);
295 static const struct file_operations link_keys_fops = {
296 .open = link_keys_open,
299 .release = single_release,
302 static int dev_class_show(struct seq_file *f, void *ptr)
304 struct hci_dev *hdev = f->private;
307 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
308 hdev->dev_class[1], hdev->dev_class[0]);
309 hci_dev_unlock(hdev);
314 static int dev_class_open(struct inode *inode, struct file *file)
316 return single_open(file, dev_class_show, inode->i_private);
319 static const struct file_operations dev_class_fops = {
320 .open = dev_class_open,
323 .release = single_release,
326 static int voice_setting_get(void *data, u64 *val)
328 struct hci_dev *hdev = data;
331 *val = hdev->voice_setting;
332 hci_dev_unlock(hdev);
337 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
338 NULL, "0x%4.4llx\n");
340 static int auto_accept_delay_set(void *data, u64 val)
342 struct hci_dev *hdev = data;
345 hdev->auto_accept_delay = val;
346 hci_dev_unlock(hdev);
351 static int auto_accept_delay_get(void *data, u64 *val)
353 struct hci_dev *hdev = data;
356 *val = hdev->auto_accept_delay;
357 hci_dev_unlock(hdev);
362 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
363 auto_accept_delay_set, "%llu\n");
365 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
366 size_t count, loff_t *ppos)
368 struct hci_dev *hdev = file->private_data;
371 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
374 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
377 static ssize_t force_sc_support_write(struct file *file,
378 const char __user *user_buf,
379 size_t count, loff_t *ppos)
381 struct hci_dev *hdev = file->private_data;
383 size_t buf_size = min(count, (sizeof(buf)-1));
386 if (test_bit(HCI_UP, &hdev->flags))
389 if (copy_from_user(buf, user_buf, buf_size))
392 buf[buf_size] = '\0';
393 if (strtobool(buf, &enable))
396 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
399 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
404 static const struct file_operations force_sc_support_fops = {
406 .read = force_sc_support_read,
407 .write = force_sc_support_write,
408 .llseek = default_llseek,
411 static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
412 size_t count, loff_t *ppos)
414 struct hci_dev *hdev = file->private_data;
417 buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
420 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
423 static ssize_t force_lesc_support_write(struct file *file,
424 const char __user *user_buf,
425 size_t count, loff_t *ppos)
427 struct hci_dev *hdev = file->private_data;
429 size_t buf_size = min(count, (sizeof(buf)-1));
432 if (copy_from_user(buf, user_buf, buf_size))
435 buf[buf_size] = '\0';
436 if (strtobool(buf, &enable))
439 if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
442 change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
447 static const struct file_operations force_lesc_support_fops = {
449 .read = force_lesc_support_read,
450 .write = force_lesc_support_write,
451 .llseek = default_llseek,
454 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
455 size_t count, loff_t *ppos)
457 struct hci_dev *hdev = file->private_data;
460 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
463 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
466 static const struct file_operations sc_only_mode_fops = {
468 .read = sc_only_mode_read,
469 .llseek = default_llseek,
472 static int idle_timeout_set(void *data, u64 val)
474 struct hci_dev *hdev = data;
476 if (val != 0 && (val < 500 || val > 3600000))
480 hdev->idle_timeout = val;
481 hci_dev_unlock(hdev);
486 static int idle_timeout_get(void *data, u64 *val)
488 struct hci_dev *hdev = data;
491 *val = hdev->idle_timeout;
492 hci_dev_unlock(hdev);
497 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
498 idle_timeout_set, "%llu\n");
500 static int rpa_timeout_set(void *data, u64 val)
502 struct hci_dev *hdev = data;
504 /* Require the RPA timeout to be at least 30 seconds and at most
507 if (val < 30 || val > (60 * 60 * 24))
511 hdev->rpa_timeout = val;
512 hci_dev_unlock(hdev);
517 static int rpa_timeout_get(void *data, u64 *val)
519 struct hci_dev *hdev = data;
522 *val = hdev->rpa_timeout;
523 hci_dev_unlock(hdev);
528 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
529 rpa_timeout_set, "%llu\n");
531 static int sniff_min_interval_set(void *data, u64 val)
533 struct hci_dev *hdev = data;
535 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
539 hdev->sniff_min_interval = val;
540 hci_dev_unlock(hdev);
545 static int sniff_min_interval_get(void *data, u64 *val)
547 struct hci_dev *hdev = data;
550 *val = hdev->sniff_min_interval;
551 hci_dev_unlock(hdev);
556 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
557 sniff_min_interval_set, "%llu\n");
559 static int sniff_max_interval_set(void *data, u64 val)
561 struct hci_dev *hdev = data;
563 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
567 hdev->sniff_max_interval = val;
568 hci_dev_unlock(hdev);
573 static int sniff_max_interval_get(void *data, u64 *val)
575 struct hci_dev *hdev = data;
578 *val = hdev->sniff_max_interval;
579 hci_dev_unlock(hdev);
584 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
585 sniff_max_interval_set, "%llu\n");
587 static int conn_info_min_age_set(void *data, u64 val)
589 struct hci_dev *hdev = data;
591 if (val == 0 || val > hdev->conn_info_max_age)
595 hdev->conn_info_min_age = val;
596 hci_dev_unlock(hdev);
601 static int conn_info_min_age_get(void *data, u64 *val)
603 struct hci_dev *hdev = data;
606 *val = hdev->conn_info_min_age;
607 hci_dev_unlock(hdev);
612 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
613 conn_info_min_age_set, "%llu\n");
615 static int conn_info_max_age_set(void *data, u64 val)
617 struct hci_dev *hdev = data;
619 if (val == 0 || val < hdev->conn_info_min_age)
623 hdev->conn_info_max_age = val;
624 hci_dev_unlock(hdev);
629 static int conn_info_max_age_get(void *data, u64 *val)
631 struct hci_dev *hdev = data;
634 *val = hdev->conn_info_max_age;
635 hci_dev_unlock(hdev);
640 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
641 conn_info_max_age_set, "%llu\n");
643 static int identity_show(struct seq_file *f, void *p)
645 struct hci_dev *hdev = f->private;
651 hci_copy_identity_address(hdev, &addr, &addr_type);
653 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
654 16, hdev->irk, &hdev->rpa);
656 hci_dev_unlock(hdev);
661 static int identity_open(struct inode *inode, struct file *file)
663 return single_open(file, identity_show, inode->i_private);
666 static const struct file_operations identity_fops = {
667 .open = identity_open,
670 .release = single_release,
673 static int random_address_show(struct seq_file *f, void *p)
675 struct hci_dev *hdev = f->private;
678 seq_printf(f, "%pMR\n", &hdev->random_addr);
679 hci_dev_unlock(hdev);
684 static int random_address_open(struct inode *inode, struct file *file)
686 return single_open(file, random_address_show, inode->i_private);
689 static const struct file_operations random_address_fops = {
690 .open = random_address_open,
693 .release = single_release,
696 static int static_address_show(struct seq_file *f, void *p)
698 struct hci_dev *hdev = f->private;
701 seq_printf(f, "%pMR\n", &hdev->static_addr);
702 hci_dev_unlock(hdev);
707 static int static_address_open(struct inode *inode, struct file *file)
709 return single_open(file, static_address_show, inode->i_private);
712 static const struct file_operations static_address_fops = {
713 .open = static_address_open,
716 .release = single_release,
719 static ssize_t force_static_address_read(struct file *file,
720 char __user *user_buf,
721 size_t count, loff_t *ppos)
723 struct hci_dev *hdev = file->private_data;
726 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
729 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
732 static ssize_t force_static_address_write(struct file *file,
733 const char __user *user_buf,
734 size_t count, loff_t *ppos)
736 struct hci_dev *hdev = file->private_data;
738 size_t buf_size = min(count, (sizeof(buf)-1));
741 if (test_bit(HCI_UP, &hdev->flags))
744 if (copy_from_user(buf, user_buf, buf_size))
747 buf[buf_size] = '\0';
748 if (strtobool(buf, &enable))
751 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
754 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
759 static const struct file_operations force_static_address_fops = {
761 .read = force_static_address_read,
762 .write = force_static_address_write,
763 .llseek = default_llseek,
766 static int white_list_show(struct seq_file *f, void *ptr)
768 struct hci_dev *hdev = f->private;
769 struct bdaddr_list *b;
772 list_for_each_entry(b, &hdev->le_white_list, list)
773 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
774 hci_dev_unlock(hdev);
779 static int white_list_open(struct inode *inode, struct file *file)
781 return single_open(file, white_list_show, inode->i_private);
784 static const struct file_operations white_list_fops = {
785 .open = white_list_open,
788 .release = single_release,
791 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
793 struct hci_dev *hdev = f->private;
797 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
798 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
799 &irk->bdaddr, irk->addr_type,
800 16, irk->val, &irk->rpa);
807 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
809 return single_open(file, identity_resolving_keys_show,
813 static const struct file_operations identity_resolving_keys_fops = {
814 .open = identity_resolving_keys_open,
817 .release = single_release,
820 static int long_term_keys_show(struct seq_file *f, void *ptr)
822 struct hci_dev *hdev = f->private;
826 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
827 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
828 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
829 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
830 __le64_to_cpu(ltk->rand), 16, ltk->val);
836 static int long_term_keys_open(struct inode *inode, struct file *file)
838 return single_open(file, long_term_keys_show, inode->i_private);
841 static const struct file_operations long_term_keys_fops = {
842 .open = long_term_keys_open,
845 .release = single_release,
848 static int conn_min_interval_set(void *data, u64 val)
850 struct hci_dev *hdev = data;
852 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
856 hdev->le_conn_min_interval = val;
857 hci_dev_unlock(hdev);
862 static int conn_min_interval_get(void *data, u64 *val)
864 struct hci_dev *hdev = data;
867 *val = hdev->le_conn_min_interval;
868 hci_dev_unlock(hdev);
873 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
874 conn_min_interval_set, "%llu\n");
876 static int conn_max_interval_set(void *data, u64 val)
878 struct hci_dev *hdev = data;
880 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
884 hdev->le_conn_max_interval = val;
885 hci_dev_unlock(hdev);
890 static int conn_max_interval_get(void *data, u64 *val)
892 struct hci_dev *hdev = data;
895 *val = hdev->le_conn_max_interval;
896 hci_dev_unlock(hdev);
901 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
902 conn_max_interval_set, "%llu\n");
904 static int conn_latency_set(void *data, u64 val)
906 struct hci_dev *hdev = data;
912 hdev->le_conn_latency = val;
913 hci_dev_unlock(hdev);
918 static int conn_latency_get(void *data, u64 *val)
920 struct hci_dev *hdev = data;
923 *val = hdev->le_conn_latency;
924 hci_dev_unlock(hdev);
929 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
930 conn_latency_set, "%llu\n");
932 static int supervision_timeout_set(void *data, u64 val)
934 struct hci_dev *hdev = data;
936 if (val < 0x000a || val > 0x0c80)
940 hdev->le_supv_timeout = val;
941 hci_dev_unlock(hdev);
946 static int supervision_timeout_get(void *data, u64 *val)
948 struct hci_dev *hdev = data;
951 *val = hdev->le_supv_timeout;
952 hci_dev_unlock(hdev);
957 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
958 supervision_timeout_set, "%llu\n");
960 static int adv_channel_map_set(void *data, u64 val)
962 struct hci_dev *hdev = data;
964 if (val < 0x01 || val > 0x07)
968 hdev->le_adv_channel_map = val;
969 hci_dev_unlock(hdev);
974 static int adv_channel_map_get(void *data, u64 *val)
976 struct hci_dev *hdev = data;
979 *val = hdev->le_adv_channel_map;
980 hci_dev_unlock(hdev);
985 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
986 adv_channel_map_set, "%llu\n");
988 static int adv_min_interval_set(void *data, u64 val)
990 struct hci_dev *hdev = data;
992 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
996 hdev->le_adv_min_interval = val;
997 hci_dev_unlock(hdev);
1002 static int adv_min_interval_get(void *data, u64 *val)
1004 struct hci_dev *hdev = data;
1007 *val = hdev->le_adv_min_interval;
1008 hci_dev_unlock(hdev);
1013 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
1014 adv_min_interval_set, "%llu\n");
1016 static int adv_max_interval_set(void *data, u64 val)
1018 struct hci_dev *hdev = data;
1020 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1024 hdev->le_adv_max_interval = val;
1025 hci_dev_unlock(hdev);
1030 static int adv_max_interval_get(void *data, u64 *val)
1032 struct hci_dev *hdev = data;
1035 *val = hdev->le_adv_max_interval;
1036 hci_dev_unlock(hdev);
1041 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1042 adv_max_interval_set, "%llu\n");
1044 static int device_list_show(struct seq_file *f, void *ptr)
1046 struct hci_dev *hdev = f->private;
1047 struct hci_conn_params *p;
1048 struct bdaddr_list *b;
1051 list_for_each_entry(b, &hdev->whitelist, list)
1052 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
1053 list_for_each_entry(p, &hdev->le_conn_params, list) {
1054 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
1057 hci_dev_unlock(hdev);
1062 static int device_list_open(struct inode *inode, struct file *file)
1064 return single_open(file, device_list_show, inode->i_private);
1067 static const struct file_operations device_list_fops = {
1068 .open = device_list_open,
1070 .llseek = seq_lseek,
1071 .release = single_release,
1074 /* ---- HCI requests ---- */
1076 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1078 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1080 if (hdev->req_status == HCI_REQ_PEND) {
1081 hdev->req_result = result;
1082 hdev->req_status = HCI_REQ_DONE;
1083 wake_up_interruptible(&hdev->req_wait_q);
1087 static void hci_req_cancel(struct hci_dev *hdev, int err)
1089 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1091 if (hdev->req_status == HCI_REQ_PEND) {
1092 hdev->req_result = err;
1093 hdev->req_status = HCI_REQ_CANCELED;
1094 wake_up_interruptible(&hdev->req_wait_q);
1098 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1101 struct hci_ev_cmd_complete *ev;
1102 struct hci_event_hdr *hdr;
1103 struct sk_buff *skb;
1107 skb = hdev->recv_evt;
1108 hdev->recv_evt = NULL;
1110 hci_dev_unlock(hdev);
1113 return ERR_PTR(-ENODATA);
1115 if (skb->len < sizeof(*hdr)) {
1116 BT_ERR("Too short HCI event");
1120 hdr = (void *) skb->data;
1121 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1124 if (hdr->evt != event)
1129 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1130 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1134 if (skb->len < sizeof(*ev)) {
1135 BT_ERR("Too short cmd_complete event");
1139 ev = (void *) skb->data;
1140 skb_pull(skb, sizeof(*ev));
1142 if (opcode == __le16_to_cpu(ev->opcode))
1145 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1146 __le16_to_cpu(ev->opcode));
1150 return ERR_PTR(-ENODATA);
1153 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1154 const void *param, u8 event, u32 timeout)
1156 DECLARE_WAITQUEUE(wait, current);
1157 struct hci_request req;
1160 BT_DBG("%s", hdev->name);
1162 hci_req_init(&req, hdev);
1164 hci_req_add_ev(&req, opcode, plen, param, event);
1166 hdev->req_status = HCI_REQ_PEND;
1168 add_wait_queue(&hdev->req_wait_q, &wait);
1169 set_current_state(TASK_INTERRUPTIBLE);
1171 err = hci_req_run(&req, hci_req_sync_complete);
1173 remove_wait_queue(&hdev->req_wait_q, &wait);
1174 set_current_state(TASK_RUNNING);
1175 return ERR_PTR(err);
1178 schedule_timeout(timeout);
1180 remove_wait_queue(&hdev->req_wait_q, &wait);
1182 if (signal_pending(current))
1183 return ERR_PTR(-EINTR);
1185 switch (hdev->req_status) {
1187 err = -bt_to_errno(hdev->req_result);
1190 case HCI_REQ_CANCELED:
1191 err = -hdev->req_result;
1199 hdev->req_status = hdev->req_result = 0;
1201 BT_DBG("%s end: err %d", hdev->name, err);
1204 return ERR_PTR(err);
1206 return hci_get_cmd_complete(hdev, opcode, event);
1208 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1210 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1211 const void *param, u32 timeout)
1213 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1215 EXPORT_SYMBOL(__hci_cmd_sync);
1217 /* Execute request and wait for completion. */
1218 static int __hci_req_sync(struct hci_dev *hdev,
1219 void (*func)(struct hci_request *req,
1221 unsigned long opt, __u32 timeout)
1223 struct hci_request req;
1224 DECLARE_WAITQUEUE(wait, current);
1227 BT_DBG("%s start", hdev->name);
1229 hci_req_init(&req, hdev);
1231 hdev->req_status = HCI_REQ_PEND;
1235 add_wait_queue(&hdev->req_wait_q, &wait);
1236 set_current_state(TASK_INTERRUPTIBLE);
1238 err = hci_req_run(&req, hci_req_sync_complete);
1240 hdev->req_status = 0;
1242 remove_wait_queue(&hdev->req_wait_q, &wait);
1243 set_current_state(TASK_RUNNING);
1245 /* ENODATA means the HCI request command queue is empty.
1246 * This can happen when a request with conditionals doesn't
1247 * trigger any commands to be sent. This is normal behavior
1248 * and should not trigger an error return.
1250 if (err == -ENODATA)
1256 schedule_timeout(timeout);
1258 remove_wait_queue(&hdev->req_wait_q, &wait);
1260 if (signal_pending(current))
1263 switch (hdev->req_status) {
1265 err = -bt_to_errno(hdev->req_result);
1268 case HCI_REQ_CANCELED:
1269 err = -hdev->req_result;
1277 hdev->req_status = hdev->req_result = 0;
1279 BT_DBG("%s end: err %d", hdev->name, err);
1284 static int hci_req_sync(struct hci_dev *hdev,
1285 void (*req)(struct hci_request *req,
1287 unsigned long opt, __u32 timeout)
1291 if (!test_bit(HCI_UP, &hdev->flags))
1294 /* Serialize all requests */
1296 ret = __hci_req_sync(hdev, req, opt, timeout);
1297 hci_req_unlock(hdev);
1302 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1304 BT_DBG("%s %ld", req->hdev->name, opt);
1307 set_bit(HCI_RESET, &req->hdev->flags);
1308 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1311 static void bredr_init(struct hci_request *req)
1313 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1315 /* Read Local Supported Features */
1316 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1318 /* Read Local Version */
1319 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1321 /* Read BD Address */
1322 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1325 static void amp_init(struct hci_request *req)
1327 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1329 /* Read Local Version */
1330 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1332 /* Read Local Supported Commands */
1333 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1335 /* Read Local Supported Features */
1336 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1338 /* Read Local AMP Info */
1339 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1341 /* Read Data Blk size */
1342 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1344 /* Read Flow Control Mode */
1345 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1347 /* Read Location Data */
1348 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1351 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1353 struct hci_dev *hdev = req->hdev;
1355 BT_DBG("%s %ld", hdev->name, opt);
1358 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1359 hci_reset_req(req, 0);
1361 switch (hdev->dev_type) {
1371 BT_ERR("Unknown device type %d", hdev->dev_type);
1376 static void bredr_setup(struct hci_request *req)
1381 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1382 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1384 /* Read Class of Device */
1385 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1387 /* Read Local Name */
1388 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1390 /* Read Voice Setting */
1391 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1393 /* Read Number of Supported IAC */
1394 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1396 /* Read Current IAC LAP */
1397 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1399 /* Clear Event Filters */
1400 flt_type = HCI_FLT_CLEAR_ALL;
1401 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1403 /* Connection accept timeout ~20 secs */
1404 param = cpu_to_le16(0x7d00);
1405 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1408 static void le_setup(struct hci_request *req)
1410 struct hci_dev *hdev = req->hdev;
1412 /* Read LE Buffer Size */
1413 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1415 /* Read LE Local Supported Features */
1416 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1418 /* Read LE Supported States */
1419 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1421 /* Read LE White List Size */
1422 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1424 /* Clear LE White List */
1425 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1427 /* LE-only controllers have LE implicitly enabled */
1428 if (!lmp_bredr_capable(hdev))
1429 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1432 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1434 if (lmp_ext_inq_capable(hdev))
1437 if (lmp_inq_rssi_capable(hdev))
1440 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1441 hdev->lmp_subver == 0x0757)
1444 if (hdev->manufacturer == 15) {
1445 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1447 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1449 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1453 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1454 hdev->lmp_subver == 0x1805)
1460 static void hci_setup_inquiry_mode(struct hci_request *req)
1464 mode = hci_get_inquiry_mode(req->hdev);
1466 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1469 static void hci_setup_event_mask(struct hci_request *req)
1471 struct hci_dev *hdev = req->hdev;
1473 /* The second byte is 0xff instead of 0x9f (two reserved bits
1474 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1475 * command otherwise.
1477 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1479 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1480 * any event mask for pre 1.2 devices.
1482 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1485 if (lmp_bredr_capable(hdev)) {
1486 events[4] |= 0x01; /* Flow Specification Complete */
1487 events[4] |= 0x02; /* Inquiry Result with RSSI */
1488 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1489 events[5] |= 0x08; /* Synchronous Connection Complete */
1490 events[5] |= 0x10; /* Synchronous Connection Changed */
1492 /* Use a different default for LE-only devices */
1493 memset(events, 0, sizeof(events));
1494 events[0] |= 0x10; /* Disconnection Complete */
1495 events[1] |= 0x08; /* Read Remote Version Information Complete */
1496 events[1] |= 0x20; /* Command Complete */
1497 events[1] |= 0x40; /* Command Status */
1498 events[1] |= 0x80; /* Hardware Error */
1499 events[2] |= 0x04; /* Number of Completed Packets */
1500 events[3] |= 0x02; /* Data Buffer Overflow */
1502 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1503 events[0] |= 0x80; /* Encryption Change */
1504 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1508 if (lmp_inq_rssi_capable(hdev))
1509 events[4] |= 0x02; /* Inquiry Result with RSSI */
1511 if (lmp_sniffsubr_capable(hdev))
1512 events[5] |= 0x20; /* Sniff Subrating */
1514 if (lmp_pause_enc_capable(hdev))
1515 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1517 if (lmp_ext_inq_capable(hdev))
1518 events[5] |= 0x40; /* Extended Inquiry Result */
1520 if (lmp_no_flush_capable(hdev))
1521 events[7] |= 0x01; /* Enhanced Flush Complete */
1523 if (lmp_lsto_capable(hdev))
1524 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1526 if (lmp_ssp_capable(hdev)) {
1527 events[6] |= 0x01; /* IO Capability Request */
1528 events[6] |= 0x02; /* IO Capability Response */
1529 events[6] |= 0x04; /* User Confirmation Request */
1530 events[6] |= 0x08; /* User Passkey Request */
1531 events[6] |= 0x10; /* Remote OOB Data Request */
1532 events[6] |= 0x20; /* Simple Pairing Complete */
1533 events[7] |= 0x04; /* User Passkey Notification */
1534 events[7] |= 0x08; /* Keypress Notification */
1535 events[7] |= 0x10; /* Remote Host Supported
1536 * Features Notification
1540 if (lmp_le_capable(hdev))
1541 events[7] |= 0x20; /* LE Meta-Event */
1543 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Init stage 2: queue BR/EDR specific bring-up commands — read local
 * supported commands, SSP mode / EIR, inquiry mode, inquiry TX power,
 * extended features and authentication enable. Runs under __hci_init().
 */
1546 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1548 struct hci_dev *hdev = req->hdev;
1550 if (lmp_bredr_capable(hdev))
1553 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1555 if (lmp_le_capable(hdev))
1558 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1559 * local supported commands HCI command.
1561 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1562 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1564 if (lmp_ssp_capable(hdev)) {
1565 /* When SSP is available, then the host features page
1566 * should also be available as well. However some
1567 * controllers list the max_page as 0 as long as SSP
1568 * has not been enabled. To achieve proper debugging
1569 * output, force the minimum max_page to 1 at least.
1571 hdev->max_page = 0x01;
1573 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1575 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1576 sizeof(mode), &mode);
/* SSP disabled: clear the cached EIR data and write an all-zero EIR. */
1578 struct hci_cp_write_eir cp;
1580 memset(hdev->eir, 0, sizeof(hdev->eir));
1581 memset(&cp, 0, sizeof(cp));
1583 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1587 if (lmp_inq_rssi_capable(hdev))
1588 hci_setup_inquiry_mode(req);
1590 if (lmp_inq_tx_pwr_capable(hdev))
1591 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1593 if (lmp_ext_feat_capable(hdev)) {
1594 struct hci_cp_read_local_ext_features cp;
1597 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1601 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1603 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP features
 * (role switch, hold, sniff, park) and queue a Write Default Link
 * Policy command with the resulting bitmask.
 */
1608 static void hci_setup_link_policy(struct hci_request *req)
1610 struct hci_dev *hdev = req->hdev;
1611 struct hci_cp_write_def_link_policy cp;
1612 u16 link_policy = 0;
1614 if (lmp_rswitch_capable(hdev))
1615 link_policy |= HCI_LP_RSWITCH;
1616 if (lmp_hold_capable(hdev))
1617 link_policy |= HCI_LP_HOLD;
1618 if (lmp_sniff_capable(hdev))
1619 link_policy |= HCI_LP_SNIFF;
1620 if (lmp_park_capable(hdev))
1621 link_policy |= HCI_LP_PARK;
1623 cp.policy = cpu_to_le16(link_policy);
1624 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported when the desired host LE setting
 * differs from what the controller currently advertises. Only done
 * for BR/EDR-capable (dual-mode) controllers.
 */
1627 static void hci_set_le_support(struct hci_request *req)
1629 struct hci_dev *hdev = req->hdev;
1630 struct hci_cp_write_le_host_supported cp;
1632 /* LE-only devices do not support explicit enablement */
1633 if (!lmp_bredr_capable(hdev))
1636 memset(&cp, 0, sizeof(cp));
1638 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
/* Only send the command if it would actually change the setting. */
1643 if (cp.le != lmp_host_le_capable(hdev))
1644 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Enable the page-2 HCI events this controller can generate:
 * Connectionless Slave Broadcast master/slave events and the
 * Authenticated Payload Timeout Expired event.
 */
1648 static void hci_set_event_mask_page_2(struct hci_request *req)
1650 struct hci_dev *hdev = req->hdev;
1651 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1653 /* If Connectionless Slave Broadcast master role is supported
1654 * enable all necessary events for it.
1656 if (lmp_csb_master_capable(hdev)) {
1657 events[1] |= 0x40; /* Triggered Clock Capture */
1658 events[1] |= 0x80; /* Synchronization Train Complete */
1659 events[2] |= 0x10; /* Slave Page Response Timeout */
1660 events[2] |= 0x20; /* CSB Channel Map Change */
1663 /* If Connectionless Slave Broadcast slave role is supported
1664 * enable all necessary events for it.
1666 if (lmp_csb_slave_capable(hdev)) {
1667 events[2] |= 0x01; /* Synchronization Train Received */
1668 events[2] |= 0x02; /* CSB Receive */
1669 events[2] |= 0x04; /* CSB Timeout */
1670 events[2] |= 0x08; /* Truncated Page Complete */
1673 /* Enable Authenticated Payload Timeout Expired event if supported */
1674 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1677 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Init stage 3: set up the event mask, delete stored link keys (when
 * supported), configure link policy and page scan parameters, build
 * the LE event mask and read extended feature pages beyond page 1.
 */
1680 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1682 struct hci_dev *hdev = req->hdev;
1685 hci_setup_event_mask(req);
1687 /* Some Broadcom based Bluetooth controllers do not support the
1688 * Delete Stored Link Key command. They are clearly indicating its
1689 * absence in the bit mask of supported commands.
1691 * Check the supported commands and only if the command is marked
1692 * as supported send it. If not supported assume that the controller
1693 * does not have actual support for stored link keys which makes this
1694 * command redundant anyway.
1696 * Some controllers indicate that they support handling deleting
1697 * stored link keys, but they don't. The quirk lets a driver
1698 * just disable this command.
1700 if (hdev->commands[6] & 0x80 &&
1701 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1702 struct hci_cp_delete_stored_link_key cp;
1704 bacpy(&cp.bdaddr, BDADDR_ANY);
1705 cp.delete_all = 0x01;
1706 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1710 if (hdev->commands[5] & 0x10)
1711 hci_setup_link_policy(req);
1713 if (hdev->commands[8] & 0x01)
1714 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1716 /* Some older Broadcom based Bluetooth 1.2 controllers do not
1717 * support the Read Page Scan Type command. Check support for
1718 * this command in the bit mask of supported commands.
1720 if (hdev->commands[13] & 0x01)
1721 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1723 if (lmp_le_capable(hdev)) {
1726 memset(events, 0, sizeof(events));
1729 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1730 events[0] |= 0x10; /* LE Long Term Key Request */
1732 /* If controller supports the Connection Parameters Request
1733 * Link Layer Procedure, enable the corresponding event.
1735 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1736 events[0] |= 0x20; /* LE Remote Connection
1740 /* If the controller supports Extended Scanner Filter
1741 * Policies, enable the corresponding event.
1743 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
1744 events[1] |= 0x04; /* LE Direct Advertising
1748 /* If the controller supports the LE Read Local P-256
1749 * Public Key command, enable the corresponding event.
1751 if (hdev->commands[34] & 0x02)
1752 events[0] |= 0x80; /* LE Read Local P-256
1753 * Public Key Complete
1756 /* If the controller supports the LE Generate DHKey
1757 * command, enable the corresponding event.
1759 if (hdev->commands[34] & 0x04)
1760 events[1] |= 0x01; /* LE Generate DHKey Complete */
1762 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1765 if (hdev->commands[25] & 0x40) {
1766 /* Read LE Advertising Channel TX Power */
1767 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1770 hci_set_le_support(req);
1773 /* Read features beyond page 1 if available */
1774 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1775 struct hci_cp_read_local_ext_features cp;
1778 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Init stage 4: optional feature queries gated on the supported
 * commands bitmask (event mask page 2, local codecs, MWS transport
 * config, sync train params) plus Secure Connections enablement.
 */
1783 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1785 struct hci_dev *hdev = req->hdev;
1787 /* Set event mask page 2 if the HCI command for it is supported */
1788 if (hdev->commands[22] & 0x04)
1789 hci_set_event_mask_page_2(req);
1791 /* Read local codec list if the HCI command is supported */
1792 if (hdev->commands[29] & 0x20)
1793 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1795 /* Get MWS transport configuration if the HCI command is supported */
1796 if (hdev->commands[30] & 0x08)
1797 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1799 /* Check for Synchronization Train support */
1800 if (lmp_sync_train_capable(hdev))
1801 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1803 /* Enable Secure Connections if supported and configured */
1804 if (bredr_sc_enabled(hdev)) {
1806 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1807 sizeof(support), &support);
/* Run the staged HCI init sequence (init1..init4) synchronously and,
 * during the initial HCI_SETUP phase only, create the per-controller
 * debugfs entries for the capabilities this controller reports.
 * Returns 0 on success or a negative error from __hci_req_sync().
 */
1811 static int __hci_init(struct hci_dev *hdev)
1815 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1819 /* The Device Under Test (DUT) mode is special and available for
1820 * all controller types. So just create it early on.
1822 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1823 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1827 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1828 * BR/EDR/LE type controllers. AMP controllers only need the
1831 if (hdev->dev_type != HCI_BREDR)
1834 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1838 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1842 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1846 /* Only create debugfs entries during the initial setup
1847 * phase and not every time the controller gets powered on.
1849 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Common debugfs entries, available for every controller type. */
1852 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1854 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1855 &hdev->manufacturer);
1856 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1857 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1858 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1860 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1862 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1864 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1865 &conn_info_min_age_fops);
1866 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1867 &conn_info_max_age_fops);
1869 hci_debugfs_create_common(hdev);
/* BR/EDR specific debugfs entries. */
1871 if (lmp_bredr_capable(hdev)) {
1872 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1873 hdev, &inquiry_cache_fops);
1874 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1875 hdev, &link_keys_fops);
1876 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1877 hdev, &dev_class_fops);
1878 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1879 hdev, &voice_setting_fops);
1881 hci_debugfs_create_bredr(hdev);
/* Secure Simple Pairing related debugfs entries. */
1884 if (lmp_ssp_capable(hdev)) {
1885 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1886 hdev, &auto_accept_delay_fops);
1887 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1888 hdev, &force_sc_support_fops);
1889 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1890 hdev, &sc_only_mode_fops);
1891 if (lmp_le_capable(hdev))
1892 debugfs_create_file("force_lesc_support", 0644,
1893 hdev->debugfs, hdev,
1894 &force_lesc_support_fops);
/* Sniff mode timing knobs, only when sniff is supported. */
1897 if (lmp_sniff_capable(hdev)) {
1898 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1899 hdev, &idle_timeout_fops);
1900 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1901 hdev, &sniff_min_interval_fops);
1902 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1903 hdev, &sniff_max_interval_fops);
/* LE specific debugfs entries. */
1906 if (lmp_le_capable(hdev)) {
1907 debugfs_create_file("identity", 0400, hdev->debugfs,
1908 hdev, &identity_fops);
1909 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1910 hdev, &rpa_timeout_fops);
1911 debugfs_create_file("random_address", 0444, hdev->debugfs,
1912 hdev, &random_address_fops);
1913 debugfs_create_file("static_address", 0444, hdev->debugfs,
1914 hdev, &static_address_fops);
1916 /* For controllers with a public address, provide a debug
1917 * option to force the usage of the configured static
1918 * address. By default the public address is used.
1920 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1921 debugfs_create_file("force_static_address", 0644,
1922 hdev->debugfs, hdev,
1923 &force_static_address_fops);
1925 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1926 &hdev->le_white_list_size);
1927 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1929 debugfs_create_file("identity_resolving_keys", 0400,
1930 hdev->debugfs, hdev,
1931 &identity_resolving_keys_fops);
1932 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1933 hdev, &long_term_keys_fops);
1934 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1935 hdev, &conn_min_interval_fops);
1936 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1937 hdev, &conn_max_interval_fops);
1938 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1939 hdev, &conn_latency_fops);
1940 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1941 hdev, &supervision_timeout_fops);
1942 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1943 hdev, &adv_channel_map_fops);
1944 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1945 hdev, &adv_min_interval_fops);
1946 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1947 hdev, &adv_max_interval_fops);
1948 debugfs_create_u16("discov_interleaved_timeout", 0644,
1950 &hdev->discov_interleaved_timeout);
1952 hci_debugfs_create_le(hdev);
/* Minimal init request for unconfigured controllers: optionally reset
 * (unless the driver resets on close), then read the local version and
 * — when the driver can program a new BD address — the BD address.
 */
1960 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1962 struct hci_dev *hdev = req->hdev;
1964 BT_DBG("%s %ld", hdev->name, opt);
1967 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1968 hci_reset_req(req, 0);
1970 /* Read Local Version */
1971 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1973 /* Read BD Address */
1974 if (hdev->set_bdaddr)
1975 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Synchronously run the unconfigured-controller init (hci_init0_req).
 * Raw devices are skipped entirely.
 */
1978 static int __hci_unconf_init(struct hci_dev *hdev)
1982 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1985 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request helper: queue a Write Scan Enable command carrying the scan
 * mode taken from 'opt' (inquiry and/or page scan).
 */
1992 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1996 BT_DBG("%s %x", req->hdev->name, scan);
1998 /* Inquiry and Page scans */
1999 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request helper: queue a Write Authentication Enable command with the
 * setting taken from 'opt'.
 */
2002 static void hci_auth_req(struct hci_request *req, unsigned long opt)
2006 BT_DBG("%s %x", req->hdev->name, auth);
2008 /* Authentication */
2009 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request helper: queue a Write Encryption Mode command with the
 * setting taken from 'opt'.
 */
2012 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
2016 BT_DBG("%s %x", req->hdev->name, encrypt);
2019 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request helper: queue a Write Default Link Policy command with the
 * little-endian policy value taken from 'opt'.
 */
2022 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
2024 __le16 policy = cpu_to_le16(opt);
2026 BT_DBG("%s %x", req->hdev->name, policy);
2028 /* Default link policy */
2029 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
2032 /* Get HCI device by index.
2033 * Device is held on return. */
/* Walks hci_dev_list under the read lock; the caller must drop the
 * reference with hci_dev_put() when done. Returns NULL if no device
 * with the given index exists.
 */
2034 struct hci_dev *hci_dev_get(int index)
2036 struct hci_dev *hdev = NULL, *d;
2038 BT_DBG("%d", index);
2043 read_lock(&hci_dev_list_lock);
2044 list_for_each_entry(d, &hci_dev_list, list) {
2045 if (d->id == index) {
2046 hdev = hci_dev_hold(d);
2050 read_unlock(&hci_dev_list_lock);
2054 /* ---- Inquiry support ---- */
/* Return true while a discovery procedure is in progress, i.e. the
 * discovery state machine is in FINDING or RESOLVING.
 */
2056 bool hci_discovery_active(struct hci_dev *hdev)
2058 struct discovery_state *discov = &hdev->discovery;
2060 switch (discov->state) {
2061 case DISCOVERY_FINDING:
2062 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events. A no-op when the state is unchanged. On
 * STOPPED the background scan is re-evaluated.
 */
2070 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2072 int old_state = hdev->discovery.state;
2074 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2076 if (old_state == state)
2079 hdev->discovery.state = state;
2082 case DISCOVERY_STOPPED:
2083 hci_update_background_scan(hdev);
2085 if (old_state != DISCOVERY_STARTING)
2086 mgmt_discovering(hdev, 0);
2088 case DISCOVERY_STARTING:
2090 case DISCOVERY_FINDING:
2091 mgmt_discovering(hdev, 1);
2093 case DISCOVERY_RESOLVING:
2095 case DISCOVERY_STOPPING:
/* Drop every entry from the inquiry cache and reinitialize the
 * unknown/resolve sub-lists.
 */
2100 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2102 struct discovery_state *cache = &hdev->discovery;
2103 struct inquiry_entry *p, *n;
2105 list_for_each_entry_safe(p, n, &cache->all, all) {
2110 INIT_LIST_HEAD(&cache->unknown);
2111 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by BD address on the 'all' list;
 * returns NULL if the address is not cached.
 */
2114 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2117 struct discovery_state *cache = &hdev->discovery;
2118 struct inquiry_entry *e;
2120 BT_DBG("cache %p, %pMR", cache, bdaddr);
2122 list_for_each_entry(e, &cache->all, all) {
2123 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry by BD address on the 'unknown' (name not yet known)
 * list; returns NULL if not present.
 */
2130 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2133 struct discovery_state *cache = &hdev->discovery;
2134 struct inquiry_entry *e;
2136 BT_DBG("cache %p, %pMR", cache, bdaddr);
2138 list_for_each_entry(e, &cache->unknown, list) {
2139 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the 'resolve' list. Passing BDADDR_ANY matches the
 * first entry whose name_state equals 'state'; otherwise the match is
 * by exact BD address.
 */
2146 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2150 struct discovery_state *cache = &hdev->discovery;
2151 struct inquiry_entry *e;
2153 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2155 list_for_each_entry(e, &cache->resolve, list) {
2156 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2158 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the 'resolve' list so the list stays sorted
 * for name resolution: entries with pending names are skipped and the
 * remaining ones are ordered by descending |RSSI| (strongest first).
 */
2165 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2166 struct inquiry_entry *ie)
2168 struct discovery_state *cache = &hdev->discovery;
2169 struct list_head *pos = &cache->resolve;
2170 struct inquiry_entry *p;
2172 list_del(&ie->list);
2174 list_for_each_entry(p, &cache->resolve, list) {
2175 if (p->name_state != NAME_PENDING &&
2176 abs(p->data.rssi) >= abs(ie->data.rssi))
2181 list_add(&ie->list, pos);
/* Insert or refresh an inquiry cache entry for a discovered device and
 * return MGMT_DEV_FOUND_* flags describing it (legacy pairing,
 * confirm-name). Existing entries get their RSSI refreshed and are
 * re-sorted on the resolve list when a name lookup is still needed.
 */
2184 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2187 struct discovery_state *cache = &hdev->discovery;
2188 struct inquiry_entry *ie;
2191 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2193 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2195 if (!data->ssp_mode)
2196 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2198 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2200 if (!ie->data.ssp_mode)
2201 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2203 if (ie->name_state == NAME_NEEDED &&
2204 data->rssi != ie->data.rssi) {
2205 ie->data.rssi = data->rssi;
2206 hci_inquiry_cache_update_resolve(hdev, ie);
2212 /* Entry not in the cache. Add new one. */
2213 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2215 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2219 list_add(&ie->all, &cache->all);
2222 ie->name_state = NAME_KNOWN;
2224 ie->name_state = NAME_NOT_KNOWN;
2225 list_add(&ie->list, &cache->unknown);
/* Name became known: promote the entry off the unknown list. */
2229 if (name_known && ie->name_state != NAME_KNOWN &&
2230 ie->name_state != NAME_PENDING) {
2231 ie->name_state = NAME_KNOWN;
2232 list_del(&ie->list);
2235 memcpy(&ie->data, data, sizeof(*data));
2236 ie->timestamp = jiffies;
2237 cache->timestamp = jiffies;
2239 if (ie->name_state == NAME_NOT_KNOWN)
2240 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to 'num' cached inquiry results into 'buf' as an array of
 * struct inquiry_info for the HCIINQUIRY ioctl; returns the number of
 * entries copied.
 */
2246 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2248 struct discovery_state *cache = &hdev->discovery;
2249 struct inquiry_info *info = (struct inquiry_info *) buf;
2250 struct inquiry_entry *e;
2253 list_for_each_entry(e, &cache->all, all) {
2254 struct inquiry_data *data = &e->data;
2259 bacpy(&info->bdaddr, &data->bdaddr);
2260 info->pscan_rep_mode = data->pscan_rep_mode;
2261 info->pscan_period_mode = data->pscan_period_mode;
2262 info->pscan_mode = data->pscan_mode;
2263 memcpy(info->dev_class, data->dev_class, 3);
2264 info->clock_offset = data->clock_offset;
2270 BT_DBG("cache %p, copied %d", cache, copied);
/* Request helper: queue an Inquiry command built from the ioctl's
 * struct hci_inquiry_req (passed via 'opt'); skipped when an inquiry
 * is already running.
 */
2274 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2276 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2277 struct hci_dev *hdev = req->hdev;
2278 struct hci_cp_inquiry cp;
2280 BT_DBG("%s", hdev->name);
2282 if (test_bit(HCI_INQUIRY, &hdev->flags))
2286 memcpy(&cp.lap, &ir->lap, 3);
2287 cp.length = ir->length;
2288 cp.num_rsp = ir->num_rsp;
2289 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl backend: validate the device state, optionally
 * flush a stale inquiry cache, run the inquiry synchronously, then
 * copy the cached results back to user space. Returns 0 or a negative
 * errno (-EFAULT on copy failures, -EINTR if interrupted by a signal).
 */
2292 int hci_inquiry(void __user *arg)
2294 __u8 __user *ptr = arg;
2295 struct hci_inquiry_req ir;
2296 struct hci_dev *hdev;
2297 int err = 0, do_inquiry = 0, max_rsp;
2301 if (copy_from_user(&ir, ptr, sizeof(ir)))
2304 hdev = hci_dev_get(ir.dev_id);
2308 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2313 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2318 if (hdev->dev_type != HCI_BREDR) {
2323 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush the cache when it is too old, empty, or the caller asked. */
2329 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2330 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2331 hci_inquiry_cache_flush(hdev);
2334 hci_dev_unlock(hdev);
2336 timeo = ir.length * msecs_to_jiffies(2000);
2339 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2344 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2345 * cleared). If it is interrupted by a signal, return -EINTR.
2347 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2348 TASK_INTERRUPTIBLE))
2352 /* for unlimited number of responses we will use buffer with
2355 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2357 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2358 * copy it to the user space.
2360 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2367 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2368 hci_dev_unlock(hdev);
2370 BT_DBG("num_rsp %d", ir.num_rsp);
2372 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2374 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on a controller: run rfkill/address sanity checks, open the
 * transport, perform the driver setup and the staged HCI init, then
 * mark the device HCI_UP and notify mgmt. On init failure all work
 * and queues are flushed and the transport is closed again.
 * Returns 0 on success or a negative errno.
 */
2387 static int hci_dev_do_open(struct hci_dev *hdev)
2391 BT_DBG("%s %p", hdev->name, hdev);
2395 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2400 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2401 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2402 /* Check for rfkill but allow the HCI setup stage to
2403 * proceed (which in itself doesn't cause any RF activity).
2405 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2410 /* Check for valid public address or a configured static
2411 * random address, but let the HCI setup proceed to
2412 * be able to determine if there is a public address
2415 * In case of user channel usage, it is not important
2416 * if a public address or static random address is
2419 * This check is only valid for BR/EDR controllers
2420 * since AMP controllers do not have an address.
2422 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2423 hdev->dev_type == HCI_BREDR &&
2424 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2425 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2426 ret = -EADDRNOTAVAIL;
2431 if (test_bit(HCI_UP, &hdev->flags)) {
2436 if (hdev->open(hdev)) {
2441 atomic_set(&hdev->cmd_cnt, 1);
2442 set_bit(HCI_INIT, &hdev->flags);
2444 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2446 ret = hdev->setup(hdev);
2448 /* The transport driver can set these quirks before
2449 * creating the HCI device or in its setup callback.
2451 * In case any of them is set, the controller has to
2452 * start up as unconfigured.
2454 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2455 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2456 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2458 /* For an unconfigured controller it is required to
2459 * read at least the version information provided by
2460 * the Read Local Version Information command.
2462 * If the set_bdaddr driver callback is provided, then
2463 * also the original Bluetooth public device address
2464 * will be read using the Read BD Address command.
2466 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2467 ret = __hci_unconf_init(hdev);
2470 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2471 /* If public address change is configured, ensure that
2472 * the address gets programmed. If the driver does not
2473 * support changing the public address, fail the power
2476 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2478 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2480 ret = -EADDRNOTAVAIL;
2484 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2485 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2486 ret = __hci_init(hdev);
2489 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark the device up and announce it. */
2493 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2494 set_bit(HCI_UP, &hdev->flags);
2495 hci_notify(hdev, HCI_DEV_UP);
2496 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2497 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2498 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2499 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2500 hdev->dev_type == HCI_BREDR) {
2502 mgmt_powered(hdev, 1);
2503 hci_dev_unlock(hdev);
2506 /* Init failed, cleanup */
2507 flush_work(&hdev->tx_work);
2508 flush_work(&hdev->cmd_work);
2509 flush_work(&hdev->rx_work);
2511 skb_queue_purge(&hdev->cmd_q);
2512 skb_queue_purge(&hdev->rx_q);
2517 if (hdev->sent_cmd) {
2518 kfree_skb(hdev->sent_cmd);
2519 hdev->sent_cmd = NULL;
/* Keep only the HCI_RAW bit across a failed open. */
2523 hdev->flags &= BIT(HCI_RAW);
2527 hci_req_unlock(hdev);
2531 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl backend: resolve the device, reject unconfigured
 * controllers outside the user channel, cancel pending auto-power-off
 * work, wait for setup to finish, then call hci_dev_do_open().
 * Returns 0 or a negative errno.
 */
2533 int hci_dev_open(__u16 dev)
2535 struct hci_dev *hdev;
2538 hdev = hci_dev_get(dev);
2542 /* Devices that are marked as unconfigured can only be powered
2543 * up as user channel. Trying to bring them up as normal devices
2544 * will result into a failure. Only user channel operation is
2547 * When this function is called for a user channel, the flag
2548 * HCI_USER_CHANNEL will be set first before attempting to
2551 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2552 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2557 /* We need to ensure that no other power on/off work is pending
2558 * before proceeding to call hci_dev_do_open. This is
2559 * particularly important if the setup procedure has not yet
2562 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2563 cancel_delayed_work(&hdev->power_off);
2565 /* After this call it is guaranteed that the setup procedure
2566 * has finished. This means that error conditions like RFKILL
2567 * or no valid public or static random address apply.
2569 flush_workqueue(hdev->req_workqueue);
2571 /* For controllers not using the management interface and that
2572 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2573 * so that pairing works for them. Once the management interface
2574 * is in use this bit will be cleared again and userspace has
2575 * to explicitly enable it.
2577 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2578 !test_bit(HCI_MGMT, &hdev->dev_flags))
2579 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2581 err = hci_dev_do_open(hdev);
2588 /* This function requires the caller holds hdev->lock */
/* Drop any connection references attached to pending LE connection
 * parameters and clear their action lists.
 */
2589 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2591 struct hci_conn_params *p;
2593 list_for_each_entry(p, &hdev->le_conn_params, list) {
2595 hci_conn_drop(p->conn);
2596 hci_conn_put(p->conn);
2599 list_del_init(&p->action);
2602 BT_DBG("All LE pending actions cleared");
/* Power off a controller: cancel pending work, flush all work items
 * and queues, flush connections and caches, optionally reset the
 * controller (HCI_QUIRK_RESET_ON_CLOSE), drop the last sent command
 * and clear all non-persistent state. Safe to call when already down.
 */
2605 static int hci_dev_do_close(struct hci_dev *hdev)
2607 BT_DBG("%s %p", hdev->name, hdev);
2609 cancel_delayed_work(&hdev->power_off);
2611 hci_req_cancel(hdev, ENODEV);
2614 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2615 cancel_delayed_work_sync(&hdev->cmd_timer);
2616 hci_req_unlock(hdev);
2620 /* Flush RX and TX works */
2621 flush_work(&hdev->tx_work);
2622 flush_work(&hdev->rx_work);
2624 if (hdev->discov_timeout > 0) {
2625 cancel_delayed_work(&hdev->discov_off);
2626 hdev->discov_timeout = 0;
2627 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2628 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2631 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2632 cancel_delayed_work(&hdev->service_cache);
2634 cancel_delayed_work_sync(&hdev->le_scan_disable);
2636 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2637 cancel_delayed_work_sync(&hdev->rpa_expired);
2639 /* Avoid potential lockdep warnings from the *_flush() calls by
2640 * ensuring the workqueue is empty up front.
2642 drain_workqueue(hdev->workqueue);
2646 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2647 if (hdev->dev_type == HCI_BREDR)
2648 mgmt_powered(hdev, 0);
2651 hci_inquiry_cache_flush(hdev);
2652 hci_pend_le_actions_clear(hdev);
2653 hci_conn_hash_flush(hdev);
2654 hci_dev_unlock(hdev);
2656 hci_notify(hdev, HCI_DEV_DOWN);
/* Optionally issue a controller reset before closing the transport. */
2662 skb_queue_purge(&hdev->cmd_q);
2663 atomic_set(&hdev->cmd_cnt, 1);
2664 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2665 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2666 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2667 set_bit(HCI_INIT, &hdev->flags);
2668 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2669 clear_bit(HCI_INIT, &hdev->flags);
2672 /* flush cmd work */
2673 flush_work(&hdev->cmd_work);
2676 skb_queue_purge(&hdev->rx_q);
2677 skb_queue_purge(&hdev->cmd_q);
2678 skb_queue_purge(&hdev->raw_q);
2680 /* Drop last sent command */
2681 if (hdev->sent_cmd) {
2682 cancel_delayed_work_sync(&hdev->cmd_timer);
2683 kfree_skb(hdev->sent_cmd);
2684 hdev->sent_cmd = NULL;
2687 kfree_skb(hdev->recv_evt);
2688 hdev->recv_evt = NULL;
2690 /* After this point our queues are empty
2691 * and no tasks are scheduled. */
/* Keep only HCI_RAW and the persistent dev_flags across the close. */
2695 hdev->flags &= BIT(HCI_RAW);
2696 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2698 /* Controller radio is available but is currently powered down */
2699 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2701 memset(hdev->eir, 0, sizeof(hdev->eir));
2702 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2703 bacpy(&hdev->random_addr, BDADDR_ANY);
2705 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl backend: reject user-channel devices, cancel the
 * pending auto-power-off work, then call hci_dev_do_close().
 */
2711 int hci_dev_close(__u16 dev)
2713 struct hci_dev *hdev;
2716 hdev = hci_dev_get(dev);
2720 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2725 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2726 cancel_delayed_work(&hdev->power_off);
2728 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl backend: for an up, non-user-channel, configured
 * device, purge the RX/command queues, flush connections and the
 * inquiry cache, reset the flow-control counters and issue an HCI
 * Reset synchronously.
 */
2735 int hci_dev_reset(__u16 dev)
2737 struct hci_dev *hdev;
2740 hdev = hci_dev_get(dev);
2746 if (!test_bit(HCI_UP, &hdev->flags)) {
2751 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2756 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2762 skb_queue_purge(&hdev->rx_q);
2763 skb_queue_purge(&hdev->cmd_q);
2765 /* Avoid potential lockdep warnings from the *_flush() calls by
2766 * ensuring the workqueue is empty up front.
2768 drain_workqueue(hdev->workqueue);
2771 hci_inquiry_cache_flush(hdev);
2772 hci_conn_hash_flush(hdev);
2773 hci_dev_unlock(hdev);
2778 atomic_set(&hdev->cmd_cnt, 1);
2779 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2781 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2784 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl backend: zero the device statistics counters for
 * a non-user-channel, configured device.
 */
2789 int hci_dev_reset_stat(__u16 dev)
2791 struct hci_dev *hdev;
2794 hdev = hci_dev_get(dev);
2798 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2803 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2808 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a legacy (non-mgmt) scan-mode change into the CONNECTABLE /
 * DISCOVERABLE dev_flags and, when the mgmt interface is in use,
 * emit new-settings events and refresh the advertising data.
 */
2815 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2817 bool conn_changed, discov_changed;
2819 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2821 if ((scan & SCAN_PAGE))
2822 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2825 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2828 if ((scan & SCAN_INQUIRY)) {
2829 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2832 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2833 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2837 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2840 if (conn_changed || discov_changed) {
2841 /* In case this was disabled through mgmt */
2842 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2844 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2845 mgmt_update_adv_data(hdev);
2847 mgmt_new_settings(hdev);
/* Legacy HCISET* ioctl dispatcher: copies a struct hci_dev_req from
 * user space, validates device state, then applies the requested
 * setting (auth, encrypt, scan, link policy/mode, packet type,
 * ACL/SCO MTU). Returns 0 or a negative errno.
 */
2851 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2853 struct hci_dev *hdev;
2854 struct hci_dev_req dr;
2857 if (copy_from_user(&dr, arg, sizeof(dr)))
2860 hdev = hci_dev_get(dr.dev_id);
2864 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2869 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2874 if (hdev->dev_type != HCI_BREDR) {
2879 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2886 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2891 if (!lmp_encrypt_capable(hdev)) {
2896 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2897 /* Auth must be enabled first */
2898 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2904 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2909 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2912 /* Ensure that the connectable and discoverable states
2913 * get correctly modified as this was a non-mgmt change.
2916 hci_update_scan_state(hdev, dr.dev_opt);
2920 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2924 case HCISETLINKMODE:
2925 hdev->link_mode = ((__u16) dr.dev_opt) &
2926 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2930 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits and packet count below. */
2934 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2935 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2939 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2940 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: copy the list of registered controllers (id and
 * flags per device) back to userspace.  dev_num is capped to keep the
 * kmalloc below bounded.
 */
2953 int hci_get_dev_list(void __user *arg)
2955 struct hci_dev *hdev;
2956 struct hci_dev_list_req *dl;
2957 struct hci_dev_req *dr;
2958 int n = 0, size, err;
2961 if (get_user(dev_num, (__u16 __user *) arg))
2964 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2967 size = sizeof(*dl) + dev_num * sizeof(*dr);
2969 dl = kzalloc(size, GFP_KERNEL)
2975 read_lock(&hci_dev_list_lock);
2976 list_for_each_entry(hdev, &hci_dev_list, list) {
2977 unsigned long flags = hdev->flags;
2979 /* When the auto-off is configured it means the transport
2980 * is running, but in that case still indicate that the
2981 * device is actually down.
2983 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2984 flags &= ~BIT(HCI_UP);
2986 (dr + n)->dev_id = hdev->id;
2987 (dr + n)->dev_opt = flags;
2992 read_unlock(&hci_dev_list_lock);
/* Shrink to the number of devices actually found */
2995 size = sizeof(*dl) + n * sizeof(*dr);
2997 err = copy_to_user(arg, dl, size);
3000 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for one controller
 * and copy it to userspace.
 */
3003 int hci_get_dev_info(void __user *arg)
3005 struct hci_dev *hdev;
3006 struct hci_dev_info di;
3007 unsigned long flags;
3010 if (copy_from_user(&di, arg, sizeof(di)))
3013 hdev = hci_dev_get(di.dev_id);
3017 /* When the auto-off is configured it means the transport
3018 * is running, but in that case still indicate that the
3019 * device is actually down.
3021 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3022 flags = hdev->flags & ~BIT(HCI_UP);
3024 flags = hdev->flags;
3026 strcpy(di.name, hdev->name);
3027 di.bdaddr = hdev->bdaddr;
/* type byte packs bus (low nibble) and dev_type (next two bits) */
3028 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
3030 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report the LE buffer parameters in the ACL slots */
3031 if (lmp_bredr_capable(hdev)) {
3032 di.acl_mtu = hdev->acl_mtu;
3033 di.acl_pkts = hdev->acl_pkts;
3034 di.sco_mtu = hdev->sco_mtu;
3035 di.sco_pkts = hdev->sco_pkts;
3037 di.acl_mtu = hdev->le_mtu;
3038 di.acl_pkts = hdev->le_pkts;
3042 di.link_policy = hdev->link_policy;
3043 di.link_mode = hdev->link_mode;
3045 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3046 memcpy(&di.features, &hdev->features, sizeof(di.features));
3048 if (copy_to_user(arg, &di, sizeof(di)))
3056 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: on block, mark the device rfkilled and close it
 * (unless still in setup/config); on unblock, just clear the flag.
 */
3058 static int hci_rfkill_set_block(void *data, bool blocked)
3060 struct hci_dev *hdev = data;
3062 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
/* User channel owns the device; rfkill must not touch it */
3064 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3068 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3069 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3070 !test_bit(HCI_CONFIG, &hdev->dev_flags))
3071 hci_dev_do_close(hdev);
3073 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations: only the block/unblock hook is implemented */
3079 static const struct rfkill_ops hci_rfkill_ops = {
3080 .set_block = hci_rfkill_set_block,
/* Workqueue item: power on a controller (mgmt Set Powered / auto-power
 * at register time).  Handles deferred error conditions, the auto-off
 * timer, and the SETUP->configured / CONFIG->configured transitions.
 */
3083 static void hci_power_on(struct work_struct *work)
3085 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3088 BT_DBG("%s", hdev->name);
3090 err = hci_dev_do_open(hdev);
3093 mgmt_set_powered_failed(hdev, err);
3094 hci_dev_unlock(hdev);
3098 /* During the HCI setup phase, a few error conditions are
3099 * ignored and they need to be checked now. If they are still
3100 * valid, it is important to turn the device back off.
3102 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3103 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3104 (hdev->dev_type == HCI_BREDR &&
3105 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3106 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3107 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3108 hci_dev_do_close(hdev);
3109 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
/* Powered on automatically: schedule power-off unless userspace claims it */
3110 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3111 HCI_AUTO_OFF_TIMEOUT);
3114 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3115 /* For unconfigured devices, set the HCI_RAW flag
3116 * so that userspace can easily identify them.
3118 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3119 set_bit(HCI_RAW, &hdev->flags);
3121 /* For fully configured devices, this will send
3122 * the Index Added event. For unconfigured devices,
3123 * it will send Unconfigued Index Added event.
3125 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3126 * and no event will be send.
3128 mgmt_index_added(hdev);
3129 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3130 /* When the controller is now configured, then it
3131 * is important to clear the HCI_RAW flag.
3133 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3134 clear_bit(HCI_RAW, &hdev->flags);
3136 /* Powering on the controller with HCI_CONFIG set only
3137 * happens with the transition from unconfigured to
3138 * configured. This will send the Index Added event.
3140 mgmt_index_added(hdev);
/* Delayed-work item: power the controller off */
3144 static void hci_power_off(struct work_struct *work)
3146 struct hci_dev *hdev = container_of(work, struct hci_dev,
3149 BT_DBG("%s", hdev->name);
3151 hci_dev_do_close(hdev);
/* Delayed-work item: discoverable timeout expired; let mgmt turn
 * discoverable mode back off.
 */
3154 static void hci_discov_off(struct work_struct *work)
3156 struct hci_dev *hdev;
3158 hdev = container_of(work, struct hci_dev, discov_off.work);
3160 BT_DBG("%s", hdev->name);
3162 mgmt_discoverable_timeout(hdev);
/* Free every registered service UUID on the controller */
3165 void hci_uuids_clear(struct hci_dev *hdev)
3167 struct bt_uuid *uuid, *tmp;
3169 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3170 list_del(&uuid->list);
/* Drop all stored BR/EDR link keys (RCU-safe removal and free) */
3175 void hci_link_keys_clear(struct hci_dev *hdev)
3177 struct link_key *key;
3179 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3180 list_del_rcu(&key->list);
3181 kfree_rcu(key, rcu);
/* Drop all stored SMP long term keys (RCU-safe) */
3185 void hci_smp_ltks_clear(struct hci_dev *hdev)
3189 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3190 list_del_rcu(&k->list);
/* Drop all stored identity resolving keys (RCU-safe) */
3195 void hci_smp_irks_clear(struct hci_dev *hdev)
3199 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3200 list_del_rcu(&k->list);
/* Look up the stored BR/EDR link key for @bdaddr; NULL if none
 * (return paths not visible in this sampled chunk).
 */
3205 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3210 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3211 if (bacmp(bdaddr, &k->bdaddr) == 0) {
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on key type and the bonding requirements both
 * sides declared (Bluetooth Core Spec bonding rules).
 */
3221 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3222 u8 key_type, u8 old_key_type)
/* Legacy pairing keys (types < 0x03) */
3225 if (key_type < 0x03)
3228 /* Debug keys are insecure so don't store them persistently */
3229 if (key_type == HCI_LK_DEBUG_COMBINATION)
3232 /* Changed combination key and there's no previous one */
3233 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3236 /* Security mode 3 case */
3240 /* BR/EDR key derived using SC from an LE link */
3241 if (conn->type == LE_LINK)
3244 /* Neither local nor remote side had no-bonding as requirement */
3245 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3248 /* Local side had dedicated bonding as requirement */
3249 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3252 /* Remote side had dedicated bonding as requirement */
3253 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3256 /* If none of the above criteria match, then don't store the key
/* Map an LTK type to the HCI role it authenticates: SMP_LTK keys are
 * used when we are master, everything else when we are slave.
 */
3261 static u8 ltk_role(u8 type)
3263 if (type == SMP_LTK)
3264 return HCI_ROLE_MASTER;
3266 return HCI_ROLE_SLAVE;
/* Look up a long term key matching address, address type and role.
 * Secure Connections LTKs match either role (smp_ltk_is_sc).
 */
3269 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3270 u8 addr_type, u8 role)
3275 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3276 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3279 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
/* Find the IRK that resolves @rpa: first try a cached exact RPA match,
 * then cryptographically test each stored IRK and cache the hit.
 */
3289 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3291 struct smp_irk *irk;
3294 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3295 if (!bacmp(&irk->rpa, rpa)) {
3301 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3302 if (smp_irk_matches(hdev, irk->val, rpa)) {
/* Cache the resolved RPA for the fast path above */
3303 bacpy(&irk->rpa, rpa);
/* Find the IRK stored for a given identity address */
3313 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3316 struct smp_irk *irk;
3318 /* Identity Address must be public or static random */
3319 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3323 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3324 if (addr_type == irk->addr_type &&
3325 bacmp(bdaddr, &irk->bdaddr) == 0) {
/* Store (or update) a BR/EDR link key.  If @persistent is non-NULL it
 * is set to whether the key should survive across power cycles
 * (hci_persistent_key decision).
 */
3335 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3336 bdaddr_t *bdaddr, u8 *val, u8 type,
3337 u8 pin_len, bool *persistent)
3339 struct link_key *key, *old_key;
3342 old_key = hci_find_link_key(hdev, bdaddr);
3344 old_key_type = old_key->type;
/* No previous key: 0xff marks "no old key" for the logic below */
3347 old_key_type = conn ? conn->key_type : 0xff;
3348 key = kzalloc(sizeof(*key), GFP_KERNEL);
3351 list_add_rcu(&key->list, &hdev->link_keys);
3354 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3356 /* Some buggy controller combinations generate a changed
3357 * combination key for legacy pairing even when there's no
3359 if (type == HCI_LK_CHANGED_COMBINATION &&
3360 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3361 type = HCI_LK_COMBINATION;
3363 conn->key_type = type;
3366 bacpy(&key->bdaddr, bdaddr);
3367 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3368 key->pin_len = pin_len;
3370 if (type == HCI_LK_CHANGED_COMBINATION)
3371 key->type = old_key_type;
3376 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP long term key for @bdaddr/@addr_type */
3382 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3383 u8 addr_type, u8 type, u8 authenticated,
3384 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3386 struct smp_ltk *key, *old_key;
/* The role implied by the key type selects which existing entry to reuse */
3387 u8 role = ltk_role(type);
3389 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
3393 key = kzalloc(sizeof(*key), GFP_KERNEL);
3396 list_add_rcu(&key->list, &hdev->long_term_keys);
3399 bacpy(&key->bdaddr, bdaddr);
3400 key->bdaddr_type = addr_type;
3401 memcpy(key->val, tk, sizeof(key->val));
3402 key->authenticated = authenticated;
3405 key->enc_size = enc_size;
/* Store (or update) an identity resolving key together with the RPA
 * it was last seen resolving.
 */
3411 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3412 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3414 struct smp_irk *irk;
3416 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3418 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3422 bacpy(&irk->bdaddr, bdaddr);
3423 irk->addr_type = addr_type;
3425 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3428 memcpy(irk->val, val, 16);
3429 bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for @bdaddr, if any */
3434 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3436 struct link_key *key;
3438 key = hci_find_link_key(hdev, bdaddr);
3442 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3444 list_del_rcu(&key->list);
3445 kfree_rcu(key, rcu);
/* Delete all long term keys matching @bdaddr/@bdaddr_type.
 * Returns 0 if at least one was removed, -ENOENT otherwise.
 */
3450 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3455 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3456 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3459 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3461 list_del_rcu(&k->list);
3466 return removed ? 0 : -ENOENT;
/* Delete the identity resolving key(s) matching @bdaddr/@addr_type */
3469 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3473 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3474 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3477 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3479 list_del_rcu(&k->list);
3484 /* HCI command timer function */
/* Fires when the controller failed to answer a command in time; logs
 * the stuck opcode and re-arms the command queue so it can drain.
 */
3485 static void hci_cmd_timeout(struct work_struct *work)
3487 struct hci_dev *hdev = container_of(work, struct hci_dev,
3490 if (hdev->sent_cmd) {
3491 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3492 u16 opcode = __le16_to_cpu(sent->opcode);
3494 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3496 BT_ERR("%s command tx timeout", hdev->name);
/* Pretend a credit is available so cmd_work sends the next command */
3499 atomic_set(&hdev->cmd_cnt, 1);
3500 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote out-of-band pairing data by address and type */
3503 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3504 bdaddr_t *bdaddr, u8 bdaddr_type)
3506 struct oob_data *data;
3508 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3509 if (bacmp(bdaddr, &data->bdaddr) != 0)
3511 if (data->bdaddr_type != bdaddr_type)
/* Delete stored remote OOB data for one address, if present */
3519 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3522 struct oob_data *data;
3524 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3528 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
3530 list_del(&data->list);
/* Free every stored remote OOB data entry */
3536 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3538 struct oob_data *data, *n;
3540 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3541 list_del(&data->list);
/* Store (or update) remote OOB pairing data.  hash192/rand192 are the
 * P-192 values, hash256/rand256 the P-256 (Secure Connections) values;
 * missing pairs are zeroed so later users can detect their absence.
 */
3546 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3547 u8 bdaddr_type, u8 *hash192, u8 *rand192,
3548 u8 *hash256, u8 *rand256)
3550 struct oob_data *data;
3552 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3554 data = kmalloc(sizeof(*data), GFP_KERNEL);
3558 bacpy(&data->bdaddr, bdaddr);
3559 data->bdaddr_type = bdaddr_type;
3560 list_add(&data->list, &hdev->remote_oob_data);
3563 if (hash192 && rand192) {
3564 memcpy(data->hash192, hash192, sizeof(data->hash192));
3565 memcpy(data->rand192, rand192, sizeof(data->rand192));
3567 memset(data->hash192, 0, sizeof(data->hash192));
3568 memset(data->rand192, 0, sizeof(data->rand192));
3571 if (hash256 && rand256) {
3572 memcpy(data->hash256, hash256, sizeof(data->hash256));
3573 memcpy(data->rand256, rand256, sizeof(data->rand256));
3575 memset(data->hash256, 0, sizeof(data->hash256));
3576 memset(data->rand256, 0, sizeof(data->rand256));
3579 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find an entry in a bdaddr list by address and address type */
3584 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3585 bdaddr_t *bdaddr, u8 type)
3587 struct bdaddr_list *b;
3589 list_for_each_entry(b, bdaddr_list, list) {
3590 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Free every entry of a bdaddr list (blacklist/whitelist/LE white list) */
3597 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3599 struct list_head *p, *n;
3601 list_for_each_safe(p, n, bdaddr_list) {
3602 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add an address to a bdaddr list; BDADDR_ANY is rejected and
 * duplicates are not added twice.
 */
3609 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3611 struct bdaddr_list *entry;
3613 if (!bacmp(bdaddr, BDADDR_ANY))
3616 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3619 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3623 bacpy(&entry->bdaddr, bdaddr);
3624 entry->bdaddr_type = type;
3626 list_add(&entry->list, list);
/* Remove an address from a bdaddr list; BDADDR_ANY clears the whole list */
3631 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3633 struct bdaddr_list *entry;
3635 if (!bacmp(bdaddr, BDADDR_ANY)) {
3636 hci_bdaddr_list_clear(list);
3640 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3644 list_del(&entry->list);
3650 /* This function requires the caller holds hdev->lock */
/* Find the LE connection parameters stored for an identity address */
3651 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3652 bdaddr_t *addr, u8 addr_type)
3654 struct hci_conn_params *params;
3656 /* The conn params list only contains identity addresses */
3657 if (!hci_is_identity_address(addr, addr_type))
3660 list_for_each_entry(params, &hdev->le_conn_params, list) {
3661 if (bacmp(&params->addr, addr) == 0 &&
3662 params->addr_type == addr_type) {
3670 /* This function requires the caller holds hdev->lock */
/* Find conn params on a pending-action list (pend_le_conns /
 * pend_le_reports); iterates the 'action' linkage, not 'list'.
 */
3671 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3672 bdaddr_t *addr, u8 addr_type)
3674 struct hci_conn_params *param;
3676 /* The list only contains identity addresses */
3677 if (!hci_is_identity_address(addr, addr_type))
3680 list_for_each_entry(param, list, action) {
3681 if (bacmp(&param->addr, addr) == 0 &&
3682 param->addr_type == addr_type)
3689 /* This function requires the caller holds hdev->lock */
/* Get-or-create LE connection parameters for an identity address,
 * initialised from the controller-wide defaults.
 */
3690 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3691 bdaddr_t *addr, u8 addr_type)
3693 struct hci_conn_params *params;
3695 if (!hci_is_identity_address(addr, addr_type))
3698 params = hci_conn_params_lookup(hdev, addr, addr_type);
3702 params = kzalloc(sizeof(*params), GFP_KERNEL);
3704 BT_ERR("Out of memory");
3708 bacpy(&params->addr, addr);
3709 params->addr_type = addr_type;
3711 list_add(&params->list, &hdev->le_conn_params);
3712 INIT_LIST_HEAD(&params->action);
/* Seed with the per-controller default connection parameters */
3714 params->conn_min_interval = hdev->le_conn_min_interval;
3715 params->conn_max_interval = hdev->le_conn_max_interval;
3716 params->conn_latency = hdev->le_conn_latency;
3717 params->supervision_timeout = hdev->le_supv_timeout;
3718 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3720 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Release one conn-params entry: drop any connection reference it
 * holds and unlink it from both the action and main lists.
 */
3725 static void hci_conn_params_free(struct hci_conn_params *params)
3728 hci_conn_drop(params->conn);
3729 hci_conn_put(params->conn);
3732 list_del(&params->action);
3733 list_del(&params->list);
3737 /* This function requires the caller holds hdev->lock */
/* Remove the conn params for one address and refresh background scanning */
3738 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3740 struct hci_conn_params *params;
3742 params = hci_conn_params_lookup(hdev, addr, addr_type);
3746 hci_conn_params_free(params);
3748 hci_update_background_scan(hdev);
3750 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3753 /* This function requires the caller holds hdev->lock */
/* Drop every conn-params entry whose auto-connect action is disabled */
3754 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3756 struct hci_conn_params *params, *tmp;
3758 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3759 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3761 list_del(&params->list);
3765 BT_DBG("All LE disabled connection parameters were removed");
3768 /* This function requires the caller holds hdev->lock */
/* Drop every conn-params entry and refresh background scanning */
3769 void hci_conn_params_clear_all(struct hci_dev *hdev)
3771 struct hci_conn_params *params, *tmp;
3773 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3774 hci_conn_params_free(params);
3776 hci_update_background_scan(hdev);
3778 BT_DBG("All LE connection parameters were removed");
/* Request-complete callback: if the inquiry could not start, fall
 * back to DISCOVERY_STOPPED under hdev lock.
 */
3781 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3784 BT_ERR("Failed to start inquiry: status %d", status);
3787 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3788 hci_dev_unlock(hdev);
/* Completion hook after disabling LE scan.  Pure LE discovery stops
 * here; interleaved discovery continues with a BR/EDR inquiry using
 * the General Inquiry Access Code.
 */
3793 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3795 /* General inquiry access code (GIAC) */
3796 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3797 struct hci_request req;
3798 struct hci_cp_inquiry cp;
3802 BT_ERR("Failed to disable LE scanning: status %d", status);
3806 switch (hdev->discovery.type) {
3807 case DISCOV_TYPE_LE:
3809 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3810 hci_dev_unlock(hdev);
3813 case DISCOV_TYPE_INTERLEAVED:
3814 hci_req_init(&req, hdev);
3816 memset(&cp, 0, sizeof(cp));
3817 memcpy(&cp.lap, lap, sizeof(cp.lap));
3818 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3819 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Discard stale results before starting the inquiry phase */
3823 hci_inquiry_cache_flush(hdev);
3825 err = hci_req_run(&req, inquiry_complete);
3827 BT_ERR("Inquiry request failed: err %d", err);
3828 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3831 hci_dev_unlock(hdev);
/* Delayed-work item: the LE scan window elapsed; issue the request to
 * disable scanning, continuing in le_scan_disable_work_complete.
 */
3836 static void le_scan_disable_work(struct work_struct *work)
3838 struct hci_dev *hdev = container_of(work, struct hci_dev,
3839 le_scan_disable.work);
3840 struct hci_request req;
3843 BT_DBG("%s", hdev->name);
3845 hci_req_init(&req, hdev);
3847 hci_req_add_le_scan_disable(&req);
3849 err = hci_req_run(&req, le_scan_disable_work_complete);
3851 BT_ERR("Disable LE scanning request failed: err %d", err);
3854 /* Copy the Identity Address of the controller.
3856 * If the controller has a public BD_ADDR, then by default use that one.
3857 * If this is a LE only controller without a public address, default to
3858 * the static random address.
3860 * For debugging purposes it is possible to force controllers with a
3861 * public address to use the static random address instead.
3863 * In case BR/EDR has been disabled on a dual-mode controller and
3864 * userspace has configured a static address, then that address
3865 * becomes the identity address instead of the public BR/EDR address.
3867 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3870 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3871 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3872 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
3873 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3874 bacpy(bdaddr, &hdev->static_addr);
3875 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3877 bacpy(bdaddr, &hdev->bdaddr);
3878 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3882 /* Alloc HCI device */
/* Allocate and initialise a struct hci_dev with default parameters,
 * empty lists, locks, work items and queues.  The driver fills in its
 * callbacks before calling hci_register_dev().
 */
3883 struct hci_dev *hci_alloc_dev(void)
3885 struct hci_dev *hdev;
3887 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3891 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3892 hdev->esco_type = (ESCO_HV1);
3893 hdev->link_mode = (HCI_LM_ACCEPT);
3894 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3895 hdev->io_capability = 0x03; /* No Input No Output */
3896 hdev->manufacturer = 0xffff; /* Default to internal use */
3897 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3898 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3900 hdev->sniff_max_interval = 800;
3901 hdev->sniff_min_interval = 80;
/* Default LE advertising, scan and connection parameters (HCI units) */
3903 hdev->le_adv_channel_map = 0x07;
3904 hdev->le_adv_min_interval = 0x0800;
3905 hdev->le_adv_max_interval = 0x0800;
3906 hdev->le_scan_interval = 0x0060;
3907 hdev->le_scan_window = 0x0030;
3908 hdev->le_conn_min_interval = 0x0028;
3909 hdev->le_conn_max_interval = 0x0038;
3910 hdev->le_conn_latency = 0x0000;
3911 hdev->le_supv_timeout = 0x002a;
3913 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3914 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3915 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3916 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3918 mutex_init(&hdev->lock);
3919 mutex_init(&hdev->req_lock);
3921 INIT_LIST_HEAD(&hdev->mgmt_pending);
3922 INIT_LIST_HEAD(&hdev->blacklist);
3923 INIT_LIST_HEAD(&hdev->whitelist);
3924 INIT_LIST_HEAD(&hdev->uuids);
3925 INIT_LIST_HEAD(&hdev->link_keys);
3926 INIT_LIST_HEAD(&hdev->long_term_keys);
3927 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3928 INIT_LIST_HEAD(&hdev->remote_oob_data);
3929 INIT_LIST_HEAD(&hdev->le_white_list);
3930 INIT_LIST_HEAD(&hdev->le_conn_params);
3931 INIT_LIST_HEAD(&hdev->pend_le_conns);
3932 INIT_LIST_HEAD(&hdev->pend_le_reports);
3933 INIT_LIST_HEAD(&hdev->conn_hash.list);
3935 INIT_WORK(&hdev->rx_work, hci_rx_work);
3936 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3937 INIT_WORK(&hdev->tx_work, hci_tx_work);
3938 INIT_WORK(&hdev->power_on, hci_power_on);
3940 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3941 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3942 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3944 skb_queue_head_init(&hdev->rx_q);
3945 skb_queue_head_init(&hdev->cmd_q);
3946 skb_queue_head_init(&hdev->raw_q);
3948 init_waitqueue_head(&hdev->req_wait_q);
3950 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3952 hci_init_sysfs(hdev);
3953 discovery_init(hdev);
3957 EXPORT_SYMBOL(hci_alloc_dev);
3959 /* Free HCI device */
/* Drop the device reference; the actual kfree happens in the
 * device release callback.
 */
3960 void hci_free_dev(struct hci_dev *hdev)
3962 /* will free via device release */
3963 put_device(&hdev->dev);
3965 EXPORT_SYMBOL(hci_free_dev);
3967 /* Register HCI device */
/* Register a driver-allocated hci_dev: assign an index, create the
 * workqueues, sysfs/debugfs entries and rfkill switch, add it to the
 * global device list and kick off the initial power-on.
 */
3968 int hci_register_dev(struct hci_dev *hdev)
3972 if (!hdev->open || !hdev->close || !hdev->send)
3975 /* Do not allow HCI_AMP devices to register at index 0,
3976 * so the index can be used as the AMP controller ID.
3978 switch (hdev->dev_type) {
3980 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3983 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3992 sprintf(hdev->name, "hci%d", id);
3995 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3997 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3998 WQ_MEM_RECLAIM, 1, hdev->name);
3999 if (!hdev->workqueue) {
4004 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4005 WQ_MEM_RECLAIM, 1, hdev->name);
4006 if (!hdev->req_workqueue) {
4007 destroy_workqueue(hdev->workqueue);
4012 if (!IS_ERR_OR_NULL(bt_debugfs))
4013 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4015 dev_set_name(&hdev->dev, "%s", hdev->name);
4017 error = device_add(&hdev->dev);
4021 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4022 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal; just run without it */
4025 if (rfkill_register(hdev->rfkill) < 0) {
4026 rfkill_destroy(hdev->rfkill);
4027 hdev->rfkill = NULL;
4031 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4032 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4034 set_bit(HCI_SETUP, &hdev->dev_flags);
4035 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4037 if (hdev->dev_type == HCI_BREDR) {
4038 /* Assume BR/EDR support until proven otherwise (such as
4039 * through reading supported features during init.
4041 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4044 write_lock(&hci_dev_list_lock);
4045 list_add(&hdev->list, &hci_dev_list);
4046 write_unlock(&hci_dev_list_lock);
4048 /* Devices that are marked for raw-only usage are unconfigured
4049 * and should not be included in normal operation.
4051 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4052 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4054 hci_notify(hdev, HCI_DEV_REG);
4057 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind labels (sampled out of view above) */
4062 destroy_workqueue(hdev->workqueue);
4063 destroy_workqueue(hdev->req_workqueue);
4065 ida_simple_remove(&hci_index_ida, hdev->id);
4069 EXPORT_SYMBOL(hci_register_dev);
4071 /* Unregister HCI device */
/* Tear down a registered controller: remove it from the global list,
 * close it, flush pending work, notify mgmt, free all stored keys and
 * lists, and release the index.
 */
4072 void hci_unregister_dev(struct hci_dev *hdev)
4076 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4078 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4082 write_lock(&hci_dev_list_lock);
4083 list_del(&hdev->list);
4084 write_unlock(&hci_dev_list_lock);
4086 hci_dev_do_close(hdev);
4088 for (i = 0; i < NUM_REASSEMBLY; i++)
4089 kfree_skb(hdev->reassembly[i]);
4091 cancel_work_sync(&hdev->power_on);
/* Only announce removal if the device was fully set up */
4093 if (!test_bit(HCI_INIT, &hdev->flags) &&
4094 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4095 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4097 mgmt_index_removed(hdev);
4098 hci_dev_unlock(hdev);
4101 /* mgmt_index_removed should take care of emptying the
4103 BUG_ON(!list_empty(&hdev->mgmt_pending));
4105 hci_notify(hdev, HCI_DEV_UNREG);
4108 rfkill_unregister(hdev->rfkill);
4109 rfkill_destroy(hdev->rfkill);
4112 smp_unregister(hdev);
4114 device_del(&hdev->dev);
4116 debugfs_remove_recursive(hdev->debugfs);
4118 destroy_workqueue(hdev->workqueue);
4119 destroy_workqueue(hdev->req_workqueue);
/* Free all persisted state under hdev->lock */
4122 hci_bdaddr_list_clear(&hdev->blacklist);
4123 hci_bdaddr_list_clear(&hdev->whitelist);
4124 hci_uuids_clear(hdev);
4125 hci_link_keys_clear(hdev);
4126 hci_smp_ltks_clear(hdev);
4127 hci_smp_irks_clear(hdev);
4128 hci_remote_oob_data_clear(hdev);
4129 hci_bdaddr_list_clear(&hdev->le_white_list);
4130 hci_conn_params_clear_all(hdev);
4131 hci_discovery_filter_clear(hdev);
4132 hci_dev_unlock(hdev);
4136 ida_simple_remove(&hci_index_ida, id);
4138 EXPORT_SYMBOL(hci_unregister_dev);
4140 /* Suspend HCI device */
/* Notify listeners that the transport is suspending */
4141 int hci_suspend_dev(struct hci_dev *hdev)
4143 hci_notify(hdev, HCI_DEV_SUSPEND);
4146 EXPORT_SYMBOL(hci_suspend_dev);
4148 /* Resume HCI device */
/* Notify listeners that the transport has resumed */
4149 int hci_resume_dev(struct hci_dev *hdev)
4151 hci_notify(hdev, HCI_DEV_RESUME);
4154 EXPORT_SYMBOL(hci_resume_dev);
4156 /* Reset HCI device */
/* Driver-facing reset: inject a synthetic HCI Hardware Error event
 * into the RX path so the core tears the device down and re-inits it.
 */
4157 int hci_reset_dev(struct hci_dev *hdev)
4159 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4160 struct sk_buff *skb;
4162 skb = bt_skb_alloc(3, GFP_ATOMIC);
4166 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4167 memcpy(skb_put(skb, 3), hw_err, 3);
4169 /* Send Hardware Error to upper stack */
4170 return hci_recv_frame(hdev, skb);
4172 EXPORT_SYMBOL(hci_reset_dev);
4174 /* Receive frame from HCI drivers */
/* Entry point for inbound packets from a driver: tag, timestamp and
 * queue on rx_q, then schedule rx_work.  Frames are dropped unless the
 * device is up or initialising.
 */
4175 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4177 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4178 && !test_bit(HCI_INIT, &hdev->flags))) {
4184 bt_cb(skb)->incoming = 1;
4187 __net_timestamp(skb);
4189 skb_queue_tail(&hdev->rx_q, skb);
4190 queue_work(hdev->workqueue, &hdev->rx_work);
4194 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a fragmented HCI packet of @type from a byte stream into
 * hdev->reassembly[index].  First reads the packet header to learn the
 * expected payload length, then accumulates until complete and hands
 * the full skb to hci_recv_frame().
 * NOTE(review): sampled chunk; the outer accumulation loop and return
 * of the remaining byte count are not fully visible here.
 */
4196 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4197 int count, __u8 index)
4202 struct sk_buff *skb;
4203 struct bt_skb_cb *scb;
4205 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4206 index >= NUM_REASSEMBLY)
4209 skb = hdev->reassembly[index];
/* No packet in progress: size a fresh skb by packet type */
4213 case HCI_ACLDATA_PKT:
4214 len = HCI_MAX_FRAME_SIZE;
4215 hlen = HCI_ACL_HDR_SIZE;
4218 len = HCI_MAX_EVENT_SIZE;
4219 hlen = HCI_EVENT_HDR_SIZE;
4221 case HCI_SCODATA_PKT:
4222 len = HCI_MAX_SCO_SIZE;
4223 hlen = HCI_SCO_HDR_SIZE;
4227 skb = bt_skb_alloc(len, GFP_ATOMIC);
4231 scb = (void *) skb->cb;
4233 scb->pkt_type = type;
4235 hdev->reassembly[index] = skb;
/* Copy as much as is expected (header first, then payload) */
4239 scb = (void *) skb->cb;
4240 len = min_t(uint, scb->expect, count);
4242 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length from it */
4251 if (skb->len == HCI_EVENT_HDR_SIZE) {
4252 struct hci_event_hdr *h = hci_event_hdr(skb);
4253 scb->expect = h->plen;
4255 if (skb_tailroom(skb) < scb->expect) {
4257 hdev->reassembly[index] = NULL;
4263 case HCI_ACLDATA_PKT:
4264 if (skb->len == HCI_ACL_HDR_SIZE) {
4265 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4266 scb->expect = __le16_to_cpu(h->dlen);
4268 if (skb_tailroom(skb) < scb->expect) {
4270 hdev->reassembly[index] = NULL;
4276 case HCI_SCODATA_PKT:
4277 if (skb->len == HCI_SCO_HDR_SIZE) {
4278 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4279 scb->expect = h->dlen;
4281 if (skb_tailroom(skb) < scb->expect) {
4283 hdev->reassembly[index] = NULL;
4290 if (scb->expect == 0) {
4291 /* Complete frame */
4293 bt_cb(skb)->pkt_type = type;
4294 hci_recv_frame(hdev, skb);
4296 hdev->reassembly[index] = NULL;
/* Slot in hdev->reassembly[] reserved for byte-stream transports */
4304 #define STREAM_REASSEMBLY 0
/* Feed raw transport bytes (e.g. from a UART driver) into the packet
 * reassembler; the first byte of each frame carries the packet type.
 */
4306 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4312 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4315 struct { char type; } *pkt;
4317 /* Start of the frame */
4324 type = bt_cb(skb)->pkt_type;
4326 rem = hci_reassembly(hdev, type, data, count,
/* Advance past the bytes hci_reassembly consumed */
4331 data += (count - rem);
4337 EXPORT_SYMBOL(hci_recv_stream_fragment);
4339 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback set (L2CAP, SCO, ...) */
4341 int hci_register_cb(struct hci_cb *cb)
4343 BT_DBG("%p name %s", cb, cb->name);
4345 write_lock(&hci_cb_list_lock);
4346 list_add(&cb->list, &hci_cb_list);
4347 write_unlock(&hci_cb_list_lock);
4351 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback set */
4353 int hci_unregister_cb(struct hci_cb *cb)
4355 BT_DBG("%p name %s", cb, cb->name);
4357 write_lock(&hci_cb_list_lock);
4358 list_del(&cb->list);
4359 write_unlock(&hci_cb_list_lock);
4363 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outbound packet to the driver, after timestamping it and
 * mirroring it to the monitor and (in promiscuous mode) raw sockets.
 */
4365 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4369 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4372 __net_timestamp(skb);
4374 /* Send copy to monitor */
4375 hci_send_to_monitor(hdev, skb);
4377 if (atomic_read(&hdev->promisc)) {
4378 /* Send copy to the sockets */
4379 hci_send_to_sock(hdev, skb);
4382 /* Get rid of skb owner, prior to sending to the driver. */
4385 err = hdev->send(hdev, skb);
4387 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* True while a synchronous HCI request is awaiting its completion */
4392 bool hci_req_pending(struct hci_dev *hdev)
4394 return (hdev->req_status == HCI_REQ_PEND);
4397 /* Send HCI command */
/* Build a stand-alone HCI command skb and queue it for transmission */
4398 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4401 struct sk_buff *skb;
4403 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4405 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4407 BT_ERR("%s no memory for command", hdev->name);
4411 /* Stand-alone HCI commands must be flagged as
4412 * single-command requests.
4414 bt_cb(skb)->req.start = true;
4416 skb_queue_tail(&hdev->cmd_q, skb);
4417 queue_work(hdev->workqueue, &hdev->cmd_work);
4422 /* Get data from the previously sent command */
/* Return a pointer to the parameter payload of the last sent command,
 * or NULL if none was sent or its opcode does not match.
 */
4423 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4425 struct hci_command_hdr *hdr;
4427 if (!hdev->sent_cmd)
4430 hdr = (void *) hdev->sent_cmd->data;
4432 if (hdr->opcode != cpu_to_le16(opcode))
4435 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4437 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle+flags, little-endian length)
 * to an outbound skb.
 */
4441 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4443 struct hci_acl_hdr *hdr;
4446 skb_push(skb, HCI_ACL_HDR_SIZE);
4447 skb_reset_transport_header(skb);
4448 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4449 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4450 hdr->dlen = cpu_to_le16(len);
4453 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4454 struct sk_buff *skb, __u16 flags)
4456 struct hci_conn *conn = chan->conn;
4457 struct hci_dev *hdev = conn->hdev;
4458 struct sk_buff *list;
4460 skb->len = skb_headlen(skb);
4463 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4465 switch (hdev->dev_type) {
4467 hci_add_acl_hdr(skb, conn->handle, flags);
4470 hci_add_acl_hdr(skb, chan->handle, flags);
4473 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4477 list = skb_shinfo(skb)->frag_list;
4479 /* Non fragmented */
4480 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4482 skb_queue_tail(queue, skb);
4485 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4487 skb_shinfo(skb)->frag_list = NULL;
4489 /* Queue all fragments atomically. We need to use spin_lock_bh
4490 * here because of 6LoWPAN links, as there this function is
4491 * called from softirq and using normal spin lock could cause
4494 spin_lock_bh(&queue->lock);
4496 __skb_queue_tail(queue, skb);
4498 flags &= ~ACL_START;
4501 skb = list; list = list->next;
4503 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4504 hci_add_acl_hdr(skb, conn->handle, flags);
4506 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4508 __skb_queue_tail(queue, skb);
4511 spin_unlock_bh(&queue->lock);
4515 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4517 struct hci_dev *hdev = chan->conn->hdev;
4519 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4521 hci_queue_acl(chan, &chan->data_q, skb, flags);
4523 queue_work(hdev->workqueue, &hdev->tx_work);
4527 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4529 struct hci_dev *hdev = conn->hdev;
4530 struct hci_sco_hdr hdr;
4532 BT_DBG("%s len %d", hdev->name, skb->len);
4534 hdr.handle = cpu_to_le16(conn->handle);
4535 hdr.dlen = skb->len;
4537 skb_push(skb, HCI_SCO_HDR_SIZE);
4538 skb_reset_transport_header(skb);
4539 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4541 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4543 skb_queue_tail(&conn->data_q, skb);
4544 queue_work(hdev->workqueue, &hdev->tx_work);
4547 /* ---- HCI TX task (outgoing data) ---- */
4549 /* HCI Connection scheduler */
4550 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4553 struct hci_conn_hash *h = &hdev->conn_hash;
4554 struct hci_conn *conn = NULL, *c;
4555 unsigned int num = 0, min = ~0;
4557 /* We don't have to lock device here. Connections are always
4558 * added and removed with TX task disabled. */
4562 list_for_each_entry_rcu(c, &h->list, list) {
4563 if (c->type != type || skb_queue_empty(&c->data_q))
4566 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4571 if (c->sent < min) {
4576 if (hci_conn_num(hdev, type) == num)
4585 switch (conn->type) {
4587 cnt = hdev->acl_cnt;
4591 cnt = hdev->sco_cnt;
4594 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4598 BT_ERR("Unknown link type");
4606 BT_DBG("conn %p quote %d", conn, *quote);
4610 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4612 struct hci_conn_hash *h = &hdev->conn_hash;
4615 BT_ERR("%s link tx timeout", hdev->name);
4619 /* Kill stalled connections */
4620 list_for_each_entry_rcu(c, &h->list, list) {
4621 if (c->type == type && c->sent) {
4622 BT_ERR("%s killing stalled connection %pMR",
4623 hdev->name, &c->dst);
4624 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4631 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4634 struct hci_conn_hash *h = &hdev->conn_hash;
4635 struct hci_chan *chan = NULL;
4636 unsigned int num = 0, min = ~0, cur_prio = 0;
4637 struct hci_conn *conn;
4638 int cnt, q, conn_num = 0;
4640 BT_DBG("%s", hdev->name);
4644 list_for_each_entry_rcu(conn, &h->list, list) {
4645 struct hci_chan *tmp;
4647 if (conn->type != type)
4650 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4655 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4656 struct sk_buff *skb;
4658 if (skb_queue_empty(&tmp->data_q))
4661 skb = skb_peek(&tmp->data_q);
4662 if (skb->priority < cur_prio)
4665 if (skb->priority > cur_prio) {
4668 cur_prio = skb->priority;
4673 if (conn->sent < min) {
4679 if (hci_conn_num(hdev, type) == conn_num)
4688 switch (chan->conn->type) {
4690 cnt = hdev->acl_cnt;
4693 cnt = hdev->block_cnt;
4697 cnt = hdev->sco_cnt;
4700 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4704 BT_ERR("Unknown link type");
4709 BT_DBG("chan %p quote %d", chan, *quote);
4713 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4715 struct hci_conn_hash *h = &hdev->conn_hash;
4716 struct hci_conn *conn;
4719 BT_DBG("%s", hdev->name);
4723 list_for_each_entry_rcu(conn, &h->list, list) {
4724 struct hci_chan *chan;
4726 if (conn->type != type)
4729 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4734 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4735 struct sk_buff *skb;
4742 if (skb_queue_empty(&chan->data_q))
4745 skb = skb_peek(&chan->data_q);
4746 if (skb->priority >= HCI_PRIO_MAX - 1)
4749 skb->priority = HCI_PRIO_MAX - 1;
4751 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4755 if (hci_conn_num(hdev, type) == num)
4763 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4765 /* Calculate count of blocks used by this packet */
4766 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4769 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4771 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4772 /* ACL tx timeout must be longer than maximum
4773 * link supervision timeout (40.9 seconds) */
4774 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4775 HCI_ACL_TX_TIMEOUT))
4776 hci_link_tx_to(hdev, ACL_LINK);
4780 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4782 unsigned int cnt = hdev->acl_cnt;
4783 struct hci_chan *chan;
4784 struct sk_buff *skb;
4787 __check_timeout(hdev, cnt);
4789 while (hdev->acl_cnt &&
4790 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4791 u32 priority = (skb_peek(&chan->data_q))->priority;
4792 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4793 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4794 skb->len, skb->priority);
4796 /* Stop if priority has changed */
4797 if (skb->priority < priority)
4800 skb = skb_dequeue(&chan->data_q);
4802 hci_conn_enter_active_mode(chan->conn,
4803 bt_cb(skb)->force_active);
4805 hci_send_frame(hdev, skb);
4806 hdev->acl_last_tx = jiffies;
4814 if (cnt != hdev->acl_cnt)
4815 hci_prio_recalculate(hdev, ACL_LINK);
4818 static void hci_sched_acl_blk(struct hci_dev *hdev)
4820 unsigned int cnt = hdev->block_cnt;
4821 struct hci_chan *chan;
4822 struct sk_buff *skb;
4826 __check_timeout(hdev, cnt);
4828 BT_DBG("%s", hdev->name);
4830 if (hdev->dev_type == HCI_AMP)
4835 while (hdev->block_cnt > 0 &&
4836 (chan = hci_chan_sent(hdev, type, "e))) {
4837 u32 priority = (skb_peek(&chan->data_q))->priority;
4838 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4841 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4842 skb->len, skb->priority);
4844 /* Stop if priority has changed */
4845 if (skb->priority < priority)
4848 skb = skb_dequeue(&chan->data_q);
4850 blocks = __get_blocks(hdev, skb);
4851 if (blocks > hdev->block_cnt)
4854 hci_conn_enter_active_mode(chan->conn,
4855 bt_cb(skb)->force_active);
4857 hci_send_frame(hdev, skb);
4858 hdev->acl_last_tx = jiffies;
4860 hdev->block_cnt -= blocks;
4863 chan->sent += blocks;
4864 chan->conn->sent += blocks;
4868 if (cnt != hdev->block_cnt)
4869 hci_prio_recalculate(hdev, type);
4872 static void hci_sched_acl(struct hci_dev *hdev)
4874 BT_DBG("%s", hdev->name);
4876 /* No ACL link over BR/EDR controller */
4877 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4880 /* No AMP link over AMP controller */
4881 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4884 switch (hdev->flow_ctl_mode) {
4885 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4886 hci_sched_acl_pkt(hdev);
4889 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4890 hci_sched_acl_blk(hdev);
4896 static void hci_sched_sco(struct hci_dev *hdev)
4898 struct hci_conn *conn;
4899 struct sk_buff *skb;
4902 BT_DBG("%s", hdev->name);
4904 if (!hci_conn_num(hdev, SCO_LINK))
4907 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4908 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4909 BT_DBG("skb %p len %d", skb, skb->len);
4910 hci_send_frame(hdev, skb);
4913 if (conn->sent == ~0)
4919 static void hci_sched_esco(struct hci_dev *hdev)
4921 struct hci_conn *conn;
4922 struct sk_buff *skb;
4925 BT_DBG("%s", hdev->name);
4927 if (!hci_conn_num(hdev, ESCO_LINK))
4930 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4932 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4933 BT_DBG("skb %p len %d", skb, skb->len);
4934 hci_send_frame(hdev, skb);
4937 if (conn->sent == ~0)
4943 static void hci_sched_le(struct hci_dev *hdev)
4945 struct hci_chan *chan;
4946 struct sk_buff *skb;
4947 int quote, cnt, tmp;
4949 BT_DBG("%s", hdev->name);
4951 if (!hci_conn_num(hdev, LE_LINK))
4954 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4955 /* LE tx timeout must be longer than maximum
4956 * link supervision timeout (40.9 seconds) */
4957 if (!hdev->le_cnt && hdev->le_pkts &&
4958 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4959 hci_link_tx_to(hdev, LE_LINK);
4962 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4964 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4965 u32 priority = (skb_peek(&chan->data_q))->priority;
4966 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4967 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4968 skb->len, skb->priority);
4970 /* Stop if priority has changed */
4971 if (skb->priority < priority)
4974 skb = skb_dequeue(&chan->data_q);
4976 hci_send_frame(hdev, skb);
4977 hdev->le_last_tx = jiffies;
4988 hdev->acl_cnt = cnt;
4991 hci_prio_recalculate(hdev, LE_LINK);
4994 static void hci_tx_work(struct work_struct *work)
4996 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4997 struct sk_buff *skb;
4999 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5000 hdev->sco_cnt, hdev->le_cnt);
5002 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5003 /* Schedule queues and send stuff to HCI driver */
5004 hci_sched_acl(hdev);
5005 hci_sched_sco(hdev);
5006 hci_sched_esco(hdev);
5010 /* Send next queued raw (unknown type) packet */
5011 while ((skb = skb_dequeue(&hdev->raw_q)))
5012 hci_send_frame(hdev, skb);
5015 /* ----- HCI RX task (incoming data processing) ----- */
5017 /* ACL data packet */
5018 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5020 struct hci_acl_hdr *hdr = (void *) skb->data;
5021 struct hci_conn *conn;
5022 __u16 handle, flags;
5024 skb_pull(skb, HCI_ACL_HDR_SIZE);
5026 handle = __le16_to_cpu(hdr->handle);
5027 flags = hci_flags(handle);
5028 handle = hci_handle(handle);
5030 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5033 hdev->stat.acl_rx++;
5036 conn = hci_conn_hash_lookup_handle(hdev, handle);
5037 hci_dev_unlock(hdev);
5040 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5042 /* Send to upper protocol */
5043 l2cap_recv_acldata(conn, skb, flags);
5046 BT_ERR("%s ACL packet for unknown connection handle %d",
5047 hdev->name, handle);
5053 /* SCO data packet */
5054 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5056 struct hci_sco_hdr *hdr = (void *) skb->data;
5057 struct hci_conn *conn;
5060 skb_pull(skb, HCI_SCO_HDR_SIZE);
5062 handle = __le16_to_cpu(hdr->handle);
5064 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5066 hdev->stat.sco_rx++;
5069 conn = hci_conn_hash_lookup_handle(hdev, handle);
5070 hci_dev_unlock(hdev);
5073 /* Send to upper protocol */
5074 sco_recv_scodata(conn, skb);
5077 BT_ERR("%s SCO packet for unknown connection handle %d",
5078 hdev->name, handle);
5084 static bool hci_req_is_complete(struct hci_dev *hdev)
5086 struct sk_buff *skb;
5088 skb = skb_peek(&hdev->cmd_q);
5092 return bt_cb(skb)->req.start;
5095 static void hci_resend_last(struct hci_dev *hdev)
5097 struct hci_command_hdr *sent;
5098 struct sk_buff *skb;
5101 if (!hdev->sent_cmd)
5104 sent = (void *) hdev->sent_cmd->data;
5105 opcode = __le16_to_cpu(sent->opcode);
5106 if (opcode == HCI_OP_RESET)
5109 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5113 skb_queue_head(&hdev->cmd_q, skb);
5114 queue_work(hdev->workqueue, &hdev->cmd_work);
5117 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5119 hci_req_complete_t req_complete = NULL;
5120 struct sk_buff *skb;
5121 unsigned long flags;
5123 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5125 /* If the completed command doesn't match the last one that was
5126 * sent we need to do special handling of it.
5128 if (!hci_sent_cmd_data(hdev, opcode)) {
5129 /* Some CSR based controllers generate a spontaneous
5130 * reset complete event during init and any pending
5131 * command will never be completed. In such a case we
5132 * need to resend whatever was the last sent
5135 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5136 hci_resend_last(hdev);
5141 /* If the command succeeded and there's still more commands in
5142 * this request the request is not yet complete.
5144 if (!status && !hci_req_is_complete(hdev))
5147 /* If this was the last command in a request the complete
5148 * callback would be found in hdev->sent_cmd instead of the
5149 * command queue (hdev->cmd_q).
5151 if (hdev->sent_cmd) {
5152 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5155 /* We must set the complete callback to NULL to
5156 * avoid calling the callback more than once if
5157 * this function gets called again.
5159 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5165 /* Remove all pending commands belonging to this request */
5166 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5167 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5168 if (bt_cb(skb)->req.start) {
5169 __skb_queue_head(&hdev->cmd_q, skb);
5173 req_complete = bt_cb(skb)->req.complete;
5176 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5180 req_complete(hdev, status);
5183 static void hci_rx_work(struct work_struct *work)
5185 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5186 struct sk_buff *skb;
5188 BT_DBG("%s", hdev->name);
5190 while ((skb = skb_dequeue(&hdev->rx_q))) {
5191 /* Send copy to monitor */
5192 hci_send_to_monitor(hdev, skb);
5194 if (atomic_read(&hdev->promisc)) {
5195 /* Send copy to the sockets */
5196 hci_send_to_sock(hdev, skb);
5199 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5204 if (test_bit(HCI_INIT, &hdev->flags)) {
5205 /* Don't process data packets in this states. */
5206 switch (bt_cb(skb)->pkt_type) {
5207 case HCI_ACLDATA_PKT:
5208 case HCI_SCODATA_PKT:
5215 switch (bt_cb(skb)->pkt_type) {
5217 BT_DBG("%s Event packet", hdev->name);
5218 hci_event_packet(hdev, skb);
5221 case HCI_ACLDATA_PKT:
5222 BT_DBG("%s ACL data packet", hdev->name);
5223 hci_acldata_packet(hdev, skb);
5226 case HCI_SCODATA_PKT:
5227 BT_DBG("%s SCO data packet", hdev->name);
5228 hci_scodata_packet(hdev, skb);
5238 static void hci_cmd_work(struct work_struct *work)
5240 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5241 struct sk_buff *skb;
5243 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5244 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5246 /* Send queued commands */
5247 if (atomic_read(&hdev->cmd_cnt)) {
5248 skb = skb_dequeue(&hdev->cmd_q);
5252 kfree_skb(hdev->sent_cmd);
5254 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5255 if (hdev->sent_cmd) {
5256 atomic_dec(&hdev->cmd_cnt);
5257 hci_send_frame(hdev, skb);
5258 if (test_bit(HCI_RESET, &hdev->flags))
5259 cancel_delayed_work(&hdev->cmd_timer);
5261 schedule_delayed_work(&hdev->cmd_timer,
5264 skb_queue_head(&hdev->cmd_q, skb);
5265 queue_work(hdev->workqueue, &hdev->cmd_work);