2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <asm/unaligned.h>
34 #include <net/bluetooth/bluetooth.h>
35 #include <net/bluetooth/hci_core.h>
37 static void hci_rx_work(struct work_struct *work);
38 static void hci_cmd_work(struct work_struct *work);
39 static void hci_tx_work(struct work_struct *work);
42 LIST_HEAD(hci_dev_list);
43 DEFINE_RWLOCK(hci_dev_list_lock);
45 /* HCI callback list */
46 LIST_HEAD(hci_cb_list);
47 DEFINE_RWLOCK(hci_cb_list_lock);
49 /* HCI ID Numbering */
50 static DEFINE_IDA(hci_index_ida);
52 /* ---- HCI notifications ---- */
54 static void hci_notify(struct hci_dev *hdev, int event)
56 hci_sock_dev_event(hdev, event);
59 /* ---- HCI debugfs entries ---- */
61 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
64 struct hci_dev *hdev = file->private_data;
67 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
70 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
73 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74 size_t count, loff_t *ppos)
76 struct hci_dev *hdev = file->private_data;
79 size_t buf_size = min(count, (sizeof(buf)-1));
83 if (!test_bit(HCI_UP, &hdev->flags))
86 if (copy_from_user(buf, user_buf, buf_size))
90 if (strtobool(buf, &enable))
93 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
103 hci_req_unlock(hdev);
108 err = -bt_to_errno(skb->data[0]);
114 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
119 static const struct file_operations dut_mode_fops = {
121 .read = dut_mode_read,
122 .write = dut_mode_write,
123 .llseek = default_llseek,
126 static int features_show(struct seq_file *f, void *ptr)
128 struct hci_dev *hdev = f->private;
132 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
133 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
134 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
135 hdev->features[p][0], hdev->features[p][1],
136 hdev->features[p][2], hdev->features[p][3],
137 hdev->features[p][4], hdev->features[p][5],
138 hdev->features[p][6], hdev->features[p][7]);
140 if (lmp_le_capable(hdev))
141 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
142 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
143 hdev->le_features[0], hdev->le_features[1],
144 hdev->le_features[2], hdev->le_features[3],
145 hdev->le_features[4], hdev->le_features[5],
146 hdev->le_features[6], hdev->le_features[7]);
147 hci_dev_unlock(hdev);
152 static int features_open(struct inode *inode, struct file *file)
154 return single_open(file, features_show, inode->i_private);
157 static const struct file_operations features_fops = {
158 .open = features_open,
161 .release = single_release,
164 static int blacklist_show(struct seq_file *f, void *p)
166 struct hci_dev *hdev = f->private;
167 struct bdaddr_list *b;
170 list_for_each_entry(b, &hdev->blacklist, list)
171 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
172 hci_dev_unlock(hdev);
177 static int blacklist_open(struct inode *inode, struct file *file)
179 return single_open(file, blacklist_show, inode->i_private);
182 static const struct file_operations blacklist_fops = {
183 .open = blacklist_open,
186 .release = single_release,
189 static int uuids_show(struct seq_file *f, void *p)
191 struct hci_dev *hdev = f->private;
192 struct bt_uuid *uuid;
195 list_for_each_entry(uuid, &hdev->uuids, list) {
198 /* The Bluetooth UUID values are stored in big endian,
199 * but with reversed byte order. So convert them into
200 * the right order for the %pUb modifier.
202 for (i = 0; i < 16; i++)
203 val[i] = uuid->uuid[15 - i];
205 seq_printf(f, "%pUb\n", val);
207 hci_dev_unlock(hdev);
212 static int uuids_open(struct inode *inode, struct file *file)
214 return single_open(file, uuids_show, inode->i_private);
217 static const struct file_operations uuids_fops = {
221 .release = single_release,
224 static int inquiry_cache_show(struct seq_file *f, void *p)
226 struct hci_dev *hdev = f->private;
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
232 list_for_each_entry(e, &cache->all, all) {
233 struct inquiry_data *data = &e->data;
234 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
236 data->pscan_rep_mode, data->pscan_period_mode,
237 data->pscan_mode, data->dev_class[2],
238 data->dev_class[1], data->dev_class[0],
239 __le16_to_cpu(data->clock_offset),
240 data->rssi, data->ssp_mode, e->timestamp);
243 hci_dev_unlock(hdev);
248 static int inquiry_cache_open(struct inode *inode, struct file *file)
250 return single_open(file, inquiry_cache_show, inode->i_private);
253 static const struct file_operations inquiry_cache_fops = {
254 .open = inquiry_cache_open,
257 .release = single_release,
260 static int link_keys_show(struct seq_file *f, void *ptr)
262 struct hci_dev *hdev = f->private;
263 struct list_head *p, *n;
266 list_for_each_safe(p, n, &hdev->link_keys) {
267 struct link_key *key = list_entry(p, struct link_key, list);
268 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
269 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
271 hci_dev_unlock(hdev);
276 static int link_keys_open(struct inode *inode, struct file *file)
278 return single_open(file, link_keys_show, inode->i_private);
281 static const struct file_operations link_keys_fops = {
282 .open = link_keys_open,
285 .release = single_release,
288 static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
289 size_t count, loff_t *ppos)
291 struct hci_dev *hdev = file->private_data;
294 buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
297 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
300 static const struct file_operations use_debug_keys_fops = {
302 .read = use_debug_keys_read,
303 .llseek = default_llseek,
306 static int dev_class_show(struct seq_file *f, void *ptr)
308 struct hci_dev *hdev = f->private;
311 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
312 hdev->dev_class[1], hdev->dev_class[0]);
313 hci_dev_unlock(hdev);
318 static int dev_class_open(struct inode *inode, struct file *file)
320 return single_open(file, dev_class_show, inode->i_private);
323 static const struct file_operations dev_class_fops = {
324 .open = dev_class_open,
327 .release = single_release,
330 static int voice_setting_get(void *data, u64 *val)
332 struct hci_dev *hdev = data;
335 *val = hdev->voice_setting;
336 hci_dev_unlock(hdev);
341 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
342 NULL, "0x%4.4llx\n");
344 static int auto_accept_delay_set(void *data, u64 val)
346 struct hci_dev *hdev = data;
349 hdev->auto_accept_delay = val;
350 hci_dev_unlock(hdev);
355 static int auto_accept_delay_get(void *data, u64 *val)
357 struct hci_dev *hdev = data;
360 *val = hdev->auto_accept_delay;
361 hci_dev_unlock(hdev);
366 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
367 auto_accept_delay_set, "%llu\n");
369 static int ssp_debug_mode_set(void *data, u64 val)
371 struct hci_dev *hdev = data;
376 if (val != 0 && val != 1)
379 if (!test_bit(HCI_UP, &hdev->flags))
384 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
385 &mode, HCI_CMD_TIMEOUT);
386 hci_req_unlock(hdev);
391 err = -bt_to_errno(skb->data[0]);
398 hdev->ssp_debug_mode = val;
399 hci_dev_unlock(hdev);
404 static int ssp_debug_mode_get(void *data, u64 *val)
406 struct hci_dev *hdev = data;
409 *val = hdev->ssp_debug_mode;
410 hci_dev_unlock(hdev);
415 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
416 ssp_debug_mode_set, "%llu\n");
418 static int idle_timeout_set(void *data, u64 val)
420 struct hci_dev *hdev = data;
422 if (val != 0 && (val < 500 || val > 3600000))
426 hdev->idle_timeout = val;
427 hci_dev_unlock(hdev);
432 static int idle_timeout_get(void *data, u64 *val)
434 struct hci_dev *hdev = data;
437 *val = hdev->idle_timeout;
438 hci_dev_unlock(hdev);
443 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
444 idle_timeout_set, "%llu\n");
446 static int sniff_min_interval_set(void *data, u64 val)
448 struct hci_dev *hdev = data;
450 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
454 hdev->sniff_min_interval = val;
455 hci_dev_unlock(hdev);
460 static int sniff_min_interval_get(void *data, u64 *val)
462 struct hci_dev *hdev = data;
465 *val = hdev->sniff_min_interval;
466 hci_dev_unlock(hdev);
471 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
472 sniff_min_interval_set, "%llu\n");
474 static int sniff_max_interval_set(void *data, u64 val)
476 struct hci_dev *hdev = data;
478 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
482 hdev->sniff_max_interval = val;
483 hci_dev_unlock(hdev);
488 static int sniff_max_interval_get(void *data, u64 *val)
490 struct hci_dev *hdev = data;
493 *val = hdev->sniff_max_interval;
494 hci_dev_unlock(hdev);
499 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
500 sniff_max_interval_set, "%llu\n");
502 static int static_address_show(struct seq_file *f, void *p)
504 struct hci_dev *hdev = f->private;
507 seq_printf(f, "%pMR\n", &hdev->static_addr);
508 hci_dev_unlock(hdev);
513 static int static_address_open(struct inode *inode, struct file *file)
515 return single_open(file, static_address_show, inode->i_private);
518 static const struct file_operations static_address_fops = {
519 .open = static_address_open,
522 .release = single_release,
525 static int own_address_type_set(void *data, u64 val)
527 struct hci_dev *hdev = data;
529 if (val != 0 && val != 1)
533 hdev->own_addr_type = val;
534 hci_dev_unlock(hdev);
539 static int own_address_type_get(void *data, u64 *val)
541 struct hci_dev *hdev = data;
544 *val = hdev->own_addr_type;
545 hci_dev_unlock(hdev);
550 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
551 own_address_type_set, "%llu\n");
553 static int long_term_keys_show(struct seq_file *f, void *ptr)
555 struct hci_dev *hdev = f->private;
556 struct list_head *p, *n;
559 list_for_each_safe(p, n, &hdev->link_keys) {
560 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
561 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\\n",
562 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
563 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
564 8, ltk->rand, 16, ltk->val);
566 hci_dev_unlock(hdev);
571 static int long_term_keys_open(struct inode *inode, struct file *file)
573 return single_open(file, long_term_keys_show, inode->i_private);
576 static const struct file_operations long_term_keys_fops = {
577 .open = long_term_keys_open,
580 .release = single_release,
583 static int conn_min_interval_set(void *data, u64 val)
585 struct hci_dev *hdev = data;
587 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
591 hdev->le_conn_min_interval = val;
592 hci_dev_unlock(hdev);
597 static int conn_min_interval_get(void *data, u64 *val)
599 struct hci_dev *hdev = data;
602 *val = hdev->le_conn_min_interval;
603 hci_dev_unlock(hdev);
608 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
609 conn_min_interval_set, "%llu\n");
611 static int conn_max_interval_set(void *data, u64 val)
613 struct hci_dev *hdev = data;
615 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
619 hdev->le_conn_max_interval = val;
620 hci_dev_unlock(hdev);
625 static int conn_max_interval_get(void *data, u64 *val)
627 struct hci_dev *hdev = data;
630 *val = hdev->le_conn_max_interval;
631 hci_dev_unlock(hdev);
636 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
637 conn_max_interval_set, "%llu\n");
639 /* ---- HCI requests ---- */
641 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
643 BT_DBG("%s result 0x%2.2x", hdev->name, result);
645 if (hdev->req_status == HCI_REQ_PEND) {
646 hdev->req_result = result;
647 hdev->req_status = HCI_REQ_DONE;
648 wake_up_interruptible(&hdev->req_wait_q);
652 static void hci_req_cancel(struct hci_dev *hdev, int err)
654 BT_DBG("%s err 0x%2.2x", hdev->name, err);
656 if (hdev->req_status == HCI_REQ_PEND) {
657 hdev->req_result = err;
658 hdev->req_status = HCI_REQ_CANCELED;
659 wake_up_interruptible(&hdev->req_wait_q);
663 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
666 struct hci_ev_cmd_complete *ev;
667 struct hci_event_hdr *hdr;
672 skb = hdev->recv_evt;
673 hdev->recv_evt = NULL;
675 hci_dev_unlock(hdev);
678 return ERR_PTR(-ENODATA);
680 if (skb->len < sizeof(*hdr)) {
681 BT_ERR("Too short HCI event");
685 hdr = (void *) skb->data;
686 skb_pull(skb, HCI_EVENT_HDR_SIZE);
689 if (hdr->evt != event)
694 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
695 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
699 if (skb->len < sizeof(*ev)) {
700 BT_ERR("Too short cmd_complete event");
704 ev = (void *) skb->data;
705 skb_pull(skb, sizeof(*ev));
707 if (opcode == __le16_to_cpu(ev->opcode))
710 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
711 __le16_to_cpu(ev->opcode));
715 return ERR_PTR(-ENODATA);
718 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
719 const void *param, u8 event, u32 timeout)
721 DECLARE_WAITQUEUE(wait, current);
722 struct hci_request req;
725 BT_DBG("%s", hdev->name);
727 hci_req_init(&req, hdev);
729 hci_req_add_ev(&req, opcode, plen, param, event);
731 hdev->req_status = HCI_REQ_PEND;
733 err = hci_req_run(&req, hci_req_sync_complete);
737 add_wait_queue(&hdev->req_wait_q, &wait);
738 set_current_state(TASK_INTERRUPTIBLE);
740 schedule_timeout(timeout);
742 remove_wait_queue(&hdev->req_wait_q, &wait);
744 if (signal_pending(current))
745 return ERR_PTR(-EINTR);
747 switch (hdev->req_status) {
749 err = -bt_to_errno(hdev->req_result);
752 case HCI_REQ_CANCELED:
753 err = -hdev->req_result;
761 hdev->req_status = hdev->req_result = 0;
763 BT_DBG("%s end: err %d", hdev->name, err);
768 return hci_get_cmd_complete(hdev, opcode, event);
770 EXPORT_SYMBOL(__hci_cmd_sync_ev);
772 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
773 const void *param, u32 timeout)
775 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
777 EXPORT_SYMBOL(__hci_cmd_sync);
779 /* Execute request and wait for completion. */
780 static int __hci_req_sync(struct hci_dev *hdev,
781 void (*func)(struct hci_request *req,
783 unsigned long opt, __u32 timeout)
785 struct hci_request req;
786 DECLARE_WAITQUEUE(wait, current);
789 BT_DBG("%s start", hdev->name);
791 hci_req_init(&req, hdev);
793 hdev->req_status = HCI_REQ_PEND;
797 err = hci_req_run(&req, hci_req_sync_complete);
799 hdev->req_status = 0;
801 /* ENODATA means the HCI request command queue is empty.
802 * This can happen when a request with conditionals doesn't
803 * trigger any commands to be sent. This is normal behavior
804 * and should not trigger an error return.
812 add_wait_queue(&hdev->req_wait_q, &wait);
813 set_current_state(TASK_INTERRUPTIBLE);
815 schedule_timeout(timeout);
817 remove_wait_queue(&hdev->req_wait_q, &wait);
819 if (signal_pending(current))
822 switch (hdev->req_status) {
824 err = -bt_to_errno(hdev->req_result);
827 case HCI_REQ_CANCELED:
828 err = -hdev->req_result;
836 hdev->req_status = hdev->req_result = 0;
838 BT_DBG("%s end: err %d", hdev->name, err);
843 static int hci_req_sync(struct hci_dev *hdev,
844 void (*req)(struct hci_request *req,
846 unsigned long opt, __u32 timeout)
850 if (!test_bit(HCI_UP, &hdev->flags))
853 /* Serialize all requests */
855 ret = __hci_req_sync(hdev, req, opt, timeout);
856 hci_req_unlock(hdev);
861 static void hci_reset_req(struct hci_request *req, unsigned long opt)
863 BT_DBG("%s %ld", req->hdev->name, opt);
866 set_bit(HCI_RESET, &req->hdev->flags);
867 hci_req_add(req, HCI_OP_RESET, 0, NULL);
870 static void bredr_init(struct hci_request *req)
872 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
874 /* Read Local Supported Features */
875 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
877 /* Read Local Version */
878 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
880 /* Read BD Address */
881 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
884 static void amp_init(struct hci_request *req)
886 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
888 /* Read Local Version */
889 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
891 /* Read Local Supported Commands */
892 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
894 /* Read Local Supported Features */
895 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
897 /* Read Local AMP Info */
898 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
900 /* Read Data Blk size */
901 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
903 /* Read Flow Control Mode */
904 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
906 /* Read Location Data */
907 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
910 static void hci_init1_req(struct hci_request *req, unsigned long opt)
912 struct hci_dev *hdev = req->hdev;
914 BT_DBG("%s %ld", hdev->name, opt);
917 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
918 hci_reset_req(req, 0);
920 switch (hdev->dev_type) {
930 BT_ERR("Unknown device type %d", hdev->dev_type);
935 static void bredr_setup(struct hci_request *req)
937 struct hci_dev *hdev = req->hdev;
942 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
943 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
945 /* Read Class of Device */
946 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
948 /* Read Local Name */
949 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
951 /* Read Voice Setting */
952 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
954 /* Read Number of Supported IAC */
955 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
957 /* Read Current IAC LAP */
958 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
960 /* Clear Event Filters */
961 flt_type = HCI_FLT_CLEAR_ALL;
962 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
964 /* Connection accept timeout ~20 secs */
965 param = __constant_cpu_to_le16(0x7d00);
966 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
968 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
969 * but it does not support page scan related HCI commands.
971 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
972 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
973 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
977 static void le_setup(struct hci_request *req)
979 struct hci_dev *hdev = req->hdev;
981 /* Read LE Buffer Size */
982 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
984 /* Read LE Local Supported Features */
985 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
987 /* Read LE Advertising Channel TX Power */
988 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
990 /* Read LE White List Size */
991 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
993 /* Read LE Supported States */
994 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
996 /* LE-only controllers have LE implicitly enabled */
997 if (!lmp_bredr_capable(hdev))
998 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1001 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1003 if (lmp_ext_inq_capable(hdev))
1006 if (lmp_inq_rssi_capable(hdev))
1009 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1010 hdev->lmp_subver == 0x0757)
1013 if (hdev->manufacturer == 15) {
1014 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1016 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1018 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1022 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1023 hdev->lmp_subver == 0x1805)
1029 static void hci_setup_inquiry_mode(struct hci_request *req)
1033 mode = hci_get_inquiry_mode(req->hdev);
1035 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1038 static void hci_setup_event_mask(struct hci_request *req)
1040 struct hci_dev *hdev = req->hdev;
1042 /* The second byte is 0xff instead of 0x9f (two reserved bits
1043 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1044 * command otherwise.
1046 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1048 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1049 * any event mask for pre 1.2 devices.
1051 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1054 if (lmp_bredr_capable(hdev)) {
1055 events[4] |= 0x01; /* Flow Specification Complete */
1056 events[4] |= 0x02; /* Inquiry Result with RSSI */
1057 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1058 events[5] |= 0x08; /* Synchronous Connection Complete */
1059 events[5] |= 0x10; /* Synchronous Connection Changed */
1061 /* Use a different default for LE-only devices */
1062 memset(events, 0, sizeof(events));
1063 events[0] |= 0x10; /* Disconnection Complete */
1064 events[0] |= 0x80; /* Encryption Change */
1065 events[1] |= 0x08; /* Read Remote Version Information Complete */
1066 events[1] |= 0x20; /* Command Complete */
1067 events[1] |= 0x40; /* Command Status */
1068 events[1] |= 0x80; /* Hardware Error */
1069 events[2] |= 0x04; /* Number of Completed Packets */
1070 events[3] |= 0x02; /* Data Buffer Overflow */
1071 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1074 if (lmp_inq_rssi_capable(hdev))
1075 events[4] |= 0x02; /* Inquiry Result with RSSI */
1077 if (lmp_sniffsubr_capable(hdev))
1078 events[5] |= 0x20; /* Sniff Subrating */
1080 if (lmp_pause_enc_capable(hdev))
1081 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1083 if (lmp_ext_inq_capable(hdev))
1084 events[5] |= 0x40; /* Extended Inquiry Result */
1086 if (lmp_no_flush_capable(hdev))
1087 events[7] |= 0x01; /* Enhanced Flush Complete */
1089 if (lmp_lsto_capable(hdev))
1090 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1092 if (lmp_ssp_capable(hdev)) {
1093 events[6] |= 0x01; /* IO Capability Request */
1094 events[6] |= 0x02; /* IO Capability Response */
1095 events[6] |= 0x04; /* User Confirmation Request */
1096 events[6] |= 0x08; /* User Passkey Request */
1097 events[6] |= 0x10; /* Remote OOB Data Request */
1098 events[6] |= 0x20; /* Simple Pairing Complete */
1099 events[7] |= 0x04; /* User Passkey Notification */
1100 events[7] |= 0x08; /* Keypress Notification */
1101 events[7] |= 0x10; /* Remote Host Supported
1102 * Features Notification
1106 if (lmp_le_capable(hdev))
1107 events[7] |= 0x20; /* LE Meta-Event */
1109 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1111 if (lmp_le_capable(hdev)) {
1112 memset(events, 0, sizeof(events));
1114 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1115 sizeof(events), events);
1119 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1121 struct hci_dev *hdev = req->hdev;
1123 if (lmp_bredr_capable(hdev))
1126 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1128 if (lmp_le_capable(hdev))
1131 hci_setup_event_mask(req);
1133 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1134 * local supported commands HCI command.
1136 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1137 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1139 if (lmp_ssp_capable(hdev)) {
1140 /* When SSP is available, then the host features page
1141 * should also be available as well. However some
1142 * controllers list the max_page as 0 as long as SSP
1143 * has not been enabled. To achieve proper debugging
1144 * output, force the minimum max_page to 1 at least.
1146 hdev->max_page = 0x01;
1148 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1150 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1151 sizeof(mode), &mode);
1153 struct hci_cp_write_eir cp;
1155 memset(hdev->eir, 0, sizeof(hdev->eir));
1156 memset(&cp, 0, sizeof(cp));
1158 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1162 if (lmp_inq_rssi_capable(hdev))
1163 hci_setup_inquiry_mode(req);
1165 if (lmp_inq_tx_pwr_capable(hdev))
1166 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1168 if (lmp_ext_feat_capable(hdev)) {
1169 struct hci_cp_read_local_ext_features cp;
1172 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1176 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1178 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1183 static void hci_setup_link_policy(struct hci_request *req)
1185 struct hci_dev *hdev = req->hdev;
1186 struct hci_cp_write_def_link_policy cp;
1187 u16 link_policy = 0;
1189 if (lmp_rswitch_capable(hdev))
1190 link_policy |= HCI_LP_RSWITCH;
1191 if (lmp_hold_capable(hdev))
1192 link_policy |= HCI_LP_HOLD;
1193 if (lmp_sniff_capable(hdev))
1194 link_policy |= HCI_LP_SNIFF;
1195 if (lmp_park_capable(hdev))
1196 link_policy |= HCI_LP_PARK;
1198 cp.policy = cpu_to_le16(link_policy);
1199 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1202 static void hci_set_le_support(struct hci_request *req)
1204 struct hci_dev *hdev = req->hdev;
1205 struct hci_cp_write_le_host_supported cp;
1207 /* LE-only devices do not support explicit enablement */
1208 if (!lmp_bredr_capable(hdev))
1211 memset(&cp, 0, sizeof(cp));
1213 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1215 cp.simul = lmp_le_br_capable(hdev);
1218 if (cp.le != lmp_host_le_capable(hdev))
1219 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1223 static void hci_set_event_mask_page_2(struct hci_request *req)
1225 struct hci_dev *hdev = req->hdev;
1226 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1228 /* If Connectionless Slave Broadcast master role is supported
1229 * enable all necessary events for it.
1231 if (hdev->features[2][0] & 0x01) {
1232 events[1] |= 0x40; /* Triggered Clock Capture */
1233 events[1] |= 0x80; /* Synchronization Train Complete */
1234 events[2] |= 0x10; /* Slave Page Response Timeout */
1235 events[2] |= 0x20; /* CSB Channel Map Change */
1238 /* If Connectionless Slave Broadcast slave role is supported
1239 * enable all necessary events for it.
1241 if (hdev->features[2][0] & 0x02) {
1242 events[2] |= 0x01; /* Synchronization Train Received */
1243 events[2] |= 0x02; /* CSB Receive */
1244 events[2] |= 0x04; /* CSB Timeout */
1245 events[2] |= 0x08; /* Truncated Page Complete */
1248 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1251 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1253 struct hci_dev *hdev = req->hdev;
1256 /* Some Broadcom based Bluetooth controllers do not support the
1257 * Delete Stored Link Key command. They are clearly indicating its
1258 * absence in the bit mask of supported commands.
1260 * Check the supported commands and only if the the command is marked
1261 * as supported send it. If not supported assume that the controller
1262 * does not have actual support for stored link keys which makes this
1263 * command redundant anyway.
1265 if (hdev->commands[6] & 0x80) {
1266 struct hci_cp_delete_stored_link_key cp;
1268 bacpy(&cp.bdaddr, BDADDR_ANY);
1269 cp.delete_all = 0x01;
1270 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1274 if (hdev->commands[5] & 0x10)
1275 hci_setup_link_policy(req);
1277 if (lmp_le_capable(hdev)) {
1278 /* If the controller has a public BD_ADDR, then by
1279 * default use that one. If this is a LE only
1280 * controller without one, default to the random
1283 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1284 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1286 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1288 hci_set_le_support(req);
1291 /* Read features beyond page 1 if available */
1292 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1293 struct hci_cp_read_local_ext_features cp;
1296 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1301 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1303 struct hci_dev *hdev = req->hdev;
1305 /* Set event mask page 2 if the HCI command for it is supported */
1306 if (hdev->commands[22] & 0x04)
1307 hci_set_event_mask_page_2(req);
1309 /* Check for Synchronization Train support */
1310 if (hdev->features[2][0] & 0x04)
1311 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1314 static int __hci_init(struct hci_dev *hdev)
1318 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1322 /* The Device Under Test (DUT) mode is special and available for
1323 * all controller types. So just create it early on.
1325 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1326 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1330 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1331 * BR/EDR/LE type controllers. AMP controllers only need the
1334 if (hdev->dev_type != HCI_BREDR)
1337 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1341 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1345 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1349 /* Only create debugfs entries during the initial setup
1350 * phase and not every time the controller gets powered on.
1352 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1355 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1357 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1358 &hdev->manufacturer);
1359 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1360 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1361 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1363 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1365 if (lmp_bredr_capable(hdev)) {
1366 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1367 hdev, &inquiry_cache_fops);
1368 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1369 hdev, &link_keys_fops);
1370 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1371 hdev, &use_debug_keys_fops);
1372 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1373 hdev, &dev_class_fops);
1374 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1375 hdev, &voice_setting_fops);
1378 if (lmp_ssp_capable(hdev)) {
1379 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1380 hdev, &auto_accept_delay_fops);
1381 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1382 hdev, &ssp_debug_mode_fops);
1385 if (lmp_sniff_capable(hdev)) {
1386 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1387 hdev, &idle_timeout_fops);
1388 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1389 hdev, &sniff_min_interval_fops);
1390 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1391 hdev, &sniff_max_interval_fops);
1394 if (lmp_le_capable(hdev)) {
1395 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1396 &hdev->le_white_list_size);
1397 debugfs_create_file("static_address", 0444, hdev->debugfs,
1398 hdev, &static_address_fops);
1399 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1400 hdev, &own_address_type_fops);
1401 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1402 hdev, &long_term_keys_fops);
1403 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1404 hdev, &conn_min_interval_fops);
1405 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1406 hdev, &conn_max_interval_fops);
1412 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1416 BT_DBG("%s %x", req->hdev->name, scan);
1418 /* Inquiry and Page scans */
1419 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1422 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1426 BT_DBG("%s %x", req->hdev->name, auth);
1428 /* Authentication */
1429 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1432 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1436 BT_DBG("%s %x", req->hdev->name, encrypt);
1439 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1442 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1444 __le16 policy = cpu_to_le16(opt);
1446 BT_DBG("%s %x", req->hdev->name, policy);
1448 /* Default link policy */
1449 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1452 /* Get HCI device by index.
1453 * Device is held on return. */
1454 struct hci_dev *hci_dev_get(int index)
1456 struct hci_dev *hdev = NULL, *d;
1458 BT_DBG("%d", index);
1463 read_lock(&hci_dev_list_lock);
1464 list_for_each_entry(d, &hci_dev_list, list) {
1465 if (d->id == index) {
1466 hdev = hci_dev_hold(d);
1470 read_unlock(&hci_dev_list_lock);
1474 /* ---- Inquiry support ---- */
1476 bool hci_discovery_active(struct hci_dev *hdev)
1478 struct discovery_state *discov = &hdev->discovery;
1480 switch (discov->state) {
1481 case DISCOVERY_FINDING:
1482 case DISCOVERY_RESOLVING:
1490 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1492 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1494 if (hdev->discovery.state == state)
1498 case DISCOVERY_STOPPED:
1499 if (hdev->discovery.state != DISCOVERY_STARTING)
1500 mgmt_discovering(hdev, 0);
1502 case DISCOVERY_STARTING:
1504 case DISCOVERY_FINDING:
1505 mgmt_discovering(hdev, 1);
1507 case DISCOVERY_RESOLVING:
1509 case DISCOVERY_STOPPING:
1513 hdev->discovery.state = state;
1516 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1518 struct discovery_state *cache = &hdev->discovery;
1519 struct inquiry_entry *p, *n;
1521 list_for_each_entry_safe(p, n, &cache->all, all) {
1526 INIT_LIST_HEAD(&cache->unknown);
1527 INIT_LIST_HEAD(&cache->resolve);
1530 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1533 struct discovery_state *cache = &hdev->discovery;
1534 struct inquiry_entry *e;
1536 BT_DBG("cache %p, %pMR", cache, bdaddr);
1538 list_for_each_entry(e, &cache->all, all) {
1539 if (!bacmp(&e->data.bdaddr, bdaddr))
1546 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1549 struct discovery_state *cache = &hdev->discovery;
1550 struct inquiry_entry *e;
1552 BT_DBG("cache %p, %pMR", cache, bdaddr);
1554 list_for_each_entry(e, &cache->unknown, list) {
1555 if (!bacmp(&e->data.bdaddr, bdaddr))
1562 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1566 struct discovery_state *cache = &hdev->discovery;
1567 struct inquiry_entry *e;
1569 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1571 list_for_each_entry(e, &cache->resolve, list) {
1572 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1574 if (!bacmp(&e->data.bdaddr, bdaddr))
1581 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1582 struct inquiry_entry *ie)
1584 struct discovery_state *cache = &hdev->discovery;
1585 struct list_head *pos = &cache->resolve;
1586 struct inquiry_entry *p;
1588 list_del(&ie->list);
1590 list_for_each_entry(p, &cache->resolve, list) {
1591 if (p->name_state != NAME_PENDING &&
1592 abs(p->data.rssi) >= abs(ie->data.rssi))
1597 list_add(&ie->list, pos);
1600 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1601 bool name_known, bool *ssp)
1603 struct discovery_state *cache = &hdev->discovery;
1604 struct inquiry_entry *ie;
1606 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1608 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1611 *ssp = data->ssp_mode;
1613 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1615 if (ie->data.ssp_mode && ssp)
1618 if (ie->name_state == NAME_NEEDED &&
1619 data->rssi != ie->data.rssi) {
1620 ie->data.rssi = data->rssi;
1621 hci_inquiry_cache_update_resolve(hdev, ie);
1627 /* Entry not in the cache. Add new one. */
1628 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1632 list_add(&ie->all, &cache->all);
1635 ie->name_state = NAME_KNOWN;
1637 ie->name_state = NAME_NOT_KNOWN;
1638 list_add(&ie->list, &cache->unknown);
1642 if (name_known && ie->name_state != NAME_KNOWN &&
1643 ie->name_state != NAME_PENDING) {
1644 ie->name_state = NAME_KNOWN;
1645 list_del(&ie->list);
1648 memcpy(&ie->data, data, sizeof(*data));
1649 ie->timestamp = jiffies;
1650 cache->timestamp = jiffies;
1652 if (ie->name_state == NAME_NOT_KNOWN)
1658 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1660 struct discovery_state *cache = &hdev->discovery;
1661 struct inquiry_info *info = (struct inquiry_info *) buf;
1662 struct inquiry_entry *e;
1665 list_for_each_entry(e, &cache->all, all) {
1666 struct inquiry_data *data = &e->data;
1671 bacpy(&info->bdaddr, &data->bdaddr);
1672 info->pscan_rep_mode = data->pscan_rep_mode;
1673 info->pscan_period_mode = data->pscan_period_mode;
1674 info->pscan_mode = data->pscan_mode;
1675 memcpy(info->dev_class, data->dev_class, 3);
1676 info->clock_offset = data->clock_offset;
1682 BT_DBG("cache %p, copied %d", cache, copied);
1686 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1688 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1689 struct hci_dev *hdev = req->hdev;
1690 struct hci_cp_inquiry cp;
1692 BT_DBG("%s", hdev->name);
1694 if (test_bit(HCI_INQUIRY, &hdev->flags))
1698 memcpy(&cp.lap, &ir->lap, 3);
1699 cp.length = ir->length;
1700 cp.num_rsp = ir->num_rsp;
1701 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1704 static int wait_inquiry(void *word)
1707 return signal_pending(current);
1710 int hci_inquiry(void __user *arg)
1712 __u8 __user *ptr = arg;
1713 struct hci_inquiry_req ir;
1714 struct hci_dev *hdev;
1715 int err = 0, do_inquiry = 0, max_rsp;
1719 if (copy_from_user(&ir, ptr, sizeof(ir)))
1722 hdev = hci_dev_get(ir.dev_id);
1726 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1731 if (hdev->dev_type != HCI_BREDR) {
1736 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1742 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1743 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1744 hci_inquiry_cache_flush(hdev);
1747 hci_dev_unlock(hdev);
1749 timeo = ir.length * msecs_to_jiffies(2000);
1752 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1757 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1758 * cleared). If it is interrupted by a signal, return -EINTR.
1760 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1761 TASK_INTERRUPTIBLE))
1765 /* for unlimited number of responses we will use buffer with
1768 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1770 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1771 * copy it to the user space.
1773 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1780 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1781 hci_dev_unlock(hdev);
1783 BT_DBG("num_rsp %d", ir.num_rsp);
1785 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1787 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1800 static int hci_dev_do_open(struct hci_dev *hdev)
1804 BT_DBG("%s %p", hdev->name, hdev);
1808 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1813 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1814 /* Check for rfkill but allow the HCI setup stage to
1815 * proceed (which in itself doesn't cause any RF activity).
1817 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1822 /* Check for valid public address or a configured static
1823 * random adddress, but let the HCI setup proceed to
1824 * be able to determine if there is a public address
1827 * This check is only valid for BR/EDR controllers
1828 * since AMP controllers do not have an address.
1830 if (hdev->dev_type == HCI_BREDR &&
1831 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1832 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1833 ret = -EADDRNOTAVAIL;
1838 if (test_bit(HCI_UP, &hdev->flags)) {
1843 if (hdev->open(hdev)) {
1848 atomic_set(&hdev->cmd_cnt, 1);
1849 set_bit(HCI_INIT, &hdev->flags);
1851 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1852 ret = hdev->setup(hdev);
1855 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1856 set_bit(HCI_RAW, &hdev->flags);
1858 if (!test_bit(HCI_RAW, &hdev->flags) &&
1859 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1860 ret = __hci_init(hdev);
1863 clear_bit(HCI_INIT, &hdev->flags);
1867 set_bit(HCI_UP, &hdev->flags);
1868 hci_notify(hdev, HCI_DEV_UP);
1869 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1870 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1871 hdev->dev_type == HCI_BREDR) {
1873 mgmt_powered(hdev, 1);
1874 hci_dev_unlock(hdev);
1877 /* Init failed, cleanup */
1878 flush_work(&hdev->tx_work);
1879 flush_work(&hdev->cmd_work);
1880 flush_work(&hdev->rx_work);
1882 skb_queue_purge(&hdev->cmd_q);
1883 skb_queue_purge(&hdev->rx_q);
1888 if (hdev->sent_cmd) {
1889 kfree_skb(hdev->sent_cmd);
1890 hdev->sent_cmd = NULL;
1898 hci_req_unlock(hdev);
1902 /* ---- HCI ioctl helpers ---- */
1904 int hci_dev_open(__u16 dev)
1906 struct hci_dev *hdev;
1909 hdev = hci_dev_get(dev);
1913 /* We need to ensure that no other power on/off work is pending
1914 * before proceeding to call hci_dev_do_open. This is
1915 * particularly important if the setup procedure has not yet
1918 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1919 cancel_delayed_work(&hdev->power_off);
1921 /* After this call it is guaranteed that the setup procedure
1922 * has finished. This means that error conditions like RFKILL
1923 * or no valid public or static random address apply.
1925 flush_workqueue(hdev->req_workqueue);
1927 err = hci_dev_do_open(hdev);
1934 static int hci_dev_do_close(struct hci_dev *hdev)
1936 BT_DBG("%s %p", hdev->name, hdev);
1938 cancel_delayed_work(&hdev->power_off);
1940 hci_req_cancel(hdev, ENODEV);
1943 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1944 del_timer_sync(&hdev->cmd_timer);
1945 hci_req_unlock(hdev);
1949 /* Flush RX and TX works */
1950 flush_work(&hdev->tx_work);
1951 flush_work(&hdev->rx_work);
1953 if (hdev->discov_timeout > 0) {
1954 cancel_delayed_work(&hdev->discov_off);
1955 hdev->discov_timeout = 0;
1956 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1957 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1960 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1961 cancel_delayed_work(&hdev->service_cache);
1963 cancel_delayed_work_sync(&hdev->le_scan_disable);
1966 hci_inquiry_cache_flush(hdev);
1967 hci_conn_hash_flush(hdev);
1968 hci_dev_unlock(hdev);
1970 hci_notify(hdev, HCI_DEV_DOWN);
1976 skb_queue_purge(&hdev->cmd_q);
1977 atomic_set(&hdev->cmd_cnt, 1);
1978 if (!test_bit(HCI_RAW, &hdev->flags) &&
1979 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1980 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1981 set_bit(HCI_INIT, &hdev->flags);
1982 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1983 clear_bit(HCI_INIT, &hdev->flags);
1986 /* flush cmd work */
1987 flush_work(&hdev->cmd_work);
1990 skb_queue_purge(&hdev->rx_q);
1991 skb_queue_purge(&hdev->cmd_q);
1992 skb_queue_purge(&hdev->raw_q);
1994 /* Drop last sent command */
1995 if (hdev->sent_cmd) {
1996 del_timer_sync(&hdev->cmd_timer);
1997 kfree_skb(hdev->sent_cmd);
1998 hdev->sent_cmd = NULL;
2001 kfree_skb(hdev->recv_evt);
2002 hdev->recv_evt = NULL;
2004 /* After this point our queues are empty
2005 * and no tasks are scheduled. */
2010 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2012 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2013 if (hdev->dev_type == HCI_BREDR) {
2015 mgmt_powered(hdev, 0);
2016 hci_dev_unlock(hdev);
2020 /* Controller radio is available but is currently powered down */
2021 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2023 memset(hdev->eir, 0, sizeof(hdev->eir));
2024 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2026 hci_req_unlock(hdev);
2032 int hci_dev_close(__u16 dev)
2034 struct hci_dev *hdev;
2037 hdev = hci_dev_get(dev);
2041 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2046 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2047 cancel_delayed_work(&hdev->power_off);
2049 err = hci_dev_do_close(hdev);
2056 int hci_dev_reset(__u16 dev)
2058 struct hci_dev *hdev;
2061 hdev = hci_dev_get(dev);
2067 if (!test_bit(HCI_UP, &hdev->flags)) {
2072 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2078 skb_queue_purge(&hdev->rx_q);
2079 skb_queue_purge(&hdev->cmd_q);
2082 hci_inquiry_cache_flush(hdev);
2083 hci_conn_hash_flush(hdev);
2084 hci_dev_unlock(hdev);
2089 atomic_set(&hdev->cmd_cnt, 1);
2090 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2092 if (!test_bit(HCI_RAW, &hdev->flags))
2093 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2096 hci_req_unlock(hdev);
2101 int hci_dev_reset_stat(__u16 dev)
2103 struct hci_dev *hdev;
2106 hdev = hci_dev_get(dev);
2110 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2122 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2124 struct hci_dev *hdev;
2125 struct hci_dev_req dr;
2128 if (copy_from_user(&dr, arg, sizeof(dr)))
2131 hdev = hci_dev_get(dr.dev_id);
2135 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2140 if (hdev->dev_type != HCI_BREDR) {
2145 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2152 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2157 if (!lmp_encrypt_capable(hdev)) {
2162 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2163 /* Auth must be enabled first */
2164 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2170 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2175 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2180 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2184 case HCISETLINKMODE:
2185 hdev->link_mode = ((__u16) dr.dev_opt) &
2186 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2190 hdev->pkt_type = (__u16) dr.dev_opt;
2194 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2195 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2199 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2200 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2213 int hci_get_dev_list(void __user *arg)
2215 struct hci_dev *hdev;
2216 struct hci_dev_list_req *dl;
2217 struct hci_dev_req *dr;
2218 int n = 0, size, err;
2221 if (get_user(dev_num, (__u16 __user *) arg))
2224 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2227 size = sizeof(*dl) + dev_num * sizeof(*dr);
2229 dl = kzalloc(size, GFP_KERNEL);
2235 read_lock(&hci_dev_list_lock);
2236 list_for_each_entry(hdev, &hci_dev_list, list) {
2237 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2238 cancel_delayed_work(&hdev->power_off);
2240 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2241 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2243 (dr + n)->dev_id = hdev->id;
2244 (dr + n)->dev_opt = hdev->flags;
2249 read_unlock(&hci_dev_list_lock);
2252 size = sizeof(*dl) + n * sizeof(*dr);
2254 err = copy_to_user(arg, dl, size);
2257 return err ? -EFAULT : 0;
2260 int hci_get_dev_info(void __user *arg)
2262 struct hci_dev *hdev;
2263 struct hci_dev_info di;
2266 if (copy_from_user(&di, arg, sizeof(di)))
2269 hdev = hci_dev_get(di.dev_id);
2273 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2274 cancel_delayed_work_sync(&hdev->power_off);
2276 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2277 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2279 strcpy(di.name, hdev->name);
2280 di.bdaddr = hdev->bdaddr;
2281 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2282 di.flags = hdev->flags;
2283 di.pkt_type = hdev->pkt_type;
2284 if (lmp_bredr_capable(hdev)) {
2285 di.acl_mtu = hdev->acl_mtu;
2286 di.acl_pkts = hdev->acl_pkts;
2287 di.sco_mtu = hdev->sco_mtu;
2288 di.sco_pkts = hdev->sco_pkts;
2290 di.acl_mtu = hdev->le_mtu;
2291 di.acl_pkts = hdev->le_pkts;
2295 di.link_policy = hdev->link_policy;
2296 di.link_mode = hdev->link_mode;
2298 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2299 memcpy(&di.features, &hdev->features, sizeof(di.features));
2301 if (copy_to_user(arg, &di, sizeof(di)))
2309 /* ---- Interface to HCI drivers ---- */
2311 static int hci_rfkill_set_block(void *data, bool blocked)
2313 struct hci_dev *hdev = data;
2315 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2317 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2321 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2322 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2323 hci_dev_do_close(hdev);
2325 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2331 static const struct rfkill_ops hci_rfkill_ops = {
2332 .set_block = hci_rfkill_set_block,
2335 static void hci_power_on(struct work_struct *work)
2337 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2340 BT_DBG("%s", hdev->name);
2342 err = hci_dev_do_open(hdev);
2344 mgmt_set_powered_failed(hdev, err);
2348 /* During the HCI setup phase, a few error conditions are
2349 * ignored and they need to be checked now. If they are still
2350 * valid, it is important to turn the device back off.
2352 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2353 (hdev->dev_type == HCI_BREDR &&
2354 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2355 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2356 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2357 hci_dev_do_close(hdev);
2358 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2359 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2360 HCI_AUTO_OFF_TIMEOUT);
2363 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2364 mgmt_index_added(hdev);
2367 static void hci_power_off(struct work_struct *work)
2369 struct hci_dev *hdev = container_of(work, struct hci_dev,
2372 BT_DBG("%s", hdev->name);
2374 hci_dev_do_close(hdev);
2377 static void hci_discov_off(struct work_struct *work)
2379 struct hci_dev *hdev;
2381 hdev = container_of(work, struct hci_dev, discov_off.work);
2383 BT_DBG("%s", hdev->name);
2385 mgmt_discoverable_timeout(hdev);
2388 int hci_uuids_clear(struct hci_dev *hdev)
2390 struct bt_uuid *uuid, *tmp;
2392 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2393 list_del(&uuid->list);
2400 int hci_link_keys_clear(struct hci_dev *hdev)
2402 struct list_head *p, *n;
2404 list_for_each_safe(p, n, &hdev->link_keys) {
2405 struct link_key *key;
2407 key = list_entry(p, struct link_key, list);
2416 int hci_smp_ltks_clear(struct hci_dev *hdev)
2418 struct smp_ltk *k, *tmp;
2420 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2428 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2432 list_for_each_entry(k, &hdev->link_keys, list)
2433 if (bacmp(bdaddr, &k->bdaddr) == 0)
2439 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2440 u8 key_type, u8 old_key_type)
2443 if (key_type < 0x03)
2446 /* Debug keys are insecure so don't store them persistently */
2447 if (key_type == HCI_LK_DEBUG_COMBINATION)
2450 /* Changed combination key and there's no previous one */
2451 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2454 /* Security mode 3 case */
2458 /* Neither local nor remote side had no-bonding as requirement */
2459 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2462 /* Local side had dedicated bonding as requirement */
2463 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2466 /* Remote side had dedicated bonding as requirement */
2467 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2470 /* If none of the above criteria match, then don't store the key
2475 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2479 list_for_each_entry(k, &hdev->long_term_keys, list) {
2480 if (k->ediv != ediv ||
2481 memcmp(rand, k->rand, sizeof(k->rand)))
2490 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2495 list_for_each_entry(k, &hdev->long_term_keys, list)
2496 if (addr_type == k->bdaddr_type &&
2497 bacmp(bdaddr, &k->bdaddr) == 0)
2503 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2504 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2506 struct link_key *key, *old_key;
2510 old_key = hci_find_link_key(hdev, bdaddr);
2512 old_key_type = old_key->type;
2515 old_key_type = conn ? conn->key_type : 0xff;
2516 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2519 list_add(&key->list, &hdev->link_keys);
2522 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2524 /* Some buggy controller combinations generate a changed
2525 * combination key for legacy pairing even when there's no
2527 if (type == HCI_LK_CHANGED_COMBINATION &&
2528 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2529 type = HCI_LK_COMBINATION;
2531 conn->key_type = type;
2534 bacpy(&key->bdaddr, bdaddr);
2535 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2536 key->pin_len = pin_len;
2538 if (type == HCI_LK_CHANGED_COMBINATION)
2539 key->type = old_key_type;
2546 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2548 mgmt_new_link_key(hdev, key, persistent);
2551 conn->flush_key = !persistent;
2556 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2557 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2560 struct smp_ltk *key, *old_key;
2562 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2565 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2569 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2572 list_add(&key->list, &hdev->long_term_keys);
2575 bacpy(&key->bdaddr, bdaddr);
2576 key->bdaddr_type = addr_type;
2577 memcpy(key->val, tk, sizeof(key->val));
2578 key->authenticated = authenticated;
2580 key->enc_size = enc_size;
2582 memcpy(key->rand, rand, sizeof(key->rand));
2587 if (type & HCI_SMP_LTK)
2588 mgmt_new_ltk(hdev, key, 1);
2593 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2595 struct link_key *key;
2597 key = hci_find_link_key(hdev, bdaddr);
2601 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2603 list_del(&key->list);
2609 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2611 struct smp_ltk *k, *tmp;
2613 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2614 if (bacmp(bdaddr, &k->bdaddr))
2617 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2626 /* HCI command timer function */
2627 static void hci_cmd_timeout(unsigned long arg)
2629 struct hci_dev *hdev = (void *) arg;
2631 if (hdev->sent_cmd) {
2632 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2633 u16 opcode = __le16_to_cpu(sent->opcode);
2635 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2637 BT_ERR("%s command tx timeout", hdev->name);
2640 atomic_set(&hdev->cmd_cnt, 1);
2641 queue_work(hdev->workqueue, &hdev->cmd_work);
2644 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2647 struct oob_data *data;
2649 list_for_each_entry(data, &hdev->remote_oob_data, list)
2650 if (bacmp(bdaddr, &data->bdaddr) == 0)
2656 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2658 struct oob_data *data;
2660 data = hci_find_remote_oob_data(hdev, bdaddr);
2664 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2666 list_del(&data->list);
2672 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2674 struct oob_data *data, *n;
2676 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2677 list_del(&data->list);
2684 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2687 struct oob_data *data;
2689 data = hci_find_remote_oob_data(hdev, bdaddr);
2692 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2696 bacpy(&data->bdaddr, bdaddr);
2697 list_add(&data->list, &hdev->remote_oob_data);
2700 memcpy(data->hash, hash, sizeof(data->hash));
2701 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2703 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2708 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2709 bdaddr_t *bdaddr, u8 type)
2711 struct bdaddr_list *b;
2713 list_for_each_entry(b, &hdev->blacklist, list) {
2714 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2721 int hci_blacklist_clear(struct hci_dev *hdev)
2723 struct list_head *p, *n;
2725 list_for_each_safe(p, n, &hdev->blacklist) {
2726 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2735 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2737 struct bdaddr_list *entry;
2739 if (!bacmp(bdaddr, BDADDR_ANY))
2742 if (hci_blacklist_lookup(hdev, bdaddr, type))
2745 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2749 bacpy(&entry->bdaddr, bdaddr);
2750 entry->bdaddr_type = type;
2752 list_add(&entry->list, &hdev->blacklist);
2754 return mgmt_device_blocked(hdev, bdaddr, type);
2757 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2759 struct bdaddr_list *entry;
2761 if (!bacmp(bdaddr, BDADDR_ANY))
2762 return hci_blacklist_clear(hdev);
2764 entry = hci_blacklist_lookup(hdev, bdaddr, type);
2768 list_del(&entry->list);
2771 return mgmt_device_unblocked(hdev, bdaddr, type);
2774 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2777 BT_ERR("Failed to start inquiry: status %d", status);
2780 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2781 hci_dev_unlock(hdev);
2786 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2788 /* General inquiry access code (GIAC) */
2789 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2790 struct hci_request req;
2791 struct hci_cp_inquiry cp;
2795 BT_ERR("Failed to disable LE scanning: status %d", status);
2799 switch (hdev->discovery.type) {
2800 case DISCOV_TYPE_LE:
2802 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2803 hci_dev_unlock(hdev);
2806 case DISCOV_TYPE_INTERLEAVED:
2807 hci_req_init(&req, hdev);
2809 memset(&cp, 0, sizeof(cp));
2810 memcpy(&cp.lap, lap, sizeof(cp.lap));
2811 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2812 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2816 hci_inquiry_cache_flush(hdev);
2818 err = hci_req_run(&req, inquiry_complete);
2820 BT_ERR("Inquiry request failed: err %d", err);
2821 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2824 hci_dev_unlock(hdev);
2829 static void le_scan_disable_work(struct work_struct *work)
2831 struct hci_dev *hdev = container_of(work, struct hci_dev,
2832 le_scan_disable.work);
2833 struct hci_cp_le_set_scan_enable cp;
2834 struct hci_request req;
2837 BT_DBG("%s", hdev->name);
2839 hci_req_init(&req, hdev);
2841 memset(&cp, 0, sizeof(cp));
2842 cp.enable = LE_SCAN_DISABLE;
2843 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2845 err = hci_req_run(&req, le_scan_disable_work_complete);
2847 BT_ERR("Disable LE scanning request failed: err %d", err);
2850 /* Alloc HCI device */
2851 struct hci_dev *hci_alloc_dev(void)
2853 struct hci_dev *hdev;
2855 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2859 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2860 hdev->esco_type = (ESCO_HV1);
2861 hdev->link_mode = (HCI_LM_ACCEPT);
2862 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2863 hdev->io_capability = 0x03; /* No Input No Output */
2864 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2865 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2867 hdev->sniff_max_interval = 800;
2868 hdev->sniff_min_interval = 80;
2870 hdev->le_scan_interval = 0x0060;
2871 hdev->le_scan_window = 0x0030;
2872 hdev->le_conn_min_interval = 0x0028;
2873 hdev->le_conn_max_interval = 0x0038;
2875 mutex_init(&hdev->lock);
2876 mutex_init(&hdev->req_lock);
2878 INIT_LIST_HEAD(&hdev->mgmt_pending);
2879 INIT_LIST_HEAD(&hdev->blacklist);
2880 INIT_LIST_HEAD(&hdev->uuids);
2881 INIT_LIST_HEAD(&hdev->link_keys);
2882 INIT_LIST_HEAD(&hdev->long_term_keys);
2883 INIT_LIST_HEAD(&hdev->remote_oob_data);
2884 INIT_LIST_HEAD(&hdev->conn_hash.list);
2886 INIT_WORK(&hdev->rx_work, hci_rx_work);
2887 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2888 INIT_WORK(&hdev->tx_work, hci_tx_work);
2889 INIT_WORK(&hdev->power_on, hci_power_on);
2891 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2892 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2893 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2895 skb_queue_head_init(&hdev->rx_q);
2896 skb_queue_head_init(&hdev->cmd_q);
2897 skb_queue_head_init(&hdev->raw_q);
2899 init_waitqueue_head(&hdev->req_wait_q);
2901 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2903 hci_init_sysfs(hdev);
2904 discovery_init(hdev);
2908 EXPORT_SYMBOL(hci_alloc_dev);
2910 /* Free HCI device */
2911 void hci_free_dev(struct hci_dev *hdev)
2913 /* will free via device release */
2914 put_device(&hdev->dev);
2916 EXPORT_SYMBOL(hci_free_dev);
2918 /* Register HCI device */
2919 int hci_register_dev(struct hci_dev *hdev)
2923 if (!hdev->open || !hdev->close)
2926 /* Do not allow HCI_AMP devices to register at index 0,
2927 * so the index can be used as the AMP controller ID.
2929 switch (hdev->dev_type) {
2931 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2934 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2943 sprintf(hdev->name, "hci%d", id);
2946 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2948 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2949 WQ_MEM_RECLAIM, 1, hdev->name);
2950 if (!hdev->workqueue) {
2955 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2956 WQ_MEM_RECLAIM, 1, hdev->name);
2957 if (!hdev->req_workqueue) {
2958 destroy_workqueue(hdev->workqueue);
2963 if (!IS_ERR_OR_NULL(bt_debugfs))
2964 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2966 dev_set_name(&hdev->dev, "%s", hdev->name);
2968 error = device_add(&hdev->dev);
2972 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2973 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2976 if (rfkill_register(hdev->rfkill) < 0) {
2977 rfkill_destroy(hdev->rfkill);
2978 hdev->rfkill = NULL;
2982 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2983 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2985 set_bit(HCI_SETUP, &hdev->dev_flags);
2986 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2988 if (hdev->dev_type == HCI_BREDR) {
2989 /* Assume BR/EDR support until proven otherwise (such as
2990 * through reading supported features during init.
2992 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2995 write_lock(&hci_dev_list_lock);
2996 list_add(&hdev->list, &hci_dev_list);
2997 write_unlock(&hci_dev_list_lock);
2999 hci_notify(hdev, HCI_DEV_REG);
3002 queue_work(hdev->req_workqueue, &hdev->power_on);
3007 destroy_workqueue(hdev->workqueue);
3008 destroy_workqueue(hdev->req_workqueue);
3010 ida_simple_remove(&hci_index_ida, hdev->id);
3014 EXPORT_SYMBOL(hci_register_dev);
3016 /* Unregister HCI device */
3017 void hci_unregister_dev(struct hci_dev *hdev)
3021 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3023 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3027 write_lock(&hci_dev_list_lock);
3028 list_del(&hdev->list);
3029 write_unlock(&hci_dev_list_lock);
3031 hci_dev_do_close(hdev);
3033 for (i = 0; i < NUM_REASSEMBLY; i++)
3034 kfree_skb(hdev->reassembly[i]);
3036 cancel_work_sync(&hdev->power_on);
3038 if (!test_bit(HCI_INIT, &hdev->flags) &&
3039 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3041 mgmt_index_removed(hdev);
3042 hci_dev_unlock(hdev);
3045 /* mgmt_index_removed should take care of emptying the
3047 BUG_ON(!list_empty(&hdev->mgmt_pending));
3049 hci_notify(hdev, HCI_DEV_UNREG);
3052 rfkill_unregister(hdev->rfkill);
3053 rfkill_destroy(hdev->rfkill);
3056 device_del(&hdev->dev);
3058 debugfs_remove_recursive(hdev->debugfs);
3060 destroy_workqueue(hdev->workqueue);
3061 destroy_workqueue(hdev->req_workqueue);
3064 hci_blacklist_clear(hdev);
3065 hci_uuids_clear(hdev);
3066 hci_link_keys_clear(hdev);
3067 hci_smp_ltks_clear(hdev);
3068 hci_remote_oob_data_clear(hdev);
3069 hci_dev_unlock(hdev);
3073 ida_simple_remove(&hci_index_ida, id);
3075 EXPORT_SYMBOL(hci_unregister_dev);
3077 /* Suspend HCI device */
3078 int hci_suspend_dev(struct hci_dev *hdev)
3080 hci_notify(hdev, HCI_DEV_SUSPEND);
3083 EXPORT_SYMBOL(hci_suspend_dev);
3085 /* Resume HCI device */
3086 int hci_resume_dev(struct hci_dev *hdev)
3088 hci_notify(hdev, HCI_DEV_RESUME);
3091 EXPORT_SYMBOL(hci_resume_dev);
3093 /* Receive frame from HCI drivers */
3094 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3096 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3097 && !test_bit(HCI_INIT, &hdev->flags))) {
3103 bt_cb(skb)->incoming = 1;
3106 __net_timestamp(skb);
3108 skb_queue_tail(&hdev->rx_q, skb);
3109 queue_work(hdev->workqueue, &hdev->rx_work);
3113 EXPORT_SYMBOL(hci_recv_frame);
3115 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3116 int count, __u8 index)
3121 struct sk_buff *skb;
3122 struct bt_skb_cb *scb;
3124 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3125 index >= NUM_REASSEMBLY)
3128 skb = hdev->reassembly[index];
3132 case HCI_ACLDATA_PKT:
3133 len = HCI_MAX_FRAME_SIZE;
3134 hlen = HCI_ACL_HDR_SIZE;
3137 len = HCI_MAX_EVENT_SIZE;
3138 hlen = HCI_EVENT_HDR_SIZE;
3140 case HCI_SCODATA_PKT:
3141 len = HCI_MAX_SCO_SIZE;
3142 hlen = HCI_SCO_HDR_SIZE;
3146 skb = bt_skb_alloc(len, GFP_ATOMIC);
3150 scb = (void *) skb->cb;
3152 scb->pkt_type = type;
3154 hdev->reassembly[index] = skb;
3158 scb = (void *) skb->cb;
3159 len = min_t(uint, scb->expect, count);
3161 memcpy(skb_put(skb, len), data, len);
3170 if (skb->len == HCI_EVENT_HDR_SIZE) {
3171 struct hci_event_hdr *h = hci_event_hdr(skb);
3172 scb->expect = h->plen;
3174 if (skb_tailroom(skb) < scb->expect) {
3176 hdev->reassembly[index] = NULL;
3182 case HCI_ACLDATA_PKT:
3183 if (skb->len == HCI_ACL_HDR_SIZE) {
3184 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3185 scb->expect = __le16_to_cpu(h->dlen);
3187 if (skb_tailroom(skb) < scb->expect) {
3189 hdev->reassembly[index] = NULL;
3195 case HCI_SCODATA_PKT:
3196 if (skb->len == HCI_SCO_HDR_SIZE) {
3197 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3198 scb->expect = h->dlen;
3200 if (skb_tailroom(skb) < scb->expect) {
3202 hdev->reassembly[index] = NULL;
3209 if (scb->expect == 0) {
3210 /* Complete frame */
3212 bt_cb(skb)->pkt_type = type;
3213 hci_recv_frame(hdev, skb);
3215 hdev->reassembly[index] = NULL;
3223 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3227 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3231 rem = hci_reassembly(hdev, type, data, count, type - 1);
3235 data += (count - rem);
3241 EXPORT_SYMBOL(hci_recv_fragment);
3243 #define STREAM_REASSEMBLY 0
3245 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3251 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3254 struct { char type; } *pkt;
3256 /* Start of the frame */
3263 type = bt_cb(skb)->pkt_type;
3265 rem = hci_reassembly(hdev, type, data, count,
3270 data += (count - rem);
3276 EXPORT_SYMBOL(hci_recv_stream_fragment);
3278 /* ---- Interface to upper protocols ---- */
3280 int hci_register_cb(struct hci_cb *cb)
3282 BT_DBG("%p name %s", cb, cb->name);
3284 write_lock(&hci_cb_list_lock);
3285 list_add(&cb->list, &hci_cb_list);
3286 write_unlock(&hci_cb_list_lock);
3290 EXPORT_SYMBOL(hci_register_cb);
3292 int hci_unregister_cb(struct hci_cb *cb)
3294 BT_DBG("%p name %s", cb, cb->name);
3296 write_lock(&hci_cb_list_lock);
3297 list_del(&cb->list);
3298 write_unlock(&hci_cb_list_lock);
3302 EXPORT_SYMBOL(hci_unregister_cb);
3304 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3306 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3309 __net_timestamp(skb);
3311 /* Send copy to monitor */
3312 hci_send_to_monitor(hdev, skb);
3314 if (atomic_read(&hdev->promisc)) {
3315 /* Send copy to the sockets */
3316 hci_send_to_sock(hdev, skb);
3319 /* Get rid of skb owner, prior to sending to the driver. */
3322 if (hdev->send(hdev, skb) < 0)
3323 BT_ERR("%s sending frame failed", hdev->name);
3326 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3328 skb_queue_head_init(&req->cmd_q);
3333 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3335 struct hci_dev *hdev = req->hdev;
3336 struct sk_buff *skb;
3337 unsigned long flags;
3339 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3341 /* If an error occured during request building, remove all HCI
3342 * commands queued on the HCI request queue.
3345 skb_queue_purge(&req->cmd_q);
3349 /* Do not allow empty requests */
3350 if (skb_queue_empty(&req->cmd_q))
3353 skb = skb_peek_tail(&req->cmd_q);
3354 bt_cb(skb)->req.complete = complete;
3356 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3357 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3358 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3360 queue_work(hdev->workqueue, &hdev->cmd_work);
3365 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3366 u32 plen, const void *param)
3368 int len = HCI_COMMAND_HDR_SIZE + plen;
3369 struct hci_command_hdr *hdr;
3370 struct sk_buff *skb;
3372 skb = bt_skb_alloc(len, GFP_ATOMIC);
3376 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3377 hdr->opcode = cpu_to_le16(opcode);
3381 memcpy(skb_put(skb, plen), param, plen);
3383 BT_DBG("skb len %d", skb->len);
3385 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3390 /* Send HCI command */
3391 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3394 struct sk_buff *skb;
3396 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3398 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3400 BT_ERR("%s no memory for command", hdev->name);
3404 /* Stand-alone HCI commands must be flaged as
3405 * single-command requests.
3407 bt_cb(skb)->req.start = true;
3409 skb_queue_tail(&hdev->cmd_q, skb);
3410 queue_work(hdev->workqueue, &hdev->cmd_work);
3415 /* Queue a command to an asynchronous HCI request */
3416 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3417 const void *param, u8 event)
3419 struct hci_dev *hdev = req->hdev;
3420 struct sk_buff *skb;
3422 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3424 /* If an error occured during request building, there is no point in
3425 * queueing the HCI command. We can simply return.
3430 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3432 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3433 hdev->name, opcode);
3438 if (skb_queue_empty(&req->cmd_q))
3439 bt_cb(skb)->req.start = true;
3441 bt_cb(skb)->req.event = event;
3443 skb_queue_tail(&req->cmd_q, skb);
3446 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3449 hci_req_add_ev(req, opcode, plen, param, 0);
3452 /* Get data from the previously sent command */
3453 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3455 struct hci_command_hdr *hdr;
3457 if (!hdev->sent_cmd)
3460 hdr = (void *) hdev->sent_cmd->data;
3462 if (hdr->opcode != cpu_to_le16(opcode))
3465 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3467 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3471 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3473 struct hci_acl_hdr *hdr;
3476 skb_push(skb, HCI_ACL_HDR_SIZE);
3477 skb_reset_transport_header(skb);
3478 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3479 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3480 hdr->dlen = cpu_to_le16(len);
3483 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3484 struct sk_buff *skb, __u16 flags)
3486 struct hci_conn *conn = chan->conn;
3487 struct hci_dev *hdev = conn->hdev;
3488 struct sk_buff *list;
3490 skb->len = skb_headlen(skb);
3493 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3495 switch (hdev->dev_type) {
3497 hci_add_acl_hdr(skb, conn->handle, flags);
3500 hci_add_acl_hdr(skb, chan->handle, flags);
3503 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3507 list = skb_shinfo(skb)->frag_list;
3509 /* Non fragmented */
3510 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3512 skb_queue_tail(queue, skb);
3515 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3517 skb_shinfo(skb)->frag_list = NULL;
3519 /* Queue all fragments atomically */
3520 spin_lock(&queue->lock);
3522 __skb_queue_tail(queue, skb);
3524 flags &= ~ACL_START;
3527 skb = list; list = list->next;
3529 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3530 hci_add_acl_hdr(skb, conn->handle, flags);
3532 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3534 __skb_queue_tail(queue, skb);
3537 spin_unlock(&queue->lock);
3541 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3543 struct hci_dev *hdev = chan->conn->hdev;
3545 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3547 hci_queue_acl(chan, &chan->data_q, skb, flags);
3549 queue_work(hdev->workqueue, &hdev->tx_work);
3553 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3555 struct hci_dev *hdev = conn->hdev;
3556 struct hci_sco_hdr hdr;
3558 BT_DBG("%s len %d", hdev->name, skb->len);
3560 hdr.handle = cpu_to_le16(conn->handle);
3561 hdr.dlen = skb->len;
3563 skb_push(skb, HCI_SCO_HDR_SIZE);
3564 skb_reset_transport_header(skb);
3565 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3567 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3569 skb_queue_tail(&conn->data_q, skb);
3570 queue_work(hdev->workqueue, &hdev->tx_work);
3573 /* ---- HCI TX task (outgoing data) ---- */
3575 /* HCI Connection scheduler */
3576 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3579 struct hci_conn_hash *h = &hdev->conn_hash;
3580 struct hci_conn *conn = NULL, *c;
3581 unsigned int num = 0, min = ~0;
3583 /* We don't have to lock device here. Connections are always
3584 * added and removed with TX task disabled. */
3588 list_for_each_entry_rcu(c, &h->list, list) {
3589 if (c->type != type || skb_queue_empty(&c->data_q))
3592 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3597 if (c->sent < min) {
3602 if (hci_conn_num(hdev, type) == num)
3611 switch (conn->type) {
3613 cnt = hdev->acl_cnt;
3617 cnt = hdev->sco_cnt;
3620 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3624 BT_ERR("Unknown link type");
3632 BT_DBG("conn %p quote %d", conn, *quote);
3636 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3638 struct hci_conn_hash *h = &hdev->conn_hash;
3641 BT_ERR("%s link tx timeout", hdev->name);
3645 /* Kill stalled connections */
3646 list_for_each_entry_rcu(c, &h->list, list) {
3647 if (c->type == type && c->sent) {
3648 BT_ERR("%s killing stalled connection %pMR",
3649 hdev->name, &c->dst);
3650 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3657 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3660 struct hci_conn_hash *h = &hdev->conn_hash;
3661 struct hci_chan *chan = NULL;
3662 unsigned int num = 0, min = ~0, cur_prio = 0;
3663 struct hci_conn *conn;
3664 int cnt, q, conn_num = 0;
3666 BT_DBG("%s", hdev->name);
3670 list_for_each_entry_rcu(conn, &h->list, list) {
3671 struct hci_chan *tmp;
3673 if (conn->type != type)
3676 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3681 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3682 struct sk_buff *skb;
3684 if (skb_queue_empty(&tmp->data_q))
3687 skb = skb_peek(&tmp->data_q);
3688 if (skb->priority < cur_prio)
3691 if (skb->priority > cur_prio) {
3694 cur_prio = skb->priority;
3699 if (conn->sent < min) {
3705 if (hci_conn_num(hdev, type) == conn_num)
3714 switch (chan->conn->type) {
3716 cnt = hdev->acl_cnt;
3719 cnt = hdev->block_cnt;
3723 cnt = hdev->sco_cnt;
3726 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3730 BT_ERR("Unknown link type");
3735 BT_DBG("chan %p quote %d", chan, *quote);
3739 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3741 struct hci_conn_hash *h = &hdev->conn_hash;
3742 struct hci_conn *conn;
3745 BT_DBG("%s", hdev->name);
3749 list_for_each_entry_rcu(conn, &h->list, list) {
3750 struct hci_chan *chan;
3752 if (conn->type != type)
3755 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3760 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3761 struct sk_buff *skb;
3768 if (skb_queue_empty(&chan->data_q))
3771 skb = skb_peek(&chan->data_q);
3772 if (skb->priority >= HCI_PRIO_MAX - 1)
3775 skb->priority = HCI_PRIO_MAX - 1;
3777 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3781 if (hci_conn_num(hdev, type) == num)
3789 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3791 /* Calculate count of blocks used by this packet */
3792 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3795 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3797 if (!test_bit(HCI_RAW, &hdev->flags)) {
3798 /* ACL tx timeout must be longer than maximum
3799 * link supervision timeout (40.9 seconds) */
3800 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3801 HCI_ACL_TX_TIMEOUT))
3802 hci_link_tx_to(hdev, ACL_LINK);
3806 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3808 unsigned int cnt = hdev->acl_cnt;
3809 struct hci_chan *chan;
3810 struct sk_buff *skb;
3813 __check_timeout(hdev, cnt);
3815 while (hdev->acl_cnt &&
3816 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3817 u32 priority = (skb_peek(&chan->data_q))->priority;
3818 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3819 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3820 skb->len, skb->priority);
3822 /* Stop if priority has changed */
3823 if (skb->priority < priority)
3826 skb = skb_dequeue(&chan->data_q);
3828 hci_conn_enter_active_mode(chan->conn,
3829 bt_cb(skb)->force_active);
3831 hci_send_frame(hdev, skb);
3832 hdev->acl_last_tx = jiffies;
3840 if (cnt != hdev->acl_cnt)
3841 hci_prio_recalculate(hdev, ACL_LINK);
3844 static void hci_sched_acl_blk(struct hci_dev *hdev)
3846 unsigned int cnt = hdev->block_cnt;
3847 struct hci_chan *chan;
3848 struct sk_buff *skb;
3852 __check_timeout(hdev, cnt);
3854 BT_DBG("%s", hdev->name);
3856 if (hdev->dev_type == HCI_AMP)
3861 while (hdev->block_cnt > 0 &&
3862 (chan = hci_chan_sent(hdev, type, "e))) {
3863 u32 priority = (skb_peek(&chan->data_q))->priority;
3864 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3867 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3868 skb->len, skb->priority);
3870 /* Stop if priority has changed */
3871 if (skb->priority < priority)
3874 skb = skb_dequeue(&chan->data_q);
3876 blocks = __get_blocks(hdev, skb);
3877 if (blocks > hdev->block_cnt)
3880 hci_conn_enter_active_mode(chan->conn,
3881 bt_cb(skb)->force_active);
3883 hci_send_frame(hdev, skb);
3884 hdev->acl_last_tx = jiffies;
3886 hdev->block_cnt -= blocks;
3889 chan->sent += blocks;
3890 chan->conn->sent += blocks;
3894 if (cnt != hdev->block_cnt)
3895 hci_prio_recalculate(hdev, type);
3898 static void hci_sched_acl(struct hci_dev *hdev)
3900 BT_DBG("%s", hdev->name);
3902 /* No ACL link over BR/EDR controller */
3903 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3906 /* No AMP link over AMP controller */
3907 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3910 switch (hdev->flow_ctl_mode) {
3911 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3912 hci_sched_acl_pkt(hdev);
3915 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3916 hci_sched_acl_blk(hdev);
3922 static void hci_sched_sco(struct hci_dev *hdev)
3924 struct hci_conn *conn;
3925 struct sk_buff *skb;
3928 BT_DBG("%s", hdev->name);
3930 if (!hci_conn_num(hdev, SCO_LINK))
3933 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3934 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3935 BT_DBG("skb %p len %d", skb, skb->len);
3936 hci_send_frame(hdev, skb);
3939 if (conn->sent == ~0)
3945 static void hci_sched_esco(struct hci_dev *hdev)
3947 struct hci_conn *conn;
3948 struct sk_buff *skb;
3951 BT_DBG("%s", hdev->name);
3953 if (!hci_conn_num(hdev, ESCO_LINK))
3956 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3958 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3959 BT_DBG("skb %p len %d", skb, skb->len);
3960 hci_send_frame(hdev, skb);
3963 if (conn->sent == ~0)
3969 static void hci_sched_le(struct hci_dev *hdev)
3971 struct hci_chan *chan;
3972 struct sk_buff *skb;
3973 int quote, cnt, tmp;
3975 BT_DBG("%s", hdev->name);
3977 if (!hci_conn_num(hdev, LE_LINK))
3980 if (!test_bit(HCI_RAW, &hdev->flags)) {
3981 /* LE tx timeout must be longer than maximum
3982 * link supervision timeout (40.9 seconds) */
3983 if (!hdev->le_cnt && hdev->le_pkts &&
3984 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3985 hci_link_tx_to(hdev, LE_LINK);
3988 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3990 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3991 u32 priority = (skb_peek(&chan->data_q))->priority;
3992 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3993 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3994 skb->len, skb->priority);
3996 /* Stop if priority has changed */
3997 if (skb->priority < priority)
4000 skb = skb_dequeue(&chan->data_q);
4002 hci_send_frame(hdev, skb);
4003 hdev->le_last_tx = jiffies;
4014 hdev->acl_cnt = cnt;
4017 hci_prio_recalculate(hdev, LE_LINK);
4020 static void hci_tx_work(struct work_struct *work)
4022 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4023 struct sk_buff *skb;
4025 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4026 hdev->sco_cnt, hdev->le_cnt);
4028 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4029 /* Schedule queues and send stuff to HCI driver */
4030 hci_sched_acl(hdev);
4031 hci_sched_sco(hdev);
4032 hci_sched_esco(hdev);
4036 /* Send next queued raw (unknown type) packet */
4037 while ((skb = skb_dequeue(&hdev->raw_q)))
4038 hci_send_frame(hdev, skb);
4041 /* ----- HCI RX task (incoming data processing) ----- */
4043 /* ACL data packet */
4044 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4046 struct hci_acl_hdr *hdr = (void *) skb->data;
4047 struct hci_conn *conn;
4048 __u16 handle, flags;
4050 skb_pull(skb, HCI_ACL_HDR_SIZE);
4052 handle = __le16_to_cpu(hdr->handle);
4053 flags = hci_flags(handle);
4054 handle = hci_handle(handle);
4056 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4059 hdev->stat.acl_rx++;
4062 conn = hci_conn_hash_lookup_handle(hdev, handle);
4063 hci_dev_unlock(hdev);
4066 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4068 /* Send to upper protocol */
4069 l2cap_recv_acldata(conn, skb, flags);
4072 BT_ERR("%s ACL packet for unknown connection handle %d",
4073 hdev->name, handle);
4079 /* SCO data packet */
4080 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4082 struct hci_sco_hdr *hdr = (void *) skb->data;
4083 struct hci_conn *conn;
4086 skb_pull(skb, HCI_SCO_HDR_SIZE);
4088 handle = __le16_to_cpu(hdr->handle);
4090 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4092 hdev->stat.sco_rx++;
4095 conn = hci_conn_hash_lookup_handle(hdev, handle);
4096 hci_dev_unlock(hdev);
4099 /* Send to upper protocol */
4100 sco_recv_scodata(conn, skb);
4103 BT_ERR("%s SCO packet for unknown connection handle %d",
4104 hdev->name, handle);
4110 static bool hci_req_is_complete(struct hci_dev *hdev)
4112 struct sk_buff *skb;
4114 skb = skb_peek(&hdev->cmd_q);
4118 return bt_cb(skb)->req.start;
4121 static void hci_resend_last(struct hci_dev *hdev)
4123 struct hci_command_hdr *sent;
4124 struct sk_buff *skb;
4127 if (!hdev->sent_cmd)
4130 sent = (void *) hdev->sent_cmd->data;
4131 opcode = __le16_to_cpu(sent->opcode);
4132 if (opcode == HCI_OP_RESET)
4135 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4139 skb_queue_head(&hdev->cmd_q, skb);
4140 queue_work(hdev->workqueue, &hdev->cmd_work);
4143 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4145 hci_req_complete_t req_complete = NULL;
4146 struct sk_buff *skb;
4147 unsigned long flags;
4149 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4151 /* If the completed command doesn't match the last one that was
4152 * sent we need to do special handling of it.
4154 if (!hci_sent_cmd_data(hdev, opcode)) {
4155 /* Some CSR based controllers generate a spontaneous
4156 * reset complete event during init and any pending
4157 * command will never be completed. In such a case we
4158 * need to resend whatever was the last sent
4161 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4162 hci_resend_last(hdev);
4167 /* If the command succeeded and there's still more commands in
4168 * this request the request is not yet complete.
4170 if (!status && !hci_req_is_complete(hdev))
4173 /* If this was the last command in a request the complete
4174 * callback would be found in hdev->sent_cmd instead of the
4175 * command queue (hdev->cmd_q).
4177 if (hdev->sent_cmd) {
4178 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4181 /* We must set the complete callback to NULL to
4182 * avoid calling the callback more than once if
4183 * this function gets called again.
4185 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4191 /* Remove all pending commands belonging to this request */
4192 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4193 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4194 if (bt_cb(skb)->req.start) {
4195 __skb_queue_head(&hdev->cmd_q, skb);
4199 req_complete = bt_cb(skb)->req.complete;
4202 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4206 req_complete(hdev, status);
4209 static void hci_rx_work(struct work_struct *work)
4211 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4212 struct sk_buff *skb;
4214 BT_DBG("%s", hdev->name);
4216 while ((skb = skb_dequeue(&hdev->rx_q))) {
4217 /* Send copy to monitor */
4218 hci_send_to_monitor(hdev, skb);
4220 if (atomic_read(&hdev->promisc)) {
4221 /* Send copy to the sockets */
4222 hci_send_to_sock(hdev, skb);
4225 if (test_bit(HCI_RAW, &hdev->flags) ||
4226 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4231 if (test_bit(HCI_INIT, &hdev->flags)) {
4232 /* Don't process data packets in this states. */
4233 switch (bt_cb(skb)->pkt_type) {
4234 case HCI_ACLDATA_PKT:
4235 case HCI_SCODATA_PKT:
4242 switch (bt_cb(skb)->pkt_type) {
4244 BT_DBG("%s Event packet", hdev->name);
4245 hci_event_packet(hdev, skb);
4248 case HCI_ACLDATA_PKT:
4249 BT_DBG("%s ACL data packet", hdev->name);
4250 hci_acldata_packet(hdev, skb);
4253 case HCI_SCODATA_PKT:
4254 BT_DBG("%s SCO data packet", hdev->name);
4255 hci_scodata_packet(hdev, skb);
4265 static void hci_cmd_work(struct work_struct *work)
4267 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4268 struct sk_buff *skb;
4270 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4271 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4273 /* Send queued commands */
4274 if (atomic_read(&hdev->cmd_cnt)) {
4275 skb = skb_dequeue(&hdev->cmd_q);
4279 kfree_skb(hdev->sent_cmd);
4281 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4282 if (hdev->sent_cmd) {
4283 atomic_dec(&hdev->cmd_cnt);
4284 hci_send_frame(hdev, skb);
4285 if (test_bit(HCI_RESET, &hdev->flags))
4286 del_timer(&hdev->cmd_timer);
4288 mod_timer(&hdev->cmd_timer,
4289 jiffies + HCI_CMD_TIMEOUT);
4291 skb_queue_head(&hdev->cmd_q, skb);
4292 queue_work(hdev->workqueue, &hdev->cmd_work);