2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
/* NOTE(review): this extract is decimated — blank lines, braces and many
 * statements were dropped during extraction (the leading number on each
 * line is the original file's line number, and it skips).  All code below
 * is kept byte-identical; only comments are added.
 */
/* Work handlers for the RX/CMD/TX paths, defined later in the file. */
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, guarded by hci_dev_list_lock. */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_RWLOCK(hci_cb_list_lock);
56 /* HCI ID Numbering */
/* IDA allocator handing out the hciX index numbers. */
57 static DEFINE_IDA(hci_index_ida);
59 /* ----- HCI requests ----- */
/* States of hdev->req_status for a synchronous request in flight. */
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
/* Serialize synchronous requests against one controller via req_lock. */
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
68 /* ---- HCI notifications ---- */
/* Forward a device event (e.g. register/unregister/up/down) to the HCI
 * socket layer.  (Opening/closing braces elided in this extract.)
 */
70 static void hci_notify(struct hci_dev *hdev, int event)
72 hci_sock_dev_event(hdev, event);
75 /* ---- HCI debugfs entries ---- */
/* debugfs read handler: reports whether Device Under Test (DUT) mode is
 * enabled as a single 'Y' or 'N' character.
 */
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
/* NOTE(review): the declaration of buf[] and the lines filling its
 * remaining bytes are elided in this extract.
 */
83 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs write handler: parse a boolean from user space and toggle DUT
 * mode by sending HCI_OP_ENABLE_DUT_MODE (enable) or HCI_OP_RESET
 * (disable) synchronously.  Error-return lines are elided in this extract.
 */
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
92 struct hci_dev *hdev = file->private_data;
/* Bound the copy to the local buffer, leaving room for the terminator. */
95 size_t buf_size = min(count, (sizeof(buf)-1));
/* DUT mode only makes sense on a powered-up controller. */
99 if (!test_bit(HCI_UP, &hdev->flags))
102 if (copy_from_user(buf, user_buf, buf_size))
105 buf[buf_size] = '\0';
106 if (strtobool(buf, &enable))
/* No-op if the requested state matches the current one. */
109 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
114 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
117 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
119 hci_req_unlock(hdev);
/* First byte of the command-complete payload is the HCI status code. */
124 err = -bt_to_errno(skb->data[0]);
/* Command succeeded: flip the cached DUT-mode flag. */
130 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
/* File operations for the "dut_mode" debugfs entry.  The .open line is
 * elided in this extract.
 */
135 static const struct file_operations dut_mode_fops = {
137 .read = dut_mode_read,
138 .write = dut_mode_write,
139 .llseek = default_llseek,
142 /* ---- HCI requests ---- */
/* Completion callback for synchronous requests: stash the HCI result code
 * and wake the task sleeping on req_wait_q.
 */
144 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
146 BT_DBG("%s result 0x%2.2x", hdev->name, result);
/* Only complete a request that is actually pending. */
148 if (hdev->req_status == HCI_REQ_PEND) {
149 hdev->req_result = result;
150 hdev->req_status = HCI_REQ_DONE;
151 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with errno @err and wake the
 * waiting task.
 */
155 static void hci_req_cancel(struct hci_dev *hdev, int err)
157 BT_DBG("%s err 0x%2.2x", hdev->name, err);
159 if (hdev->req_status == HCI_REQ_PEND) {
160 hdev->req_result = err;
161 hdev->req_status = HCI_REQ_CANCELED;
162 wake_up_interruptible(&hdev->req_wait_q);
/* Detach the last received event skb (hdev->recv_evt) and validate that it
 * is the Command Complete (or the requested @event) for @opcode.  Returns
 * the skb positioned past the headers, or an ERR_PTR on mismatch/absence.
 * Several error-exit lines are elided in this extract.
 */
166 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
169 struct hci_ev_cmd_complete *ev;
170 struct hci_event_hdr *hdr;
/* Take ownership of the stashed event under hci_dev_lock. */
175 skb = hdev->recv_evt;
176 hdev->recv_evt = NULL;
178 hci_dev_unlock(hdev);
181 return ERR_PTR(-ENODATA);
183 if (skb->len < sizeof(*hdr)) {
184 BT_ERR("Too short HCI event");
188 hdr = (void *) skb->data;
189 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* If the caller waited for a specific event, any match is enough. */
192 if (hdr->evt != event)
197 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
198 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
202 if (skb->len < sizeof(*ev)) {
203 BT_ERR("Too short cmd_complete event");
207 ev = (void *) skb->data;
208 skb_pull(skb, sizeof(*ev));
/* Success: the completed opcode matches what the caller sent. */
210 if (opcode == __le16_to_cpu(ev->opcode))
213 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
214 __le16_to_cpu(ev->opcode));
218 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, up to @timeout) until
 * the matching completion @event arrives.  Returns the completion skb via
 * hci_get_cmd_complete(), or an ERR_PTR on signal/error.  Presumably called
 * with req_lock held — the lock call is not visible in this extract.
 */
221 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
222 const void *param, u8 event, u32 timeout)
224 DECLARE_WAITQUEUE(wait, current);
225 struct hci_request req;
228 BT_DBG("%s", hdev->name);
230 hci_req_init(&req, hdev);
232 hci_req_add_ev(&req, opcode, plen, param, event);
234 hdev->req_status = HCI_REQ_PEND;
/* Register on the wait queue before running the request to avoid losing
 * a wakeup that races with completion.
 */
236 add_wait_queue(&hdev->req_wait_q, &wait);
237 set_current_state(TASK_INTERRUPTIBLE);
239 err = hci_req_run(&req, hci_req_sync_complete);
/* Error path: undo the wait-queue registration (lines elided). */
241 remove_wait_queue(&hdev->req_wait_q, &wait);
242 set_current_state(TASK_RUNNING);
246 schedule_timeout(timeout);
248 remove_wait_queue(&hdev->req_wait_q, &wait);
250 if (signal_pending(current))
251 return ERR_PTR(-EINTR);
253 switch (hdev->req_status) {
/* HCI_REQ_DONE: convert the HCI status byte to a negative errno. */
255 err = -bt_to_errno(hdev->req_result);
258 case HCI_REQ_CANCELED:
259 err = -hdev->req_result;
267 hdev->req_status = hdev->req_result = 0;
269 BT_DBG("%s end: err %d", hdev->name, err);
274 return hci_get_cmd_complete(hdev, opcode, event);
276 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous HCI command waiting for the default
 * completion event (event == 0).
 */
278 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
279 const void *param, u32 timeout)
281 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
283 EXPORT_SYMBOL(__hci_cmd_sync);
285 /* Execute request and wait for completion. */
286 static int __hci_req_sync(struct hci_dev *hdev,
287 void (*func)(struct hci_request *req,
289 unsigned long opt, __u32 timeout)
291 struct hci_request req;
292 DECLARE_WAITQUEUE(wait, current);
295 BT_DBG("%s start", hdev->name);
297 hci_req_init(&req, hdev);
299 hdev->req_status = HCI_REQ_PEND;
303 add_wait_queue(&hdev->req_wait_q, &wait);
304 set_current_state(TASK_INTERRUPTIBLE);
306 err = hci_req_run(&req, hci_req_sync_complete);
308 hdev->req_status = 0;
310 remove_wait_queue(&hdev->req_wait_q, &wait);
311 set_current_state(TASK_RUNNING);
313 /* ENODATA means the HCI request command queue is empty.
314 * This can happen when a request with conditionals doesn't
315 * trigger any commands to be sent. This is normal behavior
316 * and should not trigger an error return.
324 schedule_timeout(timeout);
326 remove_wait_queue(&hdev->req_wait_q, &wait);
328 if (signal_pending(current))
331 switch (hdev->req_status) {
333 err = -bt_to_errno(hdev->req_result);
336 case HCI_REQ_CANCELED:
337 err = -hdev->req_result;
345 hdev->req_status = hdev->req_result = 0;
347 BT_DBG("%s end: err %d", hdev->name, err);
/* Locked wrapper around __hci_req_sync(): requires the device to be up and
 * serializes requests with req_lock.  The hci_req_lock() call is elided in
 * this extract; the matching unlock is visible below.
 */
352 static int hci_req_sync(struct hci_dev *hdev,
353 void (*req)(struct hci_request *req,
355 unsigned long opt, __u32 timeout)
359 if (!test_bit(HCI_UP, &hdev->flags))
362 /* Serialize all requests */
364 ret = __hci_req_sync(hdev, req, opt, timeout);
365 hci_req_unlock(hdev);
/* Request builder: queue an HCI_Reset and mark the reset in progress. */
370 static void hci_reset_req(struct hci_request *req, unsigned long opt)
372 BT_DBG("%s %ld", req->hdev->name, opt);
375 set_bit(HCI_RESET, &req->hdev->flags);
376 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR (packet-based flow control) controllers: read
 * basic identity information.
 */
379 static void bredr_init(struct hci_request *req)
381 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
383 /* Read Local Supported Features */
384 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
386 /* Read Local Version */
387 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
389 /* Read BD Address */
390 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP (block-based flow control) controllers: query
 * version, commands, features, AMP info, block size, flow-control mode
 * and location data.
 */
393 static void amp_init(struct hci_request *req)
395 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
397 /* Read Local Version */
398 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
400 /* Read Local Supported Commands */
401 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
403 /* Read Local Supported Features */
404 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
406 /* Read Local AMP Info */
407 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
409 /* Read Data Blk size */
410 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
412 /* Read Flow Control Mode */
413 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
415 /* Read Location Data */
416 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Stage-1 init: optional reset, then dispatch to bredr_init()/amp_init()
 * by device type.  The switch cases themselves are elided in this extract.
 */
419 static void hci_init1_req(struct hci_request *req, unsigned long opt)
421 struct hci_dev *hdev = req->hdev;
423 BT_DBG("%s %ld", hdev->name, opt);
/* Skip the reset when the driver handles reset on close itself. */
426 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
427 hci_reset_req(req, 0);
429 switch (hdev->dev_type) {
439 BT_ERR("Unknown device type %d", hdev->dev_type);
/* Stage-2 BR/EDR setup: read buffer sizes and identity data, clear event
 * filters, and set the connection accept timeout.  Local declarations of
 * flt_type/param are elided in this extract.
 */
444 static void bredr_setup(struct hci_request *req)
449 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
450 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
452 /* Read Class of Device */
453 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
455 /* Read Local Name */
456 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
458 /* Read Voice Setting */
459 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
461 /* Read Number of Supported IAC */
462 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
464 /* Read Current IAC LAP */
465 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
467 /* Clear Event Filters */
468 flt_type = HCI_FLT_CLEAR_ALL;
469 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
471 /* Connection accept timeout ~20 secs */
472 param = cpu_to_le16(0x7d00);
473 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
/* Stage-2 LE setup: read LE buffer/feature/state info, reset the white
 * list, and implicitly enable LE on LE-only controllers.
 */
476 static void le_setup(struct hci_request *req)
478 struct hci_dev *hdev = req->hdev;
480 /* Read LE Buffer Size */
481 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
483 /* Read LE Local Supported Features */
484 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
486 /* Read LE Supported States */
487 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
489 /* Read LE White List Size */
490 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
492 /* Clear LE White List */
493 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
495 /* LE-only controllers have LE implicitly enabled */
496 if (!lmp_bredr_capable(hdev))
497 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Pick the best inquiry result mode the controller supports (extended >
 * RSSI > standard), with denylist checks for specific manufacturer /
 * hci_rev / lmp_subver combinations whose firmware misreports support.
 * The return statements are elided in this extract.
 */
500 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
502 if (lmp_ext_inq_capable(hdev))
505 if (lmp_inq_rssi_capable(hdev))
508 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
509 hdev->lmp_subver == 0x0757)
512 if (hdev->manufacturer == 15) {
513 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
515 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
517 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
521 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
522 hdev->lmp_subver == 0x1805)
/* Queue Write Inquiry Mode with the mode chosen by hci_get_inquiry_mode(). */
528 static void hci_setup_inquiry_mode(struct hci_request *req)
532 mode = hci_get_inquiry_mode(req->hdev);
534 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and queue the HCI Set Event Mask, enabling only the events the
 * controller's feature bits say it can generate.
 */
537 static void hci_setup_event_mask(struct hci_request *req)
539 struct hci_dev *hdev = req->hdev;
541 /* The second byte is 0xff instead of 0x9f (two reserved bits
542 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
545 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
547 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
548 * any event mask for pre 1.2 devices.
550 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
553 if (lmp_bredr_capable(hdev)) {
554 events[4] |= 0x01; /* Flow Specification Complete */
555 events[4] |= 0x02; /* Inquiry Result with RSSI */
556 events[4] |= 0x04; /* Read Remote Extended Features Complete */
557 events[5] |= 0x08; /* Synchronous Connection Complete */
558 events[5] |= 0x10; /* Synchronous Connection Changed */
560 /* Use a different default for LE-only devices */
561 memset(events, 0, sizeof(events));
562 events[0] |= 0x10; /* Disconnection Complete */
563 events[1] |= 0x08; /* Read Remote Version Information Complete */
564 events[1] |= 0x20; /* Command Complete */
565 events[1] |= 0x40; /* Command Status */
566 events[1] |= 0x80; /* Hardware Error */
567 events[2] |= 0x04; /* Number of Completed Packets */
568 events[3] |= 0x02; /* Data Buffer Overflow */
570 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
571 events[0] |= 0x80; /* Encryption Change */
572 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* From here on, add optional events gated on individual LMP features. */
576 if (lmp_inq_rssi_capable(hdev))
577 events[4] |= 0x02; /* Inquiry Result with RSSI */
579 if (lmp_sniffsubr_capable(hdev))
580 events[5] |= 0x20; /* Sniff Subrating */
582 if (lmp_pause_enc_capable(hdev))
583 events[5] |= 0x80; /* Encryption Key Refresh Complete */
585 if (lmp_ext_inq_capable(hdev))
586 events[5] |= 0x40; /* Extended Inquiry Result */
588 if (lmp_no_flush_capable(hdev))
589 events[7] |= 0x01; /* Enhanced Flush Complete */
591 if (lmp_lsto_capable(hdev))
592 events[6] |= 0x80; /* Link Supervision Timeout Changed */
594 if (lmp_ssp_capable(hdev)) {
595 events[6] |= 0x01; /* IO Capability Request */
596 events[6] |= 0x02; /* IO Capability Response */
597 events[6] |= 0x04; /* User Confirmation Request */
598 events[6] |= 0x08; /* User Passkey Request */
599 events[6] |= 0x10; /* Remote OOB Data Request */
600 events[6] |= 0x20; /* Simple Pairing Complete */
601 events[7] |= 0x04; /* User Passkey Notification */
602 events[7] |= 0x08; /* Keypress Notification */
603 events[7] |= 0x10; /* Remote Host Supported
604 * Features Notification
608 if (lmp_le_capable(hdev))
609 events[7] |= 0x20; /* LE Meta-Event */
611 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 init: branch to bredr_setup()/le_setup() by capability, read
 * supported commands, and configure SSP/EIR, inquiry mode, TX power,
 * extended features and auth enable.  Several branch bodies are elided in
 * this extract.
 */
614 static void hci_init2_req(struct hci_request *req, unsigned long opt)
616 struct hci_dev *hdev = req->hdev;
618 if (lmp_bredr_capable(hdev))
/* Not BR/EDR capable: make sure the BR/EDR-enabled flag is off. */
621 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
623 if (lmp_le_capable(hdev))
626 /* All Bluetooth 1.2 and later controllers should support the
627 * HCI command for reading the local supported commands.
629 * Unfortunately some controllers indicate Bluetooth 1.2 support,
630 * but do not have support for this command. If that is the case,
631 * the driver can quirk the behavior and skip reading the local
632 * supported commands.
634 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
635 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
636 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
638 if (lmp_ssp_capable(hdev)) {
639 /* When SSP is available, then the host features page
640 * should also be available as well. However some
641 * controllers list the max_page as 0 as long as SSP
642 * has not been enabled. To achieve proper debugging
643 * output, force the minimum max_page to 1 at least.
645 hdev->max_page = 0x01;
647 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
649 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
650 sizeof(mode), &mode);
/* SSP not enabled by the host: clear any stale EIR data. */
652 struct hci_cp_write_eir cp;
654 memset(hdev->eir, 0, sizeof(hdev->eir));
655 memset(&cp, 0, sizeof(cp));
657 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
661 if (lmp_inq_rssi_capable(hdev))
662 hci_setup_inquiry_mode(req);
664 if (lmp_inq_tx_pwr_capable(hdev))
665 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
667 if (lmp_ext_feat_capable(hdev)) {
668 struct hci_cp_read_local_ext_features cp;
671 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
675 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
677 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and queue Write Default Link Policy.
 * The link_policy local declaration is elided in this extract.
 */
682 static void hci_setup_link_policy(struct hci_request *req)
684 struct hci_dev *hdev = req->hdev;
685 struct hci_cp_write_def_link_policy cp;
688 if (lmp_rswitch_capable(hdev))
689 link_policy |= HCI_LP_RSWITCH;
690 if (lmp_hold_capable(hdev))
691 link_policy |= HCI_LP_HOLD;
692 if (lmp_sniff_capable(hdev))
693 link_policy |= HCI_LP_SNIFF;
694 if (lmp_park_capable(hdev))
695 link_policy |= HCI_LP_PARK;
697 cp.policy = cpu_to_le16(link_policy);
698 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Sync the host's LE-enabled flag to the controller via Write LE Host
 * Supported, but only when the current controller setting differs.
 * Skipped entirely on LE-only devices.
 */
701 static void hci_set_le_support(struct hci_request *req)
703 struct hci_dev *hdev = req->hdev;
704 struct hci_cp_write_le_host_supported cp;
706 /* LE-only devices do not support explicit enablement */
707 if (!lmp_bredr_capable(hdev))
710 memset(&cp, 0, sizeof(cp));
712 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
/* Only send the command if the desired state differs from current. */
717 if (cp.le != lmp_host_le_capable(hdev))
718 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue Set Event Mask Page 2: CSB master/slave events and the
 * Authenticated Payload Timeout Expired event, each gated on capability.
 */
722 static void hci_set_event_mask_page_2(struct hci_request *req)
724 struct hci_dev *hdev = req->hdev;
725 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
727 /* If Connectionless Slave Broadcast master role is supported
728 * enable all necessary events for it.
730 if (lmp_csb_master_capable(hdev)) {
731 events[1] |= 0x40; /* Triggered Clock Capture */
732 events[1] |= 0x80; /* Synchronization Train Complete */
733 events[2] |= 0x10; /* Slave Page Response Timeout */
734 events[2] |= 0x20; /* CSB Channel Map Change */
737 /* If Connectionless Slave Broadcast slave role is supported
738 * enable all necessary events for it.
740 if (lmp_csb_slave_capable(hdev)) {
741 events[2] |= 0x01; /* Synchronization Train Received */
742 events[2] |= 0x02; /* CSB Receive */
743 events[2] |= 0x04; /* CSB Timeout */
744 events[2] |= 0x08; /* Truncated Page Complete */
747 /* Enable Authenticated Payload Timeout Expired event if supported */
748 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
751 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 init: set the event mask, conditionally delete stored link keys,
 * configure link policy and page scan, then do LE-specific setup (LE event
 * mask, TX power, data length, LE host support) and read extended feature
 * pages beyond page 1.
 */
754 static void hci_init3_req(struct hci_request *req, unsigned long opt)
756 struct hci_dev *hdev = req->hdev;
759 hci_setup_event_mask(req);
761 /* Some Broadcom based Bluetooth controllers do not support the
762 * Delete Stored Link Key command. They are clearly indicating its
763 * absence in the bit mask of supported commands.
765 * Check the supported commands and only if the the command is marked
766 * as supported send it. If not supported assume that the controller
767 * does not have actual support for stored link keys which makes this
768 * command redundant anyway.
770 * Some controllers indicate that they support handling deleting
771 * stored link keys, but they don't. The quirk lets a driver
772 * just disable this command.
774 if (hdev->commands[6] & 0x80 &&
775 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
776 struct hci_cp_delete_stored_link_key cp;
778 bacpy(&cp.bdaddr, BDADDR_ANY);
779 cp.delete_all = 0x01;
780 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
784 if (hdev->commands[5] & 0x10)
785 hci_setup_link_policy(req);
787 if (hdev->commands[8] & 0x01)
788 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
790 /* Some older Broadcom based Bluetooth 1.2 controllers do not
791 * support the Read Page Scan Type command. Check support for
792 * this command in the bit mask of supported commands.
794 if (hdev->commands[13] & 0x01)
795 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
797 if (lmp_le_capable(hdev)) {
/* Build the LE event mask from scratch, feature by feature. */
800 memset(events, 0, sizeof(events));
803 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
804 events[0] |= 0x10; /* LE Long Term Key Request */
806 /* If controller supports the Connection Parameters Request
807 * Link Layer Procedure, enable the corresponding event.
809 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
810 events[0] |= 0x20; /* LE Remote Connection
814 /* If the controller supports the Data Length Extension
815 * feature, enable the corresponding event.
817 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
818 events[0] |= 0x40; /* LE Data Length Change */
820 /* If the controller supports Extended Scanner Filter
821 * Policies, enable the correspondig event.
823 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
824 events[1] |= 0x04; /* LE Direct Advertising
828 /* If the controller supports the LE Read Local P-256
829 * Public Key command, enable the corresponding event.
831 if (hdev->commands[34] & 0x02)
832 events[0] |= 0x80; /* LE Read Local P-256
833 * Public Key Complete
836 /* If the controller supports the LE Generate DHKey
837 * command, enable the corresponding event.
839 if (hdev->commands[34] & 0x04)
840 events[1] |= 0x01; /* LE Generate DHKey Complete */
842 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
845 if (hdev->commands[25] & 0x40) {
846 /* Read LE Advertising Channel TX Power */
847 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
850 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
851 /* Read LE Maximum Data Length */
852 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
854 /* Read LE Suggested Default Data Length */
855 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
858 hci_set_le_support(req);
861 /* Read features beyond page 1 if available */
862 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
863 struct hci_cp_read_local_ext_features cp;
866 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 init: event mask page 2, codec list, MWS transport config,
 * synchronization train parameters and Secure Connections — each gated on
 * the relevant supported-commands bit or capability.
 */
871 static void hci_init4_req(struct hci_request *req, unsigned long opt)
873 struct hci_dev *hdev = req->hdev;
875 /* Set event mask page 2 if the HCI command for it is supported */
876 if (hdev->commands[22] & 0x04)
877 hci_set_event_mask_page_2(req);
879 /* Read local codec list if the HCI command is supported */
880 if (hdev->commands[29] & 0x20)
881 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
883 /* Get MWS transport configuration if the HCI command is supported */
884 if (hdev->commands[30] & 0x08)
885 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
887 /* Check for Synchronization Train support */
888 if (lmp_sync_train_capable(hdev))
889 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
891 /* Enable Secure Connections if supported and configured */
892 if (bredr_sc_enabled(hdev)) {
894 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
895 sizeof(support), &support);
/* Full controller initialization: run init stages 1-4 in order (stages 2+
 * only for HCI_BREDR-type controllers), and create debugfs entries during
 * the initial HCI_SETUP phase.  Error-return lines are elided in this
 * extract.
 */
899 static int __hci_init(struct hci_dev *hdev)
903 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
907 /* The Device Under Test (DUT) mode is special and available for
908 * all controller types. So just create it early on.
910 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
911 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
915 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
916 * BR/EDR/LE type controllers. AMP controllers only need the
919 if (hdev->dev_type != HCI_BREDR)
922 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
926 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
930 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
934 /* Only create debugfs entries during the initial setup
935 * phase and not every time the controller gets powered on.
937 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
940 hci_debugfs_create_common(hdev);
942 if (lmp_bredr_capable(hdev))
943 hci_debugfs_create_bredr(hdev);
945 if (lmp_le_capable(hdev)) {
946 hci_debugfs_create_le(hdev);
/* Minimal init request for unconfigured controllers: optional reset, read
 * local version, and read the BD address only when the driver can change
 * it via set_bdaddr.
 */
953 static void hci_init0_req(struct hci_request *req, unsigned long opt)
955 struct hci_dev *hdev = req->hdev;
957 BT_DBG("%s %ld", hdev->name, opt);
960 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
961 hci_reset_req(req, 0);
963 /* Read Local Version */
964 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
966 /* Read BD Address */
967 if (hdev->set_bdaddr)
968 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run the unconfigured-controller init (hci_init0_req), skipped entirely
 * for raw devices.
 */
971 static int __hci_unconf_init(struct hci_dev *hdev)
975 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
978 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* ioctl helper request: write the scan enable value passed in @opt. */
985 static void hci_scan_req(struct hci_request *req, unsigned long opt)
989 BT_DBG("%s %x", req->hdev->name, scan);
991 /* Inquiry and Page scans */
992 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* ioctl helper request: write the authentication enable value from @opt. */
995 static void hci_auth_req(struct hci_request *req, unsigned long opt)
999 BT_DBG("%s %x", req->hdev->name, auth);
1001 /* Authentication */
1002 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* ioctl helper request: write the encryption mode value from @opt. */
1005 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1009 BT_DBG("%s %x", req->hdev->name, encrypt)
1012 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* ioctl helper request: write the default link policy from @opt
 * (little-endian on the wire).
 */
1015 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1017 __le16 policy = cpu_to_le16(opt);
1019 BT_DBG("%s %x", req->hdev->name, policy);
1021 /* Default link policy */
1022 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1025 /* Get HCI device by index.
1026 * Device is held on return. */
/* Returns the hci_dev whose id matches @index with its refcount taken, or
 * NULL if no such device is registered.  Looked up under the device-list
 * read lock.
 */
1027 struct hci_dev *hci_dev_get(int index)
1029 struct hci_dev *hdev = NULL, *d;
1031 BT_DBG("%d", index);
1036 read_lock(&hci_dev_list_lock);
1037 list_for_each_entry(d, &hci_dev_list, list) {
1038 if (d->id == index) {
1039 hdev = hci_dev_hold(d);
1043 read_unlock(&hci_dev_list_lock);
1047 /* ---- Inquiry support ---- */
/* True while a discovery procedure is in the FINDING or RESOLVING state.
 * The return statements are elided in this extract.
 */
1049 bool hci_discovery_active(struct hci_dev *hdev)
1051 struct discovery_state *discov = &hdev->discovery;
1053 switch (discov->state) {
1054 case DISCOVERY_FINDING:
1055 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and notify the management
 * interface (mgmt_discovering) on the STOPPED/FINDING edges; restarts
 * background scanning when discovery stops.
 */
1063 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1065 int old_state = hdev->discovery.state;
1067 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
/* Ignore no-op transitions. */
1069 if (old_state == state)
1072 hdev->discovery.state = state;
1075 case DISCOVERY_STOPPED:
1076 hci_update_background_scan(hdev);
/* Don't report "stopped" if discovery never actually started. */
1078 if (old_state != DISCOVERY_STARTING)
1079 mgmt_discovering(hdev, 0);
1081 case DISCOVERY_STARTING:
1083 case DISCOVERY_FINDING:
1084 mgmt_discovering(hdev, 1);
1086 case DISCOVERY_RESOLVING:
1088 case DISCOVERY_STOPPING:
/* Empty the inquiry cache: free every entry on the "all" list (the kfree
 * is elided in this extract) and reinitialize the sub-lists.
 */
1093 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1095 struct discovery_state *cache = &hdev->discovery;
1096 struct inquiry_entry *p, *n;
1098 list_for_each_entry_safe(p, n, &cache->all, all) {
1103 INIT_LIST_HEAD(&cache->unknown);
1104 INIT_LIST_HEAD(&cache->resolve);
/* Find the cache entry for @bdaddr on the "all" list, or NULL. */
1107 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1110 struct discovery_state *cache = &hdev->discovery;
1111 struct inquiry_entry *e;
1113 BT_DBG("cache %p, %pMR", cache, bdaddr);
1115 list_for_each_entry(e, &cache->all, all) {
1116 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find @bdaddr among entries whose remote name is still unknown. */
1123 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1126 struct discovery_state *cache = &hdev->discovery;
1127 struct inquiry_entry *e;
1129 BT_DBG("cache %p, %pMR", cache, bdaddr);
1131 list_for_each_entry(e, &cache->unknown, list) {
1132 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the name-resolve list: either the specific @bdaddr, or
 * (when @bdaddr is BDADDR_ANY) any entry in name_state @state.
 */
1139 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1143 struct discovery_state *cache = &hdev->discovery;
1144 struct inquiry_entry *e;
1146 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1148 list_for_each_entry(e, &cache->resolve, list) {
1149 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1151 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list, keeping it ordered so that entries
 * with stronger RSSI (smaller |rssi|) are resolved first; entries already
 * NAME_PENDING keep their position ahead of @ie.
 */
1158 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1159 struct inquiry_entry *ie)
1161 struct discovery_state *cache = &hdev->discovery;
1162 struct list_head *pos = &cache->resolve;
1163 struct inquiry_entry *p;
1165 list_del(&ie->list);
/* Walk until we find the first entry @ie should precede. */
1167 list_for_each_entry(p, &cache->resolve, list) {
1168 if (p->name_state != NAME_PENDING &&
1169 abs(p->data.rssi) >= abs(ie->data.rssi))
1174 list_add(&ie->list, pos);
/* Insert or refresh the cache entry for an inquiry result and return the
 * MGMT_DEV_FOUND_* flags describing it (legacy pairing, confirm-name).
 * Several control-flow lines (goto/update labels) are elided in this
 * extract.
 */
1177 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1180 struct discovery_state *cache = &hdev->discovery;
1181 struct inquiry_entry *ie;
1184 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data. */
1186 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1188 if (!data->ssp_mode)
1189 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1191 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1193 if (!ie->data.ssp_mode)
1194 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changed on an entry awaiting name resolution: reorder it. */
1196 if (ie->name_state == NAME_NEEDED &&
1197 data->rssi != ie->data.rssi) {
1198 ie->data.rssi = data->rssi;
1199 hci_inquiry_cache_update_resolve(hdev, ie);
1205 /* Entry not in the cache. Add new one. */
1206 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1208 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1212 list_add(&ie->all, &cache->all);
1215 ie->name_state = NAME_KNOWN;
1217 ie->name_state = NAME_NOT_KNOWN;
1218 list_add(&ie->list, &cache->unknown);
/* Name just became known: drop the entry from the unknown list. */
1222 if (name_known && ie->name_state != NAME_KNOWN &&
1223 ie->name_state != NAME_PENDING) {
1224 ie->name_state = NAME_KNOWN;
1225 list_del(&ie->list);
1228 memcpy(&ie->data, data, sizeof(*data));
1229 ie->timestamp = jiffies;
1230 cache->timestamp = jiffies;
1232 if (ie->name_state == NAME_NOT_KNOWN)
1233 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to @num cached inquiry results into @buf as an array of struct
 * inquiry_info; returns the number copied (the copied counter and loop
 * bound lines are elided in this extract).
 */
1239 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1241 struct discovery_state *cache = &hdev->discovery;
1242 struct inquiry_info *info = (struct inquiry_info *) buf;
1243 struct inquiry_entry *e;
1246 list_for_each_entry(e, &cache->all, all) {
1247 struct inquiry_data *data = &e->data;
1252 bacpy(&info->bdaddr, &data->bdaddr);
1253 info->pscan_rep_mode = data->pscan_rep_mode;
1254 info->pscan_period_mode = data->pscan_period_mode;
1255 info->pscan_mode = data->pscan_mode;
1256 memcpy(info->dev_class, data->dev_class, 3);
1257 info->clock_offset = data->clock_offset;
1263 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for the HCI inquiry ioctl: queue an HCI_Inquiry with the
 * LAP, length and response count from the user-supplied hci_inquiry_req,
 * unless an inquiry is already running.
 */
1267 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1269 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1270 struct hci_dev *hdev = req->hdev;
1271 struct hci_cp_inquiry cp;
1273 BT_DBG("%s", hdev->name);
1275 if (test_bit(HCI_INQUIRY, &hdev->flags))
1279 memcpy(&cp.lap, &ir->lap, 3);
1280 cp.length = ir->length;
1281 cp.num_rsp = ir->num_rsp;
1282 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl handler: validate the device, optionally flush a stale
 * cache and run a new inquiry, then copy the cached results back to user
 * space.  Error paths and the goto labels are elided in this extract.
 */
1285 int hci_inquiry(void __user *arg)
1287 __u8 __user *ptr = arg;
1288 struct hci_inquiry_req ir;
1289 struct hci_dev *hdev;
1290 int err = 0, do_inquiry = 0, max_rsp;
1294 if (copy_from_user(&ir, ptr, sizeof(ir)))
1297 hdev = hci_dev_get(ir.dev_id);
/* Reject devices claimed by a user channel, unconfigured devices,
 * non-BR/EDR devices and devices with BR/EDR disabled.
 */
1301 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1306 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1311 if (hdev->dev_type != HCI_BREDR) {
1316 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush and redo the inquiry when the cache is old/empty or the
 * caller asked for a flush.
 */
1322 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1323 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1324 hci_inquiry_cache_flush(hdev);
1327 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units; 2000 ms per unit over-approximates. */
1329 timeo = ir.length * msecs_to_jiffies(2000);
1332 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1337 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1338 * cleared). If it is interrupted by a signal, return -EINTR.
1340 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1341 TASK_INTERRUPTIBLE))
1345 /* for unlimited number of responses we will use buffer with
1348 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1350 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1351 * copy it to the user space.
1353 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1360 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1361 hci_dev_unlock(hdev);
1363 BT_DBG("num_rsp %d", ir.num_rsp);
1365 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1367 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1380 static int hci_dev_do_open(struct hci_dev *hdev)
1384 BT_DBG("%s %p", hdev->name, hdev);
1388 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1393 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1394 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1395 /* Check for rfkill but allow the HCI setup stage to
1396 * proceed (which in itself doesn't cause any RF activity).
1398 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1403 /* Check for valid public address or a configured static
1404 * random adddress, but let the HCI setup proceed to
1405 * be able to determine if there is a public address
1408 * In case of user channel usage, it is not important
1409 * if a public address or static random address is
1412 * This check is only valid for BR/EDR controllers
1413 * since AMP controllers do not have an address.
1415 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1416 hdev->dev_type == HCI_BREDR &&
1417 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1418 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1419 ret = -EADDRNOTAVAIL;
1424 if (test_bit(HCI_UP, &hdev->flags)) {
1429 if (hdev->open(hdev)) {
1434 atomic_set(&hdev->cmd_cnt, 1);
1435 set_bit(HCI_INIT, &hdev->flags);
1437 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1439 ret = hdev->setup(hdev);
1441 /* The transport driver can set these quirks before
1442 * creating the HCI device or in its setup callback.
1444 * In case any of them is set, the controller has to
1445 * start up as unconfigured.
1447 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1448 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1449 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
1451 /* For an unconfigured controller it is required to
1452 * read at least the version information provided by
1453 * the Read Local Version Information command.
1455 * If the set_bdaddr driver callback is provided, then
1456 * also the original Bluetooth public device address
1457 * will be read using the Read BD Address command.
1459 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1460 ret = __hci_unconf_init(hdev);
1463 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1464 /* If public address change is configured, ensure that
1465 * the address gets programmed. If the driver does not
1466 * support changing the public address, fail the power
1469 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1471 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1473 ret = -EADDRNOTAVAIL;
1477 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1478 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1479 ret = __hci_init(hdev);
1482 clear_bit(HCI_INIT, &hdev->flags);
1486 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1487 set_bit(HCI_UP, &hdev->flags);
1488 hci_notify(hdev, HCI_DEV_UP);
1489 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1490 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
1491 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1492 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1493 hdev->dev_type == HCI_BREDR) {
1495 mgmt_powered(hdev, 1);
1496 hci_dev_unlock(hdev);
1499 /* Init failed, cleanup */
1500 flush_work(&hdev->tx_work);
1501 flush_work(&hdev->cmd_work);
1502 flush_work(&hdev->rx_work);
1504 skb_queue_purge(&hdev->cmd_q);
1505 skb_queue_purge(&hdev->rx_q);
1510 if (hdev->sent_cmd) {
1511 kfree_skb(hdev->sent_cmd);
1512 hdev->sent_cmd = NULL;
1516 hdev->flags &= BIT(HCI_RAW);
1520 hci_req_unlock(hdev);
/* ---- HCI ioctl helpers ---- */
1526 int hci_dev_open(__u16 dev)
1528 struct hci_dev *hdev;
1531 hdev = hci_dev_get(dev);
1535 /* Devices that are marked as unconfigured can only be powered
1536 * up as user channel. Trying to bring them up as normal devices
1537 * will result into a failure. Only user channel operation is
1540 * When this function is called for a user channel, the flag
1541 * HCI_USER_CHANNEL will be set first before attempting to
1544 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1545 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1550 /* We need to ensure that no other power on/off work is pending
1551 * before proceeding to call hci_dev_do_open. This is
1552 * particularly important if the setup procedure has not yet
1555 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1556 cancel_delayed_work(&hdev->power_off);
1558 /* After this call it is guaranteed that the setup procedure
1559 * has finished. This means that error conditions like RFKILL
1560 * or no valid public or static random address apply.
1562 flush_workqueue(hdev->req_workqueue);
1564 /* For controllers not using the management interface and that
1565 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1566 * so that pairing works for them. Once the management interface
1567 * is in use this bit will be cleared again and userspace has
1568 * to explicitly enable it.
1570 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1571 !test_bit(HCI_MGMT, &hdev->dev_flags))
1572 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1574 err = hci_dev_do_open(hdev);
1581 /* This function requires the caller holds hdev->lock */
1582 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1584 struct hci_conn_params *p;
1586 list_for_each_entry(p, &hdev->le_conn_params, list) {
1588 hci_conn_drop(p->conn);
1589 hci_conn_put(p->conn);
1592 list_del_init(&p->action);
1595 BT_DBG("All LE pending actions cleared");
1598 static int hci_dev_do_close(struct hci_dev *hdev)
1600 BT_DBG("%s %p", hdev->name, hdev);
1602 cancel_delayed_work(&hdev->power_off);
1604 hci_req_cancel(hdev, ENODEV);
1607 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1608 cancel_delayed_work_sync(&hdev->cmd_timer);
1609 hci_req_unlock(hdev);
1613 /* Flush RX and TX works */
1614 flush_work(&hdev->tx_work);
1615 flush_work(&hdev->rx_work);
1617 if (hdev->discov_timeout > 0) {
1618 cancel_delayed_work(&hdev->discov_off);
1619 hdev->discov_timeout = 0;
1620 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1621 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1624 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1625 cancel_delayed_work(&hdev->service_cache);
1627 cancel_delayed_work_sync(&hdev->le_scan_disable);
1629 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1630 cancel_delayed_work_sync(&hdev->rpa_expired);
1632 /* Avoid potential lockdep warnings from the *_flush() calls by
1633 * ensuring the workqueue is empty up front.
1635 drain_workqueue(hdev->workqueue);
1639 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1640 if (hdev->dev_type == HCI_BREDR)
1641 mgmt_powered(hdev, 0);
1644 hci_inquiry_cache_flush(hdev);
1645 hci_pend_le_actions_clear(hdev);
1646 hci_conn_hash_flush(hdev);
1647 hci_dev_unlock(hdev);
1649 hci_notify(hdev, HCI_DEV_DOWN);
1655 skb_queue_purge(&hdev->cmd_q);
1656 atomic_set(&hdev->cmd_cnt, 1);
1657 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1658 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1659 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1660 set_bit(HCI_INIT, &hdev->flags);
1661 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1662 clear_bit(HCI_INIT, &hdev->flags);
1665 /* flush cmd work */
1666 flush_work(&hdev->cmd_work);
1669 skb_queue_purge(&hdev->rx_q);
1670 skb_queue_purge(&hdev->cmd_q);
1671 skb_queue_purge(&hdev->raw_q);
1673 /* Drop last sent command */
1674 if (hdev->sent_cmd) {
1675 cancel_delayed_work_sync(&hdev->cmd_timer);
1676 kfree_skb(hdev->sent_cmd);
1677 hdev->sent_cmd = NULL;
1680 kfree_skb(hdev->recv_evt);
1681 hdev->recv_evt = NULL;
1683 /* After this point our queues are empty
1684 * and no tasks are scheduled. */
1688 hdev->flags &= BIT(HCI_RAW);
1689 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1691 /* Controller radio is available but is currently powered down */
1692 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1694 memset(hdev->eir, 0, sizeof(hdev->eir));
1695 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1696 bacpy(&hdev->random_addr, BDADDR_ANY);
1698 hci_req_unlock(hdev);
1704 int hci_dev_close(__u16 dev)
1706 struct hci_dev *hdev;
1709 hdev = hci_dev_get(dev);
1713 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1718 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1719 cancel_delayed_work(&hdev->power_off);
1721 err = hci_dev_do_close(hdev);
1728 int hci_dev_reset(__u16 dev)
1730 struct hci_dev *hdev;
1733 hdev = hci_dev_get(dev);
1739 if (!test_bit(HCI_UP, &hdev->flags)) {
1744 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1749 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1755 skb_queue_purge(&hdev->rx_q);
1756 skb_queue_purge(&hdev->cmd_q);
1758 /* Avoid potential lockdep warnings from the *_flush() calls by
1759 * ensuring the workqueue is empty up front.
1761 drain_workqueue(hdev->workqueue);
1764 hci_inquiry_cache_flush(hdev);
1765 hci_conn_hash_flush(hdev);
1766 hci_dev_unlock(hdev);
1771 atomic_set(&hdev->cmd_cnt, 1);
1772 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1774 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1777 hci_req_unlock(hdev);
1782 int hci_dev_reset_stat(__u16 dev)
1784 struct hci_dev *hdev;
1787 hdev = hci_dev_get(dev);
1791 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1796 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1801 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1808 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1810 bool conn_changed, discov_changed;
1812 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1814 if ((scan & SCAN_PAGE))
1815 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1818 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1821 if ((scan & SCAN_INQUIRY)) {
1822 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1825 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1826 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1830 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1833 if (conn_changed || discov_changed) {
1834 /* In case this was disabled through mgmt */
1835 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1837 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1838 mgmt_update_adv_data(hdev);
1840 mgmt_new_settings(hdev);
1844 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1846 struct hci_dev *hdev;
1847 struct hci_dev_req dr;
1850 if (copy_from_user(&dr, arg, sizeof(dr)))
1853 hdev = hci_dev_get(dr.dev_id);
1857 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1862 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1867 if (hdev->dev_type != HCI_BREDR) {
1872 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1879 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1884 if (!lmp_encrypt_capable(hdev)) {
1889 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1890 /* Auth must be enabled first */
1891 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1897 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1902 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1905 /* Ensure that the connectable and discoverable states
1906 * get correctly modified as this was a non-mgmt change.
1909 hci_update_scan_state(hdev, dr.dev_opt);
1913 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1917 case HCISETLINKMODE:
1918 hdev->link_mode = ((__u16) dr.dev_opt) &
1919 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1923 hdev->pkt_type = (__u16) dr.dev_opt;
1927 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1928 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1932 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1933 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1946 int hci_get_dev_list(void __user *arg)
1948 struct hci_dev *hdev;
1949 struct hci_dev_list_req *dl;
1950 struct hci_dev_req *dr;
1951 int n = 0, size, err;
1954 if (get_user(dev_num, (__u16 __user *) arg))
1957 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1960 size = sizeof(*dl) + dev_num * sizeof(*dr);
1962 dl = kzalloc(size, GFP_KERNEL);
1968 read_lock(&hci_dev_list_lock);
1969 list_for_each_entry(hdev, &hci_dev_list, list) {
1970 unsigned long flags = hdev->flags;
1972 /* When the auto-off is configured it means the transport
1973 * is running, but in that case still indicate that the
1974 * device is actually down.
1976 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1977 flags &= ~BIT(HCI_UP);
1979 (dr + n)->dev_id = hdev->id;
1980 (dr + n)->dev_opt = flags;
1985 read_unlock(&hci_dev_list_lock);
1988 size = sizeof(*dl) + n * sizeof(*dr);
1990 err = copy_to_user(arg, dl, size);
1993 return err ? -EFAULT : 0;
1996 int hci_get_dev_info(void __user *arg)
1998 struct hci_dev *hdev;
1999 struct hci_dev_info di;
2000 unsigned long flags;
2003 if (copy_from_user(&di, arg, sizeof(di)))
2006 hdev = hci_dev_get(di.dev_id);
2010 /* When the auto-off is configured it means the transport
2011 * is running, but in that case still indicate that the
2012 * device is actually down.
2014 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2015 flags = hdev->flags & ~BIT(HCI_UP);
2017 flags = hdev->flags;
2019 strcpy(di.name, hdev->name);
2020 di.bdaddr = hdev->bdaddr;
2021 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2023 di.pkt_type = hdev->pkt_type;
2024 if (lmp_bredr_capable(hdev)) {
2025 di.acl_mtu = hdev->acl_mtu;
2026 di.acl_pkts = hdev->acl_pkts;
2027 di.sco_mtu = hdev->sco_mtu;
2028 di.sco_pkts = hdev->sco_pkts;
2030 di.acl_mtu = hdev->le_mtu;
2031 di.acl_pkts = hdev->le_pkts;
2035 di.link_policy = hdev->link_policy;
2036 di.link_mode = hdev->link_mode;
2038 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2039 memcpy(&di.features, &hdev->features, sizeof(di.features));
2041 if (copy_to_user(arg, &di, sizeof(di)))
/* ---- Interface to HCI drivers ---- */
2051 static int hci_rfkill_set_block(void *data, bool blocked)
2053 struct hci_dev *hdev = data;
2055 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2057 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2061 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2062 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2063 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2064 hci_dev_do_close(hdev);
2066 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2072 static const struct rfkill_ops hci_rfkill_ops = {
2073 .set_block = hci_rfkill_set_block,
2076 static void hci_power_on(struct work_struct *work)
2078 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2081 BT_DBG("%s", hdev->name);
2083 err = hci_dev_do_open(hdev);
2086 mgmt_set_powered_failed(hdev, err);
2087 hci_dev_unlock(hdev);
2091 /* During the HCI setup phase, a few error conditions are
2092 * ignored and they need to be checked now. If they are still
2093 * valid, it is important to turn the device back off.
2095 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2096 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2097 (hdev->dev_type == HCI_BREDR &&
2098 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2099 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2100 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2101 hci_dev_do_close(hdev);
2102 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2103 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2104 HCI_AUTO_OFF_TIMEOUT);
2107 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2108 /* For unconfigured devices, set the HCI_RAW flag
2109 * so that userspace can easily identify them.
2111 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2112 set_bit(HCI_RAW, &hdev->flags);
2114 /* For fully configured devices, this will send
2115 * the Index Added event. For unconfigured devices,
2116 * it will send Unconfigued Index Added event.
2118 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2119 * and no event will be send.
2121 mgmt_index_added(hdev);
2122 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2123 /* When the controller is now configured, then it
2124 * is important to clear the HCI_RAW flag.
2126 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2127 clear_bit(HCI_RAW, &hdev->flags);
2129 /* Powering on the controller with HCI_CONFIG set only
2130 * happens with the transition from unconfigured to
2131 * configured. This will send the Index Added event.
2133 mgmt_index_added(hdev);
2137 static void hci_power_off(struct work_struct *work)
2139 struct hci_dev *hdev = container_of(work, struct hci_dev,
2142 BT_DBG("%s", hdev->name);
2144 hci_dev_do_close(hdev);
2147 static void hci_discov_off(struct work_struct *work)
2149 struct hci_dev *hdev;
2151 hdev = container_of(work, struct hci_dev, discov_off.work);
2153 BT_DBG("%s", hdev->name);
2155 mgmt_discoverable_timeout(hdev);
2158 void hci_uuids_clear(struct hci_dev *hdev)
2160 struct bt_uuid *uuid, *tmp;
2162 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2163 list_del(&uuid->list);
2168 void hci_link_keys_clear(struct hci_dev *hdev)
2170 struct link_key *key;
2172 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2173 list_del_rcu(&key->list);
2174 kfree_rcu(key, rcu);
2178 void hci_smp_ltks_clear(struct hci_dev *hdev)
2182 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2183 list_del_rcu(&k->list);
2188 void hci_smp_irks_clear(struct hci_dev *hdev)
2192 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2193 list_del_rcu(&k->list);
2198 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2203 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2204 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2214 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2215 u8 key_type, u8 old_key_type)
2218 if (key_type < 0x03)
2221 /* Debug keys are insecure so don't store them persistently */
2222 if (key_type == HCI_LK_DEBUG_COMBINATION)
2225 /* Changed combination key and there's no previous one */
2226 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2229 /* Security mode 3 case */
2233 /* BR/EDR key derived using SC from an LE link */
2234 if (conn->type == LE_LINK)
2237 /* Neither local nor remote side had no-bonding as requirement */
2238 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2241 /* Local side had dedicated bonding as requirement */
2242 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2245 /* Remote side had dedicated bonding as requirement */
2246 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2249 /* If none of the above criteria match, then don't store the key
2254 static u8 ltk_role(u8 type)
2256 if (type == SMP_LTK)
2257 return HCI_ROLE_MASTER;
2259 return HCI_ROLE_SLAVE;
2262 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2263 u8 addr_type, u8 role)
2268 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2269 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2272 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2282 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2284 struct smp_irk *irk;
2287 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2288 if (!bacmp(&irk->rpa, rpa)) {
2294 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2295 if (smp_irk_matches(hdev, irk->val, rpa)) {
2296 bacpy(&irk->rpa, rpa);
2306 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2309 struct smp_irk *irk;
2311 /* Identity Address must be public or static random */
2312 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2316 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2317 if (addr_type == irk->addr_type &&
2318 bacmp(bdaddr, &irk->bdaddr) == 0) {
2328 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2329 bdaddr_t *bdaddr, u8 *val, u8 type,
2330 u8 pin_len, bool *persistent)
2332 struct link_key *key, *old_key;
2335 old_key = hci_find_link_key(hdev, bdaddr);
2337 old_key_type = old_key->type;
2340 old_key_type = conn ? conn->key_type : 0xff;
2341 key = kzalloc(sizeof(*key), GFP_KERNEL);
2344 list_add_rcu(&key->list, &hdev->link_keys);
2347 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2349 /* Some buggy controller combinations generate a changed
2350 * combination key for legacy pairing even when there's no
2352 if (type == HCI_LK_CHANGED_COMBINATION &&
2353 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2354 type = HCI_LK_COMBINATION;
2356 conn->key_type = type;
2359 bacpy(&key->bdaddr, bdaddr);
2360 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2361 key->pin_len = pin_len;
2363 if (type == HCI_LK_CHANGED_COMBINATION)
2364 key->type = old_key_type;
2369 *persistent = hci_persistent_key(hdev, conn, type,
2375 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2376 u8 addr_type, u8 type, u8 authenticated,
2377 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2379 struct smp_ltk *key, *old_key;
2380 u8 role = ltk_role(type);
2382 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2386 key = kzalloc(sizeof(*key), GFP_KERNEL);
2389 list_add_rcu(&key->list, &hdev->long_term_keys);
2392 bacpy(&key->bdaddr, bdaddr);
2393 key->bdaddr_type = addr_type;
2394 memcpy(key->val, tk, sizeof(key->val));
2395 key->authenticated = authenticated;
2398 key->enc_size = enc_size;
2404 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2405 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2407 struct smp_irk *irk;
2409 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2411 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2415 bacpy(&irk->bdaddr, bdaddr);
2416 irk->addr_type = addr_type;
2418 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2421 memcpy(irk->val, val, 16);
2422 bacpy(&irk->rpa, rpa);
2427 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2429 struct link_key *key;
2431 key = hci_find_link_key(hdev, bdaddr);
2435 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2437 list_del_rcu(&key->list);
2438 kfree_rcu(key, rcu);
2443 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2448 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2449 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2452 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2454 list_del_rcu(&k->list);
2459 return removed ? 0 : -ENOENT;
2462 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2466 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2467 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2470 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2472 list_del_rcu(&k->list);
2477 /* HCI command timer function */
2478 static void hci_cmd_timeout(struct work_struct *work)
2480 struct hci_dev *hdev = container_of(work, struct hci_dev,
2483 if (hdev->sent_cmd) {
2484 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2485 u16 opcode = __le16_to_cpu(sent->opcode);
2487 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2489 BT_ERR("%s command tx timeout", hdev->name);
2492 atomic_set(&hdev->cmd_cnt, 1);
2493 queue_work(hdev->workqueue, &hdev->cmd_work);
2496 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2497 bdaddr_t *bdaddr, u8 bdaddr_type)
2499 struct oob_data *data;
2501 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2502 if (bacmp(bdaddr, &data->bdaddr) != 0)
2504 if (data->bdaddr_type != bdaddr_type)
2512 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2515 struct oob_data *data;
2517 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2521 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2523 list_del(&data->list);
2529 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2531 struct oob_data *data, *n;
2533 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2534 list_del(&data->list);
2539 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2540 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2541 u8 *hash256, u8 *rand256)
2543 struct oob_data *data;
2545 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2547 data = kmalloc(sizeof(*data), GFP_KERNEL);
2551 bacpy(&data->bdaddr, bdaddr);
2552 data->bdaddr_type = bdaddr_type;
2553 list_add(&data->list, &hdev->remote_oob_data);
2556 if (hash192 && rand192) {
2557 memcpy(data->hash192, hash192, sizeof(data->hash192));
2558 memcpy(data->rand192, rand192, sizeof(data->rand192));
2560 memset(data->hash192, 0, sizeof(data->hash192));
2561 memset(data->rand192, 0, sizeof(data->rand192));
2564 if (hash256 && rand256) {
2565 memcpy(data->hash256, hash256, sizeof(data->hash256));
2566 memcpy(data->rand256, rand256, sizeof(data->rand256));
2568 memset(data->hash256, 0, sizeof(data->hash256));
2569 memset(data->rand256, 0, sizeof(data->rand256));
2572 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2577 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2578 bdaddr_t *bdaddr, u8 type)
2580 struct bdaddr_list *b;
2582 list_for_each_entry(b, bdaddr_list, list) {
2583 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2590 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2592 struct list_head *p, *n;
2594 list_for_each_safe(p, n, bdaddr_list) {
2595 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2602 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2604 struct bdaddr_list *entry;
2606 if (!bacmp(bdaddr, BDADDR_ANY))
2609 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2612 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2616 bacpy(&entry->bdaddr, bdaddr);
2617 entry->bdaddr_type = type;
2619 list_add(&entry->list, list);
2624 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2626 struct bdaddr_list *entry;
2628 if (!bacmp(bdaddr, BDADDR_ANY)) {
2629 hci_bdaddr_list_clear(list);
2633 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2637 list_del(&entry->list);
2643 /* This function requires the caller holds hdev->lock */
2644 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2645 bdaddr_t *addr, u8 addr_type)
2647 struct hci_conn_params *params;
2649 /* The conn params list only contains identity addresses */
2650 if (!hci_is_identity_address(addr, addr_type))
2653 list_for_each_entry(params, &hdev->le_conn_params, list) {
2654 if (bacmp(¶ms->addr, addr) == 0 &&
2655 params->addr_type == addr_type) {
2663 /* This function requires the caller holds hdev->lock */
2664 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2665 bdaddr_t *addr, u8 addr_type)
2667 struct hci_conn_params *param;
2669 /* The list only contains identity addresses */
2670 if (!hci_is_identity_address(addr, addr_type))
2673 list_for_each_entry(param, list, action) {
2674 if (bacmp(¶m->addr, addr) == 0 &&
2675 param->addr_type == addr_type)
2682 /* This function requires the caller holds hdev->lock */
2683 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2684 bdaddr_t *addr, u8 addr_type)
2686 struct hci_conn_params *params;
2688 if (!hci_is_identity_address(addr, addr_type))
2691 params = hci_conn_params_lookup(hdev, addr, addr_type);
2695 params = kzalloc(sizeof(*params), GFP_KERNEL);
2697 BT_ERR("Out of memory");
2701 bacpy(¶ms->addr, addr);
2702 params->addr_type = addr_type;
2704 list_add(¶ms->list, &hdev->le_conn_params);
2705 INIT_LIST_HEAD(¶ms->action);
2707 params->conn_min_interval = hdev->le_conn_min_interval;
2708 params->conn_max_interval = hdev->le_conn_max_interval;
2709 params->conn_latency = hdev->le_conn_latency;
2710 params->supervision_timeout = hdev->le_supv_timeout;
2711 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2713 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2718 static void hci_conn_params_free(struct hci_conn_params *params)
2721 hci_conn_drop(params->conn);
2722 hci_conn_put(params->conn);
2725 list_del(¶ms->action);
2726 list_del(¶ms->list);
2730 /* This function requires the caller holds hdev->lock */
2731 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2733 struct hci_conn_params *params;
2735 params = hci_conn_params_lookup(hdev, addr, addr_type);
2739 hci_conn_params_free(params);
2741 hci_update_background_scan(hdev);
2743 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2746 /* This function requires the caller holds hdev->lock */
2747 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2749 struct hci_conn_params *params, *tmp;
2751 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2752 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2754 list_del(¶ms->list);
2758 BT_DBG("All LE disabled connection parameters were removed");
2761 /* This function requires the caller holds hdev->lock */
2762 void hci_conn_params_clear_all(struct hci_dev *hdev)
2764 struct hci_conn_params *params, *tmp;
2766 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2767 hci_conn_params_free(params);
2769 hci_update_background_scan(hdev);
2771 BT_DBG("All LE connection parameters were removed");
2774 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2777 BT_ERR("Failed to start inquiry: status %d", status);
2780 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2781 hci_dev_unlock(hdev);
2786 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2788 /* General inquiry access code (GIAC) */
2789 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2790 struct hci_request req;
2791 struct hci_cp_inquiry cp;
2795 BT_ERR("Failed to disable LE scanning: status %d", status);
2799 switch (hdev->discovery.type) {
2800 case DISCOV_TYPE_LE:
2802 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2803 hci_dev_unlock(hdev);
2806 case DISCOV_TYPE_INTERLEAVED:
2807 hci_req_init(&req, hdev);
2809 memset(&cp, 0, sizeof(cp));
2810 memcpy(&cp.lap, lap, sizeof(cp.lap));
2811 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2812 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2816 hci_inquiry_cache_flush(hdev);
2818 err = hci_req_run(&req, inquiry_complete);
2820 BT_ERR("Inquiry request failed: err %d", err);
2821 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2824 hci_dev_unlock(hdev);
2829 static void le_scan_disable_work(struct work_struct *work)
2831 struct hci_dev *hdev = container_of(work, struct hci_dev,
2832 le_scan_disable.work);
2833 struct hci_request req;
2836 BT_DBG("%s", hdev->name);
2838 hci_req_init(&req, hdev);
2840 hci_req_add_le_scan_disable(&req);
2842 err = hci_req_run(&req, le_scan_disable_work_complete);
2844 BT_ERR("Disable LE scanning request failed: err %d", err);
2847 /* Copy the Identity Address of the controller.
2849 * If the controller has a public BD_ADDR, then by default use that one.
2850 * If this is a LE only controller without a public address, default to
2851 * the static random address.
2853 * For debugging purposes it is possible to force controllers with a
2854 * public address to use the static random address instead.
2856 * In case BR/EDR has been disabled on a dual-mode controller and
2857 * userspace has configured a static address, then that address
2858 * becomes the identity address instead of the public BR/EDR address.
2860 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2863 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2864 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2865 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2866 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2867 bacpy(bdaddr, &hdev->static_addr);
2868 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2870 bacpy(bdaddr, &hdev->bdaddr);
2871 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
/* Allocate a new hci_dev and initialise it with protocol defaults:
 * mandatory BR/EDR packet types, LE scan/connection/advertising
 * parameters, the per-device locks, lists, work items and queues.
 * Returns the new device (error handling for allocation failure is
 * elided in this extract).
 */
2875 /* Alloc HCI device */
2876 struct hci_dev *hci_alloc_dev(void)
2878 struct hci_dev *hdev;
2880 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
/* Basic BR/EDR defaults */
2884 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2885 hdev->esco_type = (ESCO_HV1);
2886 hdev->link_mode = (HCI_LM_ACCEPT);
2887 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2888 hdev->io_capability = 0x03; /* No Input No Output */
2889 hdev->manufacturer = 0xffff; /* Default to internal use */
2890 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2891 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2893 hdev->sniff_max_interval = 800;
2894 hdev->sniff_min_interval = 80;
/* LE defaults; values are raw HCI parameter units */
2896 hdev->le_adv_channel_map = 0x07;
2897 hdev->le_adv_min_interval = 0x0800;
2898 hdev->le_adv_max_interval = 0x0800;
2899 hdev->le_scan_interval = 0x0060;
2900 hdev->le_scan_window = 0x0030;
2901 hdev->le_conn_min_interval = 0x0028;
2902 hdev->le_conn_max_interval = 0x0038;
2903 hdev->le_conn_latency = 0x0000;
2904 hdev->le_supv_timeout = 0x002a;
2905 hdev->le_def_tx_len = 0x001b;
2906 hdev->le_def_tx_time = 0x0148;
2907 hdev->le_max_tx_len = 0x001b;
2908 hdev->le_max_tx_time = 0x0148;
2909 hdev->le_max_rx_len = 0x001b;
2910 hdev->le_max_rx_time = 0x0148;
2912 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2913 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2914 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2915 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2917 mutex_init(&hdev->lock);
2918 mutex_init(&hdev->req_lock);
/* Per-device bookkeeping lists */
2920 INIT_LIST_HEAD(&hdev->mgmt_pending);
2921 INIT_LIST_HEAD(&hdev->blacklist);
2922 INIT_LIST_HEAD(&hdev->whitelist);
2923 INIT_LIST_HEAD(&hdev->uuids);
2924 INIT_LIST_HEAD(&hdev->link_keys);
2925 INIT_LIST_HEAD(&hdev->long_term_keys);
2926 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2927 INIT_LIST_HEAD(&hdev->remote_oob_data);
2928 INIT_LIST_HEAD(&hdev->le_white_list);
2929 INIT_LIST_HEAD(&hdev->le_conn_params);
2930 INIT_LIST_HEAD(&hdev->pend_le_conns);
2931 INIT_LIST_HEAD(&hdev->pend_le_reports);
2932 INIT_LIST_HEAD(&hdev->conn_hash.list);
/* Deferred work for RX/TX/command paths and power management */
2934 INIT_WORK(&hdev->rx_work, hci_rx_work);
2935 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2936 INIT_WORK(&hdev->tx_work, hci_tx_work);
2937 INIT_WORK(&hdev->power_on, hci_power_on);
2939 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2940 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2941 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2943 skb_queue_head_init(&hdev->rx_q);
2944 skb_queue_head_init(&hdev->cmd_q);
2945 skb_queue_head_init(&hdev->raw_q);
2947 init_waitqueue_head(&hdev->req_wait_q);
2949 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2951 hci_init_sysfs(hdev);
2952 discovery_init(hdev);
2956 EXPORT_SYMBOL(hci_alloc_dev);
/* Drop the device-model reference on @hdev; the actual memory is freed
 * by the driver core release callback once the last reference is gone.
 */
2958 /* Free HCI device */
2959 void hci_free_dev(struct hci_dev *hdev)
2961 /* will free via device release */
2962 put_device(&hdev->dev);
2964 EXPORT_SYMBOL(hci_free_dev);
/* Register an HCI device with the core: allocate an index, create the
 * per-device workqueues, debugfs dir, sysfs device and optional rfkill
 * switch, add it to the global device list and kick off power-on.
 * Requires the driver to provide open/close/send callbacks.
 * Returns the allocated id on success (return paths elided in extract).
 */
2966 /* Register HCI device */
2967 int hci_register_dev(struct hci_dev *hdev)
2971 if (!hdev->open || !hdev->close || !hdev->send)
2974 /* Do not allow HCI_AMP devices to register at index 0,
2975 * so the index can be used as the AMP controller ID.
2977 switch (hdev->dev_type) {
/* BR/EDR may take any index starting at 0 */
2979 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
/* AMP indices start at 1 (see comment above) */
2982 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2991 sprintf(hdev->name, "hci%d", id);
2994 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Dedicated single-threaded (max_active = 1) workqueues keep frame and
 * request processing strictly ordered per device.
 */
2996 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2997 WQ_MEM_RECLAIM, 1, hdev->name);
2998 if (!hdev->workqueue) {
3003 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3004 WQ_MEM_RECLAIM, 1, hdev->name);
3005 if (!hdev->req_workqueue) {
3006 destroy_workqueue(hdev->workqueue);
3011 if (!IS_ERR_OR_NULL(bt_debugfs))
3012 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3014 dev_set_name(&hdev->dev, "%s", hdev->name);
3016 error = device_add(&hdev->dev);
/* rfkill is best-effort: registration failure leaves hdev->rfkill NULL
 * and the device continues without an rfkill switch.
 */
3020 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3021 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3024 if (rfkill_register(hdev->rfkill) < 0) {
3025 rfkill_destroy(hdev->rfkill);
3026 hdev->rfkill = NULL;
3030 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3031 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3033 set_bit(HCI_SETUP, &hdev->dev_flags);
3034 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3036 if (hdev->dev_type == HCI_BREDR) {
3037 /* Assume BR/EDR support until proven otherwise (such as
3038 * through reading supported features during init.
3040 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3043 write_lock(&hci_dev_list_lock);
3044 list_add(&hdev->list, &hci_dev_list);
3045 write_unlock(&hci_dev_list_lock);
3047 /* Devices that are marked for raw-only usage are unconfigured
3048 * and should not be included in normal operation.
3050 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3051 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3053 hci_notify(hdev, HCI_DEV_REG);
3056 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind path (labels elided in this extract) */
3061 destroy_workqueue(hdev->workqueue);
3062 destroy_workqueue(hdev->req_workqueue);
3064 ida_simple_remove(&hci_index_ida, hdev->id);
3068 EXPORT_SYMBOL(hci_register_dev);
/* Tear down a registered HCI device: remove it from the global list,
 * close it, flush pending work, notify mgmt, unregister rfkill/sysfs/
 * debugfs, destroy workqueues and release all per-device state before
 * returning the index to the IDA.
 */
3070 /* Unregister HCI device */
3071 void hci_unregister_dev(struct hci_dev *hdev)
3075 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3077 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3081 write_lock(&hci_dev_list_lock);
3082 list_del(&hdev->list);
3083 write_unlock(&hci_dev_list_lock);
3085 hci_dev_do_close(hdev);
/* Free any partially reassembled packets */
3087 for (i = 0; i < NUM_REASSEMBLY; i++)
3088 kfree_skb(hdev->reassembly[i]);
3090 cancel_work_sync(&hdev->power_on);
/* Only announce removal to mgmt for devices that finished setup */
3092 if (!test_bit(HCI_INIT, &hdev->flags) &&
3093 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3094 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3096 mgmt_index_removed(hdev);
3097 hci_dev_unlock(hdev);
3100 /* mgmt_index_removed should take care of emptying the
3102 BUG_ON(!list_empty(&hdev->mgmt_pending));
3104 hci_notify(hdev, HCI_DEV_UNREG);
3107 rfkill_unregister(hdev->rfkill);
3108 rfkill_destroy(hdev->rfkill);
3111 smp_unregister(hdev);
3113 device_del(&hdev->dev);
3115 debugfs_remove_recursive(hdev->debugfs);
3117 destroy_workqueue(hdev->workqueue);
3118 destroy_workqueue(hdev->req_workqueue);
/* Drop all stored keys/lists under the device lock */
3121 hci_bdaddr_list_clear(&hdev->blacklist);
3122 hci_bdaddr_list_clear(&hdev->whitelist);
3123 hci_uuids_clear(hdev);
3124 hci_link_keys_clear(hdev);
3125 hci_smp_ltks_clear(hdev);
3126 hci_smp_irks_clear(hdev);
3127 hci_remote_oob_data_clear(hdev);
3128 hci_bdaddr_list_clear(&hdev->le_white_list);
3129 hci_conn_params_clear_all(hdev);
3130 hci_discovery_filter_clear(hdev);
3131 hci_dev_unlock(hdev);
/* NOTE(review): uses a local 'id' rather than hdev->id — presumably
 * captured earlier (elided) because hdev may be released by now.
 */
3135 ida_simple_remove(&hci_index_ida, id);
3137 EXPORT_SYMBOL(hci_unregister_dev);
/* Notify registered listeners that @hdev is suspending. */
3139 /* Suspend HCI device */
3140 int hci_suspend_dev(struct hci_dev *hdev)
3142 hci_notify(hdev, HCI_DEV_SUSPEND);
3145 EXPORT_SYMBOL(hci_suspend_dev);
/* Notify registered listeners that @hdev is resuming. */
3147 /* Resume HCI device */
3148 int hci_resume_dev(struct hci_dev *hdev)
3150 hci_notify(hdev, HCI_DEV_RESUME);
3153 EXPORT_SYMBOL(hci_resume_dev);
/* Simulate a Hardware Error event (error code 0x00) and inject it into
 * the RX path so the upper stack performs a full reset of @hdev.
 */
3155 /* Reset HCI device */
3156 int hci_reset_dev(struct hci_dev *hdev)
3158 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3159 struct sk_buff *skb;
3161 skb = bt_skb_alloc(3, GFP_ATOMIC);
3165 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3166 memcpy(skb_put(skb, 3), hw_err, 3);
3168 /* Send Hardware Error to upper stack */
3169 return hci_recv_frame(hdev, skb);
/* Entry point for frames coming from HCI drivers: timestamp the skb,
 * queue it on the RX queue and schedule hci_rx_work. Frames are only
 * accepted while the device is up or initialising.
 */
3173 /* Receive frame from HCI drivers */
3174 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3176 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3177 && !test_bit(HCI_INIT, &hdev->flags))) {
/* Mark as inbound for the monitor/socket copy logic */
3183 bt_cb(skb)->incoming = 1;
3186 __net_timestamp(skb);
3188 skb_queue_tail(&hdev->rx_q, skb);
3189 queue_work(hdev->workqueue, &hdev->rx_work);
3193 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a (possibly fragmented) HCI packet of @type from @count
 * bytes at @data, using the per-device reassembly slot @index. Once the
 * header is complete, scb->expect holds the remaining payload length;
 * when it reaches zero the finished frame is handed to hci_recv_frame().
 */
3195 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3196 int count, __u8 index)
3201 struct sk_buff *skb;
3202 struct bt_skb_cb *scb;
/* Only ACL/SCO/EVENT types and valid slot indices are accepted */
3204 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3205 index >= NUM_REASSEMBLY)
3208 skb = hdev->reassembly[index];
/* Max frame and header size depend on the packet type */
3212 case HCI_ACLDATA_PKT:
3213 len = HCI_MAX_FRAME_SIZE;
3214 hlen = HCI_ACL_HDR_SIZE;
3217 len = HCI_MAX_EVENT_SIZE;
3218 hlen = HCI_EVENT_HDR_SIZE;
3220 case HCI_SCODATA_PKT:
3221 len = HCI_MAX_SCO_SIZE;
3222 hlen = HCI_SCO_HDR_SIZE;
/* Start of a new packet: allocate the reassembly skb */
3226 skb = bt_skb_alloc(len, GFP_ATOMIC);
3230 scb = (void *) skb->cb;
3232 scb->pkt_type = type;
3234 hdev->reassembly[index] = skb;
3238 scb = (void *) skb->cb;
3239 len = min_t(uint, scb->expect, count);
3241 memcpy(skb_put(skb, len), data, len);
/* Header complete: read the payload length from the type's header */
3250 if (skb->len == HCI_EVENT_HDR_SIZE) {
3251 struct hci_event_hdr *h = hci_event_hdr(skb);
3252 scb->expect = h->plen;
/* Advertised length would overflow the skb: drop the reassembly */
3254 if (skb_tailroom(skb) < scb->expect) {
3256 hdev->reassembly[index] = NULL;
3262 case HCI_ACLDATA_PKT:
3263 if (skb->len == HCI_ACL_HDR_SIZE) {
3264 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3265 scb->expect = __le16_to_cpu(h->dlen);
3267 if (skb_tailroom(skb) < scb->expect) {
3269 hdev->reassembly[index] = NULL;
3275 case HCI_SCODATA_PKT:
3276 if (skb->len == HCI_SCO_HDR_SIZE) {
3277 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3278 scb->expect = h->dlen;
3280 if (skb_tailroom(skb) < scb->expect) {
3282 hdev->reassembly[index] = NULL;
3289 if (scb->expect == 0) {
3290 /* Complete frame */
3292 bt_cb(skb)->pkt_type = type;
3293 hci_recv_frame(hdev, skb);
3295 hdev->reassembly[index] = NULL;
/* Feed a raw byte stream from a driver into the reassembly machinery.
 * The first byte of each frame is the HCI packet-type indicator; the
 * remainder is fed to hci_reassembly() in the STREAM_REASSEMBLY slot
 * until the data is consumed (loop structure partly elided in extract).
 */
3303 #define STREAM_REASSEMBLY 0
3305 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3311 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3314 struct { char type; } *pkt;
3316 /* Start of the frame */
/* Continuation: reuse the type recorded in the pending skb */
3323 type = bt_cb(skb)->pkt_type;
3325 rem = hci_reassembly(hdev, type, data, count,
/* Advance past the bytes hci_reassembly() consumed */
3330 data += (count - rem);
3336 EXPORT_SYMBOL(hci_recv_stream_fragment);
/* Register an upper-protocol callback structure on the global list,
 * protected by hci_cb_list_lock.
 */
3338 /* ---- Interface to upper protocols ---- */
3340 int hci_register_cb(struct hci_cb *cb)
3342 BT_DBG("%p name %s", cb, cb->name);
3344 write_lock(&hci_cb_list_lock);
3345 list_add(&cb->list, &hci_cb_list);
3346 write_unlock(&hci_cb_list_lock);
3350 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback structure. */
3352 int hci_unregister_cb(struct hci_cb *cb)
3354 BT_DBG("%p name %s", cb, cb->name);
3356 write_lock(&hci_cb_list_lock);
3357 list_del(&cb->list);
3358 write_unlock(&hci_cb_list_lock);
3362 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver's send callback, after copying
 * it to the monitor channel and (in promiscuous mode) to raw sockets.
 */
3364 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3368 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3371 __net_timestamp(skb);
3373 /* Send copy to monitor */
3374 hci_send_to_monitor(hdev, skb);
3376 if (atomic_read(&hdev->promisc)) {
3377 /* Send copy to the sockets */
3378 hci_send_to_sock(hdev, skb);
3381 /* Get rid of skb owner, prior to sending to the driver. */
3384 err = hdev->send(hdev, skb);
3386 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* True while a synchronous HCI request is awaiting completion. */
3391 bool hci_req_pending(struct hci_dev *hdev)
3393 return (hdev->req_status == HCI_REQ_PEND);
/* Build and queue a stand-alone HCI command (@opcode with @plen bytes
 * of @param), then schedule the command work to transmit it.
 */
3396 /* Send HCI command */
3397 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3400 struct sk_buff *skb;
3402 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3404 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3406 BT_ERR("%s no memory for command", hdev->name);
3410 /* Stand-alone HCI commands must be flagged as
3411 * single-command requests.
3413 bt_cb(skb)->req.start = true;
3415 skb_queue_tail(&hdev->cmd_q, skb);
3416 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Return a pointer to the parameter payload of the last sent command,
 * or NULL if no command was sent or its opcode doesn't match @opcode.
 */
3421 /* Get data from the previously sent command */
3422 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3424 struct hci_command_hdr *hdr;
3426 if (!hdev->sent_cmd)
3429 hdr = (void *) hdev->sent_cmd->data;
3431 if (hdr->opcode != cpu_to_le16(opcode))
3434 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Skip past the command header to the parameters */
3436 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (packed handle+flags, little-endian
 * length) to @skb.
 */
3440 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3442 struct hci_acl_hdr *hdr;
3445 skb_push(skb, HCI_ACL_HDR_SIZE);
3446 skb_reset_transport_header(skb);
3447 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3448 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3449 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to @skb (and every fragment hanging off its
 * frag_list) and append them atomically to @queue. Continuation
 * fragments are sent without the ACL_START flag.
 */
3452 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3453 struct sk_buff *skb, __u16 flags)
3455 struct hci_conn *conn = chan->conn;
3456 struct hci_dev *hdev = conn->hdev;
3457 struct sk_buff *list;
3459 skb->len = skb_headlen(skb);
3462 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* BR/EDR uses the connection handle, AMP the channel handle */
3464 switch (hdev->dev_type) {
3466 hci_add_acl_hdr(skb, conn->handle, flags);
3469 hci_add_acl_hdr(skb, chan->handle, flags);
3472 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3476 list = skb_shinfo(skb)->frag_list;
3478 /* Non fragmented */
3479 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3481 skb_queue_tail(queue, skb);
3484 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3486 skb_shinfo(skb)->frag_list = NULL;
3488 /* Queue all fragments atomically. We need to use spin_lock_bh
3489 * here because of 6LoWPAN links, as there this function is
3490 * called from softirq and using normal spin lock could cause
3493 spin_lock_bh(&queue->lock);
3495 __skb_queue_tail(queue, skb);
/* Subsequent fragments are continuations, not frame starts */
3497 flags &= ~ACL_START;
3500 skb = list; list = list->next;
3502 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3503 hci_add_acl_hdr(skb, conn->handle, flags);
3505 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3507 __skb_queue_tail(queue, skb);
3510 spin_unlock_bh(&queue->lock);
/* Queue an ACL frame on the channel's data queue and schedule TX. */
3514 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3516 struct hci_dev *hdev = chan->conn->hdev;
3518 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3520 hci_queue_acl(chan, &chan->data_q, skb, flags);
3522 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header to @skb, queue it on the connection's data
 * queue and schedule the TX work.
 */
3526 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3528 struct hci_dev *hdev = conn->hdev;
3529 struct hci_sco_hdr hdr;
3531 BT_DBG("%s len %d", hdev->name, skb->len);
3533 hdr.handle = cpu_to_le16(conn->handle);
3534 hdr.dlen = skb->len;
3536 skb_push(skb, HCI_SCO_HDR_SIZE);
3537 skb_reset_transport_header(skb);
3538 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3540 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3542 skb_queue_tail(&conn->data_q, skb);
3543 queue_work(hdev->workqueue, &hdev->tx_work);
/* Connection scheduler: pick the connection of @type with queued data
 * and the fewest in-flight packets (fairness), and compute its TX
 * quota from the controller's free-buffer count for that link type.
 */
3546 /* ---- HCI TX task (outgoing data) ---- */
3548 /* HCI Connection scheduler */
3549 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3552 struct hci_conn_hash *h = &hdev->conn_hash;
3553 struct hci_conn *conn = NULL, *c;
3554 unsigned int num = 0, min = ~0;
3556 /* We don't have to lock device here. Connections are always
3557 * added and removed with TX task disabled. */
3561 list_for_each_entry_rcu(c, &h->list, list) {
3562 if (c->type != type || skb_queue_empty(&c->data_q))
3565 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacked packets */
3570 if (c->sent < min) {
3575 if (hci_conn_num(hdev, type) == num)
/* Quota comes from the per-link-type free buffer count */
3584 switch (conn->type) {
3586 cnt = hdev->acl_cnt;
3590 cnt = hdev->sco_cnt;
/* LE shares ACL buffers when the controller has no LE buffers */
3593 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3597 BT_ERR("Unknown link type");
3605 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets (the controller has stopped responding).
 */
3609 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3611 struct hci_conn_hash *h = &hdev->conn_hash;
3614 BT_ERR("%s link tx timeout", hdev->name);
3618 /* Kill stalled connections */
3619 list_for_each_entry_rcu(c, &h->list, list) {
3620 if (c->type == type && c->sent) {
3621 BT_ERR("%s killing stalled connection %pMR",
3622 hdev->name, &c->dst);
3623 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: among connections of @type, pick the channel
 * whose head skb has the highest priority; ties are broken by the
 * fewest in-flight packets. Also computes the TX quota for it.
 */
3630 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3633 struct hci_conn_hash *h = &hdev->conn_hash;
3634 struct hci_chan *chan = NULL;
3635 unsigned int num = 0, min = ~0, cur_prio = 0;
3636 struct hci_conn *conn;
3637 int cnt, q, conn_num = 0;
3639 BT_DBG("%s", hdev->name);
3643 list_for_each_entry_rcu(conn, &h->list, list) {
3644 struct hci_chan *tmp;
3646 if (conn->type != type)
3649 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3654 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3655 struct sk_buff *skb;
3657 if (skb_queue_empty(&tmp->data_q))
3660 skb = skb_peek(&tmp->data_q);
/* Lower-priority heads never pre-empt the current best */
3661 if (skb->priority < cur_prio)
/* Strictly higher priority resets the fairness tracking */
3664 if (skb->priority > cur_prio) {
3667 cur_prio = skb->priority;
/* Same priority: prefer the least-loaded connection */
3672 if (conn->sent < min) {
3678 if (hci_conn_num(hdev, type) == conn_num)
/* Quota from the controller's free buffer count for this link type */
3687 switch (chan->conn->type) {
3689 cnt = hdev->acl_cnt;
3692 cnt = hdev->block_cnt;
3696 cnt = hdev->sco_cnt;
3699 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3703 BT_ERR("Unknown link type");
3708 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation: after a TX round, bump the priority of skbs still
 * waiting on channels of @type (capped at HCI_PRIO_MAX - 1) so lower
 * priority traffic eventually gets scheduled.
 */
3712 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3714 struct hci_conn_hash *h = &hdev->conn_hash;
3715 struct hci_conn *conn;
3718 BT_DBG("%s", hdev->name);
3722 list_for_each_entry_rcu(conn, &h->list, list) {
3723 struct hci_chan *chan;
3725 if (conn->type != type)
3728 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3733 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3734 struct sk_buff *skb;
3741 if (skb_queue_empty(&chan->data_q))
3744 skb = skb_peek(&chan->data_q);
3745 if (skb->priority >= HCI_PRIO_MAX - 1)
3748 skb->priority = HCI_PRIO_MAX - 1;
3750 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3754 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks this ACL packet occupies
 * (payload length rounded up to whole blocks).
 */
3762 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3764 /* Calculate count of blocks used by this packet */
3765 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If no TX credits are left and the last ACL transmit is older than
 * HCI_ACL_TX_TIMEOUT, kill stalled ACL connections. Skipped for
 * unconfigured controllers.
 */
3768 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3770 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3771 /* ACL tx timeout must be longer than maximum
3772 * link supervision timeout (40.9 seconds) */
3773 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3774 HCI_ACL_TX_TIMEOUT))
3775 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: drain channels by priority within their
 * quota while ACL credits remain, then recompute priorities if
 * anything was sent.
 */
3779 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3781 unsigned int cnt = hdev->acl_cnt;
3782 struct hci_chan *chan;
3783 struct sk_buff *skb;
3786 __check_timeout(hdev, cnt);
3788 while (hdev->acl_cnt &&
3789 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3790 u32 priority = (skb_peek(&chan->data_q))->priority;
3791 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3792 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3793 skb->len, skb->priority);
3795 /* Stop if priority has changed */
3796 if (skb->priority < priority)
3799 skb = skb_dequeue(&chan->data_q);
3801 hci_conn_enter_active_mode(chan->conn,
3802 bt_cb(skb)->force_active);
3804 hci_send_frame(hdev, skb);
3805 hdev->acl_last_tx = jiffies;
/* Re-balance priorities if credits were consumed this round */
3813 if (cnt != hdev->acl_cnt)
3814 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (data-block flow control): like the packet
 * scheduler but accounts in controller blocks via __get_blocks(); a
 * packet larger than the remaining block budget stops the round.
 */
3817 static void hci_sched_acl_blk(struct hci_dev *hdev)
3819 unsigned int cnt = hdev->block_cnt;
3820 struct hci_chan *chan;
3821 struct sk_buff *skb;
3825 __check_timeout(hdev, cnt);
3827 BT_DBG("%s", hdev->name);
/* AMP controllers schedule AMP_LINK traffic here */
3829 if (hdev->dev_type == HCI_AMP)
3834 while (hdev->block_cnt > 0 &&
3835 (chan = hci_chan_sent(hdev, type, &quote))) {
3836 u32 priority = (skb_peek(&chan->data_q))->priority;
3837 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3840 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3841 skb->len, skb->priority);
3843 /* Stop if priority has changed */
3844 if (skb->priority < priority)
3847 skb = skb_dequeue(&chan->data_q);
3849 blocks = __get_blocks(hdev, skb);
3850 if (blocks > hdev->block_cnt)
3853 hci_conn_enter_active_mode(chan->conn,
3854 bt_cb(skb)->force_active);
3856 hci_send_frame(hdev, skb);
3857 hdev->acl_last_tx = jiffies;
3859 hdev->block_cnt -= blocks;
/* Accounting is per-block, not per-packet */
3862 chan->sent += blocks;
3863 chan->conn->sent += blocks;
3867 if (cnt != hdev->block_cnt)
3868 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode.
 */
3871 static void hci_sched_acl(struct hci_dev *hdev)
3873 BT_DBG("%s", hdev->name);
3875 /* No ACL link over BR/EDR controller */
3876 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3879 /* No AMP link over AMP controller */
3880 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3883 switch (hdev->flow_ctl_mode) {
3884 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3885 hci_sched_acl_pkt(hdev);
3888 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3889 hci_sched_acl_blk(hdev);
/* SCO scheduler: send queued SCO frames within each connection's
 * quota while SCO credits remain.
 */
3895 static void hci_sched_sco(struct hci_dev *hdev)
3897 struct hci_conn *conn;
3898 struct sk_buff *skb;
3901 BT_DBG("%s", hdev->name);
3903 if (!hci_conn_num(hdev, SCO_LINK))
3906 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3907 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3908 BT_DBG("skb %p len %d", skb, skb->len);
3909 hci_send_frame(hdev, skb);
/* NOTE(review): ~0 sentinel check on conn->sent — surrounding
 * reset logic is elided in this extract.
 */
3912 if (conn->sent == ~0)
/* eSCO scheduler: identical flow to hci_sched_sco() but for ESCO_LINK
 * connections; eSCO shares the SCO credit count (hdev->sco_cnt).
 */
3918 static void hci_sched_esco(struct hci_dev *hdev)
3920 struct hci_conn *conn;
3921 struct sk_buff *skb;
3924 BT_DBG("%s", hdev->name);
3926 if (!hci_conn_num(hdev, ESCO_LINK))
3929 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3931 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3932 BT_DBG("skb %p len %d", skb, skb->len);
3933 hci_send_frame(hdev, skb);
3936 if (conn->sent == ~0)
/* LE scheduler: drain LE channels by priority within quota. If the
 * controller has no dedicated LE buffers (le_pkts == 0), LE traffic
 * consumes ACL credits instead.
 */
3942 static void hci_sched_le(struct hci_dev *hdev)
3944 struct hci_chan *chan;
3945 struct sk_buff *skb;
3946 int quote, cnt, tmp;
3948 BT_DBG("%s", hdev->name);
3950 if (!hci_conn_num(hdev, LE_LINK))
3953 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3954 /* LE tx timeout must be longer than maximum
3955 * link supervision timeout (40.9 seconds) */
3956 if (!hdev->le_cnt && hdev->le_pkts &&
3957 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3958 hci_link_tx_to(hdev, LE_LINK);
/* Shared vs. dedicated LE buffer pool */
3961 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3963 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3964 u32 priority = (skb_peek(&chan->data_q))->priority;
3965 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3966 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3967 skb->len, skb->priority);
3969 /* Stop if priority has changed */
3970 if (skb->priority < priority)
3973 skb = skb_dequeue(&chan->data_q);
3975 hci_send_frame(hdev, skb);
3976 hdev->le_last_tx = jiffies;
/* Write back the remaining credits to whichever pool was used
 * (branching on le_pkts is elided in this extract).
 */
3987 hdev->acl_cnt = cnt;
3990 hci_prio_recalculate(hdev, LE_LINK);
/* TX work item: run all link-type schedulers (unless userspace owns
 * the device via HCI_USER_CHANNEL), then flush the raw queue.
 */
3993 static void hci_tx_work(struct work_struct *work)
3995 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3996 struct sk_buff *skb;
3998 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3999 hdev->sco_cnt, hdev->le_cnt);
4001 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4002 /* Schedule queues and send stuff to HCI driver */
4003 hci_sched_acl(hdev);
4004 hci_sched_sco(hdev);
4005 hci_sched_esco(hdev);
4009 /* Send next queued raw (unknown type) packet */
4010 while ((skb = skb_dequeue(&hdev->raw_q)))
4011 hci_send_frame(hdev, skb);
/* RX path for ACL data: parse the handle/flags, look up the owning
 * connection and pass the payload to L2CAP; packets for unknown
 * handles are logged and dropped.
 */
4014 /* ----- HCI RX task (incoming data processing) ----- */
4016 /* ACL data packet */
4017 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4019 struct hci_acl_hdr *hdr = (void *) skb->data;
4020 struct hci_conn *conn;
4021 __u16 handle, flags;
4023 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* The 16-bit field packs both handle and packet-boundary flags */
4025 handle = __le16_to_cpu(hdr->handle);
4026 flags = hci_flags(handle);
4027 handle = hci_handle(handle);
4029 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4032 hdev->stat.acl_rx++;
4035 conn = hci_conn_hash_lookup_handle(hdev, handle);
4036 hci_dev_unlock(hdev);
4039 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4041 /* Send to upper protocol */
4042 l2cap_recv_acldata(conn, skb, flags);
4045 BT_ERR("%s ACL packet for unknown connection handle %d",
4046 hdev->name, handle);
/* RX path for SCO data: resolve the connection handle and hand the
 * payload to the SCO layer; unknown handles are logged and dropped.
 */
4052 /* SCO data packet */
4053 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4055 struct hci_sco_hdr *hdr = (void *) skb->data;
4056 struct hci_conn *conn;
4059 skb_pull(skb, HCI_SCO_HDR_SIZE);
4061 handle = __le16_to_cpu(hdr->handle);
4063 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4065 hdev->stat.sco_rx++;
4068 conn = hci_conn_hash_lookup_handle(hdev, handle);
4069 hci_dev_unlock(hdev);
4072 /* Send to upper protocol */
4073 sco_recv_scodata(conn, skb);
4076 BT_ERR("%s SCO packet for unknown connection handle %d",
4077 hdev->name, handle);
/* A request is complete when the head of the command queue is flagged
 * as the start of a new request.
 */
4083 static bool hci_req_is_complete(struct hci_dev *hdev)
4085 struct sk_buff *skb;
4087 skb = skb_peek(&hdev->cmd_q);
4091 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of the command
 * queue. HCI_OP_RESET is never resent (it is the command whose
 * spontaneous completion triggers this path in the first place).
 */
4094 static void hci_resend_last(struct hci_dev *hdev)
4096 struct hci_command_hdr *sent;
4097 struct sk_buff *skb;
4100 if (!hdev->sent_cmd)
4103 sent = (void *) hdev->sent_cmd->data;
4104 opcode = __le16_to_cpu(sent->opcode);
4105 if (opcode == HCI_OP_RESET)
4108 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4112 skb_queue_head(&hdev->cmd_q, skb);
4113 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle completion of @opcode with @status within an HCI request:
 * detect spontaneous CSR resets, determine whether the request has
 * finished, flush its remaining queued commands and invoke the
 * request's completion callback exactly once.
 */
4116 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4118 hci_req_complete_t req_complete = NULL;
4119 struct sk_buff *skb;
4120 unsigned long flags;
4122 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4124 /* If the completed command doesn't match the last one that was
4125 * sent we need to do special handling of it.
4127 if (!hci_sent_cmd_data(hdev, opcode)) {
4128 /* Some CSR based controllers generate a spontaneous
4129 * reset complete event during init and any pending
4130 * command will never be completed. In such a case we
4131 * need to resend whatever was the last sent
4134 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4135 hci_resend_last(hdev);
4140 /* If the command succeeded and there's still more commands in
4141 * this request the request is not yet complete.
4143 if (!status && !hci_req_is_complete(hdev))
4146 /* If this was the last command in a request the complete
4147 * callback would be found in hdev->sent_cmd instead of the
4148 * command queue (hdev->cmd_q).
4150 if (hdev->sent_cmd) {
4151 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4154 /* We must set the complete callback to NULL to
4155 * avoid calling the callback more than once if
4156 * this function gets called again.
4158 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4164 /* Remove all pending commands belonging to this request */
4165 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4166 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Hit the start of the next request: push it back and stop */
4167 if (bt_cb(skb)->req.start) {
4168 __skb_queue_head(&hdev->cmd_q, skb);
4172 req_complete = bt_cb(skb)->req.complete;
4175 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4179 req_complete(hdev, status);
/* RX work item: drain the RX queue, copy each frame to the monitor
 * (and to sockets in promiscuous mode), then dispatch by packet type.
 * Frames are dropped while userspace owns the device, and data packets
 * are dropped during init.
 */
4182 static void hci_rx_work(struct work_struct *work)
4184 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4185 struct sk_buff *skb;
4187 BT_DBG("%s", hdev->name);
4189 while ((skb = skb_dequeue(&hdev->rx_q))) {
4190 /* Send copy to monitor */
4191 hci_send_to_monitor(hdev, skb);
4193 if (atomic_read(&hdev->promisc)) {
4194 /* Send copy to the sockets */
4195 hci_send_to_sock(hdev, skb);
4198 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4203 if (test_bit(HCI_INIT, &hdev->flags)) {
4204 /* Don't process data packets in this states. */
4205 switch (bt_cb(skb)->pkt_type) {
4206 case HCI_ACLDATA_PKT:
4207 case HCI_SCODATA_PKT:
/* Normal per-type dispatch */
4214 switch (bt_cb(skb)->pkt_type) {
4216 BT_DBG("%s Event packet", hdev->name);
4217 hci_event_packet(hdev, skb);
4220 case HCI_ACLDATA_PKT:
4221 BT_DBG("%s ACL data packet", hdev->name);
4222 hci_acldata_packet(hdev, skb);
4225 case HCI_SCODATA_PKT:
4226 BT_DBG("%s SCO data packet", hdev->name);
4227 hci_scodata_packet(hdev, skb);
4237 static void hci_cmd_work(struct work_struct *work)
4239 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4240 struct sk_buff *skb;
4242 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4243 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4245 /* Send queued commands */
4246 if (atomic_read(&hdev->cmd_cnt)) {
4247 skb = skb_dequeue(&hdev->cmd_q);
4251 kfree_skb(hdev->sent_cmd);
4253 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4254 if (hdev->sent_cmd) {
4255 atomic_dec(&hdev->cmd_cnt);
4256 hci_send_frame(hdev, skb);
4257 if (test_bit(HCI_RESET, &hdev->flags))
4258 cancel_delayed_work(&hdev->cmd_timer);
4260 schedule_delayed_work(&hdev->cmd_timer,
4263 skb_queue_head(&hdev->cmd_q, skb);
4264 queue_work(hdev->workqueue, &hdev->cmd_work);