2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_RWLOCK(hci_cb_list_lock);
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
59 /* ----- HCI requests ----- */
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
68 /* ---- HCI notifications ---- */
/* ---- HCI notifications ---- */

/* Forward a device event (up/down/register/unregister) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75 /* ---- HCI debugfs entries ---- */
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
83 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
92 struct hci_dev *hdev = file->private_data;
95 size_t buf_size = min(count, (sizeof(buf)-1));
99 if (!test_bit(HCI_UP, &hdev->flags))
102 if (copy_from_user(buf, user_buf, buf_size))
105 buf[buf_size] = '\0';
106 if (strtobool(buf, &enable))
109 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
114 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
117 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
119 hci_req_unlock(hdev);
124 err = -bt_to_errno(skb->data[0]);
130 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
135 static const struct file_operations dut_mode_fops = {
137 .read = dut_mode_read,
138 .write = dut_mode_write,
139 .llseek = default_llseek,
142 /* ---- HCI requests ---- */
144 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
146 BT_DBG("%s result 0x%2.2x", hdev->name, result);
148 if (hdev->req_status == HCI_REQ_PEND) {
149 hdev->req_result = result;
150 hdev->req_status = HCI_REQ_DONE;
151 wake_up_interruptible(&hdev->req_wait_q);
155 static void hci_req_cancel(struct hci_dev *hdev, int err)
157 BT_DBG("%s err 0x%2.2x", hdev->name, err);
159 if (hdev->req_status == HCI_REQ_PEND) {
160 hdev->req_result = err;
161 hdev->req_status = HCI_REQ_CANCELED;
162 wake_up_interruptible(&hdev->req_wait_q);
166 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
169 struct hci_ev_cmd_complete *ev;
170 struct hci_event_hdr *hdr;
175 skb = hdev->recv_evt;
176 hdev->recv_evt = NULL;
178 hci_dev_unlock(hdev);
181 return ERR_PTR(-ENODATA);
183 if (skb->len < sizeof(*hdr)) {
184 BT_ERR("Too short HCI event");
188 hdr = (void *) skb->data;
189 skb_pull(skb, HCI_EVENT_HDR_SIZE);
192 if (hdr->evt != event)
197 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
198 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
202 if (skb->len < sizeof(*ev)) {
203 BT_ERR("Too short cmd_complete event");
207 ev = (void *) skb->data;
208 skb_pull(skb, sizeof(*ev));
210 if (opcode == __le16_to_cpu(ev->opcode))
213 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
214 __le16_to_cpu(ev->opcode));
218 return ERR_PTR(-ENODATA);
221 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
222 const void *param, u8 event, u32 timeout)
224 DECLARE_WAITQUEUE(wait, current);
225 struct hci_request req;
228 BT_DBG("%s", hdev->name);
230 hci_req_init(&req, hdev);
232 hci_req_add_ev(&req, opcode, plen, param, event);
234 hdev->req_status = HCI_REQ_PEND;
236 add_wait_queue(&hdev->req_wait_q, &wait);
237 set_current_state(TASK_INTERRUPTIBLE);
239 err = hci_req_run(&req, hci_req_sync_complete);
241 remove_wait_queue(&hdev->req_wait_q, &wait);
242 set_current_state(TASK_RUNNING);
246 schedule_timeout(timeout);
248 remove_wait_queue(&hdev->req_wait_q, &wait);
250 if (signal_pending(current))
251 return ERR_PTR(-EINTR);
253 switch (hdev->req_status) {
255 err = -bt_to_errno(hdev->req_result);
258 case HCI_REQ_CANCELED:
259 err = -hdev->req_result;
267 hdev->req_status = hdev->req_result = 0;
269 BT_DBG("%s end: err %d", hdev->name, err);
274 return hci_get_cmd_complete(hdev, opcode, event);
276 EXPORT_SYMBOL(__hci_cmd_sync_ev);
278 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
279 const void *param, u32 timeout)
281 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
283 EXPORT_SYMBOL(__hci_cmd_sync);
285 /* Execute request and wait for completion. */
286 static int __hci_req_sync(struct hci_dev *hdev,
287 void (*func)(struct hci_request *req,
289 unsigned long opt, __u32 timeout)
291 struct hci_request req;
292 DECLARE_WAITQUEUE(wait, current);
295 BT_DBG("%s start", hdev->name);
297 hci_req_init(&req, hdev);
299 hdev->req_status = HCI_REQ_PEND;
303 add_wait_queue(&hdev->req_wait_q, &wait);
304 set_current_state(TASK_INTERRUPTIBLE);
306 err = hci_req_run(&req, hci_req_sync_complete);
308 hdev->req_status = 0;
310 remove_wait_queue(&hdev->req_wait_q, &wait);
311 set_current_state(TASK_RUNNING);
313 /* ENODATA means the HCI request command queue is empty.
314 * This can happen when a request with conditionals doesn't
315 * trigger any commands to be sent. This is normal behavior
316 * and should not trigger an error return.
324 schedule_timeout(timeout);
326 remove_wait_queue(&hdev->req_wait_q, &wait);
328 if (signal_pending(current))
331 switch (hdev->req_status) {
333 err = -bt_to_errno(hdev->req_result);
336 case HCI_REQ_CANCELED:
337 err = -hdev->req_result;
345 hdev->req_status = hdev->req_result = 0;
347 BT_DBG("%s end: err %d", hdev->name, err);
352 static int hci_req_sync(struct hci_dev *hdev,
353 void (*req)(struct hci_request *req,
355 unsigned long opt, __u32 timeout)
359 if (!test_bit(HCI_UP, &hdev->flags))
362 /* Serialize all requests */
364 ret = __hci_req_sync(hdev, req, opt, timeout);
365 hci_req_unlock(hdev);
370 static void hci_reset_req(struct hci_request *req, unsigned long opt)
372 BT_DBG("%s %ld", req->hdev->name, opt);
375 set_bit(HCI_RESET, &req->hdev->flags);
376 hci_req_add(req, HCI_OP_RESET, 0, NULL);
379 static void bredr_init(struct hci_request *req)
381 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
383 /* Read Local Supported Features */
384 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
386 /* Read Local Version */
387 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
389 /* Read BD Address */
390 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
393 static void amp_init(struct hci_request *req)
395 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
397 /* Read Local Version */
398 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
400 /* Read Local Supported Commands */
401 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
403 /* Read Local Supported Features */
404 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
406 /* Read Local AMP Info */
407 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
409 /* Read Data Blk size */
410 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
412 /* Read Flow Control Mode */
413 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
415 /* Read Location Data */
416 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
419 static void hci_init1_req(struct hci_request *req, unsigned long opt)
421 struct hci_dev *hdev = req->hdev;
423 BT_DBG("%s %ld", hdev->name, opt);
426 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
427 hci_reset_req(req, 0);
429 switch (hdev->dev_type) {
439 BT_ERR("Unknown device type %d", hdev->dev_type);
444 static void bredr_setup(struct hci_request *req)
449 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
450 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
452 /* Read Class of Device */
453 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
455 /* Read Local Name */
456 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
458 /* Read Voice Setting */
459 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
461 /* Read Number of Supported IAC */
462 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
464 /* Read Current IAC LAP */
465 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
467 /* Clear Event Filters */
468 flt_type = HCI_FLT_CLEAR_ALL;
469 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
471 /* Connection accept timeout ~20 secs */
472 param = cpu_to_le16(0x7d00);
473 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
476 static void le_setup(struct hci_request *req)
478 struct hci_dev *hdev = req->hdev;
480 /* Read LE Buffer Size */
481 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
483 /* Read LE Local Supported Features */
484 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
486 /* Read LE Supported States */
487 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
489 /* Read LE White List Size */
490 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
492 /* Clear LE White List */
493 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
495 /* LE-only controllers have LE implicitly enabled */
496 if (!lmp_bredr_capable(hdev))
497 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
500 static void hci_setup_event_mask(struct hci_request *req)
502 struct hci_dev *hdev = req->hdev;
504 /* The second byte is 0xff instead of 0x9f (two reserved bits
505 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
508 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
510 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
511 * any event mask for pre 1.2 devices.
513 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
516 if (lmp_bredr_capable(hdev)) {
517 events[4] |= 0x01; /* Flow Specification Complete */
518 events[4] |= 0x02; /* Inquiry Result with RSSI */
519 events[4] |= 0x04; /* Read Remote Extended Features Complete */
520 events[5] |= 0x08; /* Synchronous Connection Complete */
521 events[5] |= 0x10; /* Synchronous Connection Changed */
523 /* Use a different default for LE-only devices */
524 memset(events, 0, sizeof(events));
525 events[0] |= 0x10; /* Disconnection Complete */
526 events[1] |= 0x08; /* Read Remote Version Information Complete */
527 events[1] |= 0x20; /* Command Complete */
528 events[1] |= 0x40; /* Command Status */
529 events[1] |= 0x80; /* Hardware Error */
530 events[2] |= 0x04; /* Number of Completed Packets */
531 events[3] |= 0x02; /* Data Buffer Overflow */
533 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
534 events[0] |= 0x80; /* Encryption Change */
535 events[5] |= 0x80; /* Encryption Key Refresh Complete */
539 if (lmp_inq_rssi_capable(hdev))
540 events[4] |= 0x02; /* Inquiry Result with RSSI */
542 if (lmp_sniffsubr_capable(hdev))
543 events[5] |= 0x20; /* Sniff Subrating */
545 if (lmp_pause_enc_capable(hdev))
546 events[5] |= 0x80; /* Encryption Key Refresh Complete */
548 if (lmp_ext_inq_capable(hdev))
549 events[5] |= 0x40; /* Extended Inquiry Result */
551 if (lmp_no_flush_capable(hdev))
552 events[7] |= 0x01; /* Enhanced Flush Complete */
554 if (lmp_lsto_capable(hdev))
555 events[6] |= 0x80; /* Link Supervision Timeout Changed */
557 if (lmp_ssp_capable(hdev)) {
558 events[6] |= 0x01; /* IO Capability Request */
559 events[6] |= 0x02; /* IO Capability Response */
560 events[6] |= 0x04; /* User Confirmation Request */
561 events[6] |= 0x08; /* User Passkey Request */
562 events[6] |= 0x10; /* Remote OOB Data Request */
563 events[6] |= 0x20; /* Simple Pairing Complete */
564 events[7] |= 0x04; /* User Passkey Notification */
565 events[7] |= 0x08; /* Keypress Notification */
566 events[7] |= 0x10; /* Remote Host Supported
567 * Features Notification
571 if (lmp_le_capable(hdev))
572 events[7] |= 0x20; /* LE Meta-Event */
574 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
577 static void hci_init2_req(struct hci_request *req, unsigned long opt)
579 struct hci_dev *hdev = req->hdev;
581 if (lmp_bredr_capable(hdev))
584 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
586 if (lmp_le_capable(hdev))
589 /* All Bluetooth 1.2 and later controllers should support the
590 * HCI command for reading the local supported commands.
592 * Unfortunately some controllers indicate Bluetooth 1.2 support,
593 * but do not have support for this command. If that is the case,
594 * the driver can quirk the behavior and skip reading the local
595 * supported commands.
597 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
598 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
599 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
601 if (lmp_ssp_capable(hdev)) {
602 /* When SSP is available, then the host features page
603 * should also be available as well. However some
604 * controllers list the max_page as 0 as long as SSP
605 * has not been enabled. To achieve proper debugging
606 * output, force the minimum max_page to 1 at least.
608 hdev->max_page = 0x01;
610 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
612 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
613 sizeof(mode), &mode);
615 struct hci_cp_write_eir cp;
617 memset(hdev->eir, 0, sizeof(hdev->eir));
618 memset(&cp, 0, sizeof(cp));
620 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
624 if (lmp_inq_rssi_capable(hdev) ||
625 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
628 /* If Extended Inquiry Result events are supported, then
629 * they are clearly preferred over Inquiry Result with RSSI
632 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
634 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
637 if (lmp_inq_tx_pwr_capable(hdev))
638 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
640 if (lmp_ext_feat_capable(hdev)) {
641 struct hci_cp_read_local_ext_features cp;
644 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
648 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
650 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
655 static void hci_setup_link_policy(struct hci_request *req)
657 struct hci_dev *hdev = req->hdev;
658 struct hci_cp_write_def_link_policy cp;
661 if (lmp_rswitch_capable(hdev))
662 link_policy |= HCI_LP_RSWITCH;
663 if (lmp_hold_capable(hdev))
664 link_policy |= HCI_LP_HOLD;
665 if (lmp_sniff_capable(hdev))
666 link_policy |= HCI_LP_SNIFF;
667 if (lmp_park_capable(hdev))
668 link_policy |= HCI_LP_PARK;
670 cp.policy = cpu_to_le16(link_policy);
671 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
674 static void hci_set_le_support(struct hci_request *req)
676 struct hci_dev *hdev = req->hdev;
677 struct hci_cp_write_le_host_supported cp;
679 /* LE-only devices do not support explicit enablement */
680 if (!lmp_bredr_capable(hdev))
683 memset(&cp, 0, sizeof(cp));
685 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
690 if (cp.le != lmp_host_le_capable(hdev))
691 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
695 static void hci_set_event_mask_page_2(struct hci_request *req)
697 struct hci_dev *hdev = req->hdev;
698 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
700 /* If Connectionless Slave Broadcast master role is supported
701 * enable all necessary events for it.
703 if (lmp_csb_master_capable(hdev)) {
704 events[1] |= 0x40; /* Triggered Clock Capture */
705 events[1] |= 0x80; /* Synchronization Train Complete */
706 events[2] |= 0x10; /* Slave Page Response Timeout */
707 events[2] |= 0x20; /* CSB Channel Map Change */
710 /* If Connectionless Slave Broadcast slave role is supported
711 * enable all necessary events for it.
713 if (lmp_csb_slave_capable(hdev)) {
714 events[2] |= 0x01; /* Synchronization Train Received */
715 events[2] |= 0x02; /* CSB Receive */
716 events[2] |= 0x04; /* CSB Timeout */
717 events[2] |= 0x08; /* Truncated Page Complete */
720 /* Enable Authenticated Payload Timeout Expired event if supported */
721 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
724 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
727 static void hci_init3_req(struct hci_request *req, unsigned long opt)
729 struct hci_dev *hdev = req->hdev;
732 hci_setup_event_mask(req);
734 if (hdev->commands[6] & 0x20) {
735 struct hci_cp_read_stored_link_key cp;
737 bacpy(&cp.bdaddr, BDADDR_ANY);
739 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
742 /* Some Broadcom based Bluetooth controllers do not support the
743 * Delete Stored Link Key command. They are clearly indicating its
744 * absence in the bit mask of supported commands.
746 * Check the supported commands and only if the the command is marked
747 * as supported send it. If not supported assume that the controller
748 * does not have actual support for stored link keys which makes this
749 * command redundant anyway.
751 * Some controllers indicate that they support handling deleting
752 * stored link keys, but they don't. The quirk lets a driver
753 * just disable this command.
755 if (hdev->commands[6] & 0x80 &&
756 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
757 struct hci_cp_delete_stored_link_key cp;
759 bacpy(&cp.bdaddr, BDADDR_ANY);
760 cp.delete_all = 0x01;
761 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
765 if (hdev->commands[5] & 0x10)
766 hci_setup_link_policy(req);
768 if (hdev->commands[8] & 0x01)
769 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
771 /* Some older Broadcom based Bluetooth 1.2 controllers do not
772 * support the Read Page Scan Type command. Check support for
773 * this command in the bit mask of supported commands.
775 if (hdev->commands[13] & 0x01)
776 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
778 if (lmp_le_capable(hdev)) {
781 memset(events, 0, sizeof(events));
784 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
785 events[0] |= 0x10; /* LE Long Term Key Request */
787 /* If controller supports the Connection Parameters Request
788 * Link Layer Procedure, enable the corresponding event.
790 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
791 events[0] |= 0x20; /* LE Remote Connection
795 /* If the controller supports the Data Length Extension
796 * feature, enable the corresponding event.
798 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
799 events[0] |= 0x40; /* LE Data Length Change */
801 /* If the controller supports Extended Scanner Filter
802 * Policies, enable the correspondig event.
804 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
805 events[1] |= 0x04; /* LE Direct Advertising
809 /* If the controller supports the LE Read Local P-256
810 * Public Key command, enable the corresponding event.
812 if (hdev->commands[34] & 0x02)
813 events[0] |= 0x80; /* LE Read Local P-256
814 * Public Key Complete
817 /* If the controller supports the LE Generate DHKey
818 * command, enable the corresponding event.
820 if (hdev->commands[34] & 0x04)
821 events[1] |= 0x01; /* LE Generate DHKey Complete */
823 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
826 if (hdev->commands[25] & 0x40) {
827 /* Read LE Advertising Channel TX Power */
828 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
831 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
832 /* Read LE Maximum Data Length */
833 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
835 /* Read LE Suggested Default Data Length */
836 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
839 hci_set_le_support(req);
842 /* Read features beyond page 1 if available */
843 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
844 struct hci_cp_read_local_ext_features cp;
847 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
852 static void hci_init4_req(struct hci_request *req, unsigned long opt)
854 struct hci_dev *hdev = req->hdev;
856 /* Set event mask page 2 if the HCI command for it is supported */
857 if (hdev->commands[22] & 0x04)
858 hci_set_event_mask_page_2(req);
860 /* Read local codec list if the HCI command is supported */
861 if (hdev->commands[29] & 0x20)
862 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
864 /* Get MWS transport configuration if the HCI command is supported */
865 if (hdev->commands[30] & 0x08)
866 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
868 /* Check for Synchronization Train support */
869 if (lmp_sync_train_capable(hdev))
870 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
872 /* Enable Secure Connections if supported and configured */
873 if (bredr_sc_enabled(hdev)) {
875 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
876 sizeof(support), &support);
880 static int __hci_init(struct hci_dev *hdev)
884 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
888 /* The Device Under Test (DUT) mode is special and available for
889 * all controller types. So just create it early on.
891 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
892 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
896 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
897 * BR/EDR/LE type controllers. AMP controllers only need the
900 if (hdev->dev_type != HCI_BREDR)
903 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
907 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
911 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
915 /* This function is only called when the controller is actually in
916 * configured state. When the controller is marked as unconfigured,
917 * this initialization procedure is not run.
919 * It means that it is possible that a controller runs through its
920 * setup phase and then discovers missing settings. If that is the
921 * case, then this function will not be called. It then will only
922 * be called during the config phase.
924 * So only when in setup phase or config phase, create the debugfs
925 * entries and register the SMP channels.
927 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
928 !test_bit(HCI_CONFIG, &hdev->dev_flags))
931 hci_debugfs_create_common(hdev);
933 if (lmp_bredr_capable(hdev))
934 hci_debugfs_create_bredr(hdev);
936 if (lmp_le_capable(hdev)) {
937 hci_debugfs_create_le(hdev);
944 static void hci_init0_req(struct hci_request *req, unsigned long opt)
946 struct hci_dev *hdev = req->hdev;
948 BT_DBG("%s %ld", hdev->name, opt);
951 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
952 hci_reset_req(req, 0);
954 /* Read Local Version */
955 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
957 /* Read BD Address */
958 if (hdev->set_bdaddr)
959 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
962 static int __hci_unconf_init(struct hci_dev *hdev)
966 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
969 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
976 static void hci_scan_req(struct hci_request *req, unsigned long opt)
980 BT_DBG("%s %x", req->hdev->name, scan);
982 /* Inquiry and Page scans */
983 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
986 static void hci_auth_req(struct hci_request *req, unsigned long opt)
990 BT_DBG("%s %x", req->hdev->name, auth);
993 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
996 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1000 BT_DBG("%s %x", req->hdev->name, encrypt);
1003 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1006 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1008 __le16 policy = cpu_to_le16(opt);
1010 BT_DBG("%s %x", req->hdev->name, policy);
1012 /* Default link policy */
1013 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1016 /* Get HCI device by index.
1017 * Device is held on return. */
1018 struct hci_dev *hci_dev_get(int index)
1020 struct hci_dev *hdev = NULL, *d;
1022 BT_DBG("%d", index);
1027 read_lock(&hci_dev_list_lock);
1028 list_for_each_entry(d, &hci_dev_list, list) {
1029 if (d->id == index) {
1030 hdev = hci_dev_hold(d);
1034 read_unlock(&hci_dev_list_lock);
1038 /* ---- Inquiry support ---- */
1040 bool hci_discovery_active(struct hci_dev *hdev)
1042 struct discovery_state *discov = &hdev->discovery;
1044 switch (discov->state) {
1045 case DISCOVERY_FINDING:
1046 case DISCOVERY_RESOLVING:
1054 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1056 int old_state = hdev->discovery.state;
1058 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1060 if (old_state == state)
1063 hdev->discovery.state = state;
1066 case DISCOVERY_STOPPED:
1067 hci_update_background_scan(hdev);
1069 if (old_state != DISCOVERY_STARTING)
1070 mgmt_discovering(hdev, 0);
1072 case DISCOVERY_STARTING:
1074 case DISCOVERY_FINDING:
1075 mgmt_discovering(hdev, 1);
1077 case DISCOVERY_RESOLVING:
1079 case DISCOVERY_STOPPING:
1084 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1086 struct discovery_state *cache = &hdev->discovery;
1087 struct inquiry_entry *p, *n;
1089 list_for_each_entry_safe(p, n, &cache->all, all) {
1094 INIT_LIST_HEAD(&cache->unknown);
1095 INIT_LIST_HEAD(&cache->resolve);
1098 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1101 struct discovery_state *cache = &hdev->discovery;
1102 struct inquiry_entry *e;
1104 BT_DBG("cache %p, %pMR", cache, bdaddr);
1106 list_for_each_entry(e, &cache->all, all) {
1107 if (!bacmp(&e->data.bdaddr, bdaddr))
1114 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1117 struct discovery_state *cache = &hdev->discovery;
1118 struct inquiry_entry *e;
1120 BT_DBG("cache %p, %pMR", cache, bdaddr);
1122 list_for_each_entry(e, &cache->unknown, list) {
1123 if (!bacmp(&e->data.bdaddr, bdaddr))
1130 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1134 struct discovery_state *cache = &hdev->discovery;
1135 struct inquiry_entry *e;
1137 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1139 list_for_each_entry(e, &cache->resolve, list) {
1140 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1142 if (!bacmp(&e->data.bdaddr, bdaddr))
1149 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1150 struct inquiry_entry *ie)
1152 struct discovery_state *cache = &hdev->discovery;
1153 struct list_head *pos = &cache->resolve;
1154 struct inquiry_entry *p;
1156 list_del(&ie->list);
1158 list_for_each_entry(p, &cache->resolve, list) {
1159 if (p->name_state != NAME_PENDING &&
1160 abs(p->data.rssi) >= abs(ie->data.rssi))
1165 list_add(&ie->list, pos);
1168 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1171 struct discovery_state *cache = &hdev->discovery;
1172 struct inquiry_entry *ie;
1175 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1177 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1179 if (!data->ssp_mode)
1180 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1182 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1184 if (!ie->data.ssp_mode)
1185 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1187 if (ie->name_state == NAME_NEEDED &&
1188 data->rssi != ie->data.rssi) {
1189 ie->data.rssi = data->rssi;
1190 hci_inquiry_cache_update_resolve(hdev, ie);
1196 /* Entry not in the cache. Add new one. */
1197 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1199 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1203 list_add(&ie->all, &cache->all);
1206 ie->name_state = NAME_KNOWN;
1208 ie->name_state = NAME_NOT_KNOWN;
1209 list_add(&ie->list, &cache->unknown);
1213 if (name_known && ie->name_state != NAME_KNOWN &&
1214 ie->name_state != NAME_PENDING) {
1215 ie->name_state = NAME_KNOWN;
1216 list_del(&ie->list);
1219 memcpy(&ie->data, data, sizeof(*data));
1220 ie->timestamp = jiffies;
1221 cache->timestamp = jiffies;
1223 if (ie->name_state == NAME_NOT_KNOWN)
1224 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1230 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1232 struct discovery_state *cache = &hdev->discovery;
1233 struct inquiry_info *info = (struct inquiry_info *) buf;
1234 struct inquiry_entry *e;
1237 list_for_each_entry(e, &cache->all, all) {
1238 struct inquiry_data *data = &e->data;
1243 bacpy(&info->bdaddr, &data->bdaddr);
1244 info->pscan_rep_mode = data->pscan_rep_mode;
1245 info->pscan_period_mode = data->pscan_period_mode;
1246 info->pscan_mode = data->pscan_mode;
1247 memcpy(info->dev_class, data->dev_class, 3);
1248 info->clock_offset = data->clock_offset;
1254 BT_DBG("cache %p, copied %d", cache, copied);
1258 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1260 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1261 struct hci_dev *hdev = req->hdev;
1262 struct hci_cp_inquiry cp;
1264 BT_DBG("%s", hdev->name);
1266 if (test_bit(HCI_INQUIRY, &hdev->flags))
1270 memcpy(&cp.lap, &ir->lap, 3);
1271 cp.length = ir->length;
1272 cp.num_rsp = ir->num_rsp;
1273 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1276 int hci_inquiry(void __user *arg)
1278 __u8 __user *ptr = arg;
1279 struct hci_inquiry_req ir;
1280 struct hci_dev *hdev;
1281 int err = 0, do_inquiry = 0, max_rsp;
1285 if (copy_from_user(&ir, ptr, sizeof(ir)))
1288 hdev = hci_dev_get(ir.dev_id);
1292 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1297 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1302 if (hdev->dev_type != HCI_BREDR) {
1307 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1313 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1314 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1315 hci_inquiry_cache_flush(hdev);
1318 hci_dev_unlock(hdev);
1320 timeo = ir.length * msecs_to_jiffies(2000);
1323 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1328 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1329 * cleared). If it is interrupted by a signal, return -EINTR.
1331 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1332 TASK_INTERRUPTIBLE))
1336 /* for unlimited number of responses we will use buffer with
1339 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1341 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1342 * copy it to the user space.
1344 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1351 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1352 hci_dev_unlock(hdev);
1354 BT_DBG("num_rsp %d", ir.num_rsp);
1356 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1358 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1371 static int hci_dev_do_open(struct hci_dev *hdev)
1375 BT_DBG("%s %p", hdev->name, hdev);
1379 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1384 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1385 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1386 /* Check for rfkill but allow the HCI setup stage to
1387 * proceed (which in itself doesn't cause any RF activity).
1389 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1394 /* Check for valid public address or a configured static
1395 * random adddress, but let the HCI setup proceed to
1396 * be able to determine if there is a public address
1399 * In case of user channel usage, it is not important
1400 * if a public address or static random address is
1403 * This check is only valid for BR/EDR controllers
1404 * since AMP controllers do not have an address.
1406 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1407 hdev->dev_type == HCI_BREDR &&
1408 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1409 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1410 ret = -EADDRNOTAVAIL;
1415 if (test_bit(HCI_UP, &hdev->flags)) {
1420 if (hdev->open(hdev)) {
1425 atomic_set(&hdev->cmd_cnt, 1);
1426 set_bit(HCI_INIT, &hdev->flags);
1428 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1430 ret = hdev->setup(hdev);
1432 /* The transport driver can set these quirks before
1433 * creating the HCI device or in its setup callback.
1435 * In case any of them is set, the controller has to
1436 * start up as unconfigured.
1438 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1439 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1440 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
1442 /* For an unconfigured controller it is required to
1443 * read at least the version information provided by
1444 * the Read Local Version Information command.
1446 * If the set_bdaddr driver callback is provided, then
1447 * also the original Bluetooth public device address
1448 * will be read using the Read BD Address command.
1450 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1451 ret = __hci_unconf_init(hdev);
1454 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1455 /* If public address change is configured, ensure that
1456 * the address gets programmed. If the driver does not
1457 * support changing the public address, fail the power
1460 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1462 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1464 ret = -EADDRNOTAVAIL;
1468 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1469 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1470 ret = __hci_init(hdev);
1473 clear_bit(HCI_INIT, &hdev->flags);
1477 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1478 set_bit(HCI_UP, &hdev->flags);
1479 hci_notify(hdev, HCI_DEV_UP);
1480 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1481 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
1482 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1483 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1484 hdev->dev_type == HCI_BREDR) {
1486 mgmt_powered(hdev, 1);
1487 hci_dev_unlock(hdev);
1490 /* Init failed, cleanup */
1491 flush_work(&hdev->tx_work);
1492 flush_work(&hdev->cmd_work);
1493 flush_work(&hdev->rx_work);
1495 skb_queue_purge(&hdev->cmd_q);
1496 skb_queue_purge(&hdev->rx_q);
1501 if (hdev->sent_cmd) {
1502 kfree_skb(hdev->sent_cmd);
1503 hdev->sent_cmd = NULL;
1507 hdev->flags &= BIT(HCI_RAW);
1511 hci_req_unlock(hdev);
1515 /* ---- HCI ioctl helpers ---- */
1517 int hci_dev_open(__u16 dev)
1519 struct hci_dev *hdev;
1522 hdev = hci_dev_get(dev);
1526 /* Devices that are marked as unconfigured can only be powered
1527 * up as user channel. Trying to bring them up as normal devices
1528 * will result into a failure. Only user channel operation is
1531 * When this function is called for a user channel, the flag
1532 * HCI_USER_CHANNEL will be set first before attempting to
1535 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1536 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1541 /* We need to ensure that no other power on/off work is pending
1542 * before proceeding to call hci_dev_do_open. This is
1543 * particularly important if the setup procedure has not yet
1546 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1547 cancel_delayed_work(&hdev->power_off);
1549 /* After this call it is guaranteed that the setup procedure
1550 * has finished. This means that error conditions like RFKILL
1551 * or no valid public or static random address apply.
1553 flush_workqueue(hdev->req_workqueue);
1555 /* For controllers not using the management interface and that
1556 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1557 * so that pairing works for them. Once the management interface
1558 * is in use this bit will be cleared again and userspace has
1559 * to explicitly enable it.
1561 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1562 !test_bit(HCI_MGMT, &hdev->dev_flags))
1563 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1565 err = hci_dev_do_open(hdev);
1572 /* This function requires the caller holds hdev->lock */
1573 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1575 struct hci_conn_params *p;
1577 list_for_each_entry(p, &hdev->le_conn_params, list) {
1579 hci_conn_drop(p->conn);
1580 hci_conn_put(p->conn);
1583 list_del_init(&p->action);
1586 BT_DBG("All LE pending actions cleared");
1589 static int hci_dev_do_close(struct hci_dev *hdev)
1591 BT_DBG("%s %p", hdev->name, hdev);
1593 cancel_delayed_work(&hdev->power_off);
1595 hci_req_cancel(hdev, ENODEV);
1598 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1599 cancel_delayed_work_sync(&hdev->cmd_timer);
1600 hci_req_unlock(hdev);
1604 /* Flush RX and TX works */
1605 flush_work(&hdev->tx_work);
1606 flush_work(&hdev->rx_work);
1608 if (hdev->discov_timeout > 0) {
1609 cancel_delayed_work(&hdev->discov_off);
1610 hdev->discov_timeout = 0;
1611 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1612 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1615 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1616 cancel_delayed_work(&hdev->service_cache);
1618 cancel_delayed_work_sync(&hdev->le_scan_disable);
1620 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1621 cancel_delayed_work_sync(&hdev->rpa_expired);
1623 /* Avoid potential lockdep warnings from the *_flush() calls by
1624 * ensuring the workqueue is empty up front.
1626 drain_workqueue(hdev->workqueue);
1630 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1631 if (hdev->dev_type == HCI_BREDR)
1632 mgmt_powered(hdev, 0);
1635 hci_inquiry_cache_flush(hdev);
1636 hci_pend_le_actions_clear(hdev);
1637 hci_conn_hash_flush(hdev);
1638 hci_dev_unlock(hdev);
1640 hci_notify(hdev, HCI_DEV_DOWN);
1646 skb_queue_purge(&hdev->cmd_q);
1647 atomic_set(&hdev->cmd_cnt, 1);
1648 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1649 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1650 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1651 set_bit(HCI_INIT, &hdev->flags);
1652 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1653 clear_bit(HCI_INIT, &hdev->flags);
1656 /* flush cmd work */
1657 flush_work(&hdev->cmd_work);
1660 skb_queue_purge(&hdev->rx_q);
1661 skb_queue_purge(&hdev->cmd_q);
1662 skb_queue_purge(&hdev->raw_q);
1664 /* Drop last sent command */
1665 if (hdev->sent_cmd) {
1666 cancel_delayed_work_sync(&hdev->cmd_timer);
1667 kfree_skb(hdev->sent_cmd);
1668 hdev->sent_cmd = NULL;
1671 kfree_skb(hdev->recv_evt);
1672 hdev->recv_evt = NULL;
1674 /* After this point our queues are empty
1675 * and no tasks are scheduled. */
1679 hdev->flags &= BIT(HCI_RAW);
1680 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1682 /* Controller radio is available but is currently powered down */
1683 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1685 memset(hdev->eir, 0, sizeof(hdev->eir));
1686 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1687 bacpy(&hdev->random_addr, BDADDR_ANY);
1689 hci_req_unlock(hdev);
1695 int hci_dev_close(__u16 dev)
1697 struct hci_dev *hdev;
1700 hdev = hci_dev_get(dev);
1704 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1709 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1710 cancel_delayed_work(&hdev->power_off);
1712 err = hci_dev_do_close(hdev);
1719 int hci_dev_reset(__u16 dev)
1721 struct hci_dev *hdev;
1724 hdev = hci_dev_get(dev);
1730 if (!test_bit(HCI_UP, &hdev->flags)) {
1735 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1740 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1746 skb_queue_purge(&hdev->rx_q);
1747 skb_queue_purge(&hdev->cmd_q);
1749 /* Avoid potential lockdep warnings from the *_flush() calls by
1750 * ensuring the workqueue is empty up front.
1752 drain_workqueue(hdev->workqueue);
1755 hci_inquiry_cache_flush(hdev);
1756 hci_conn_hash_flush(hdev);
1757 hci_dev_unlock(hdev);
1762 atomic_set(&hdev->cmd_cnt, 1);
1763 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1765 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1768 hci_req_unlock(hdev);
1773 int hci_dev_reset_stat(__u16 dev)
1775 struct hci_dev *hdev;
1778 hdev = hci_dev_get(dev);
1782 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1787 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1792 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1799 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1801 bool conn_changed, discov_changed;
1803 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1805 if ((scan & SCAN_PAGE))
1806 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1809 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1812 if ((scan & SCAN_INQUIRY)) {
1813 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1816 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1817 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1821 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1824 if (conn_changed || discov_changed) {
1825 /* In case this was disabled through mgmt */
1826 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1828 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1829 mgmt_update_adv_data(hdev);
1831 mgmt_new_settings(hdev);
1835 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1837 struct hci_dev *hdev;
1838 struct hci_dev_req dr;
1841 if (copy_from_user(&dr, arg, sizeof(dr)))
1844 hdev = hci_dev_get(dr.dev_id);
1848 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1853 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1858 if (hdev->dev_type != HCI_BREDR) {
1863 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1870 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1875 if (!lmp_encrypt_capable(hdev)) {
1880 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1881 /* Auth must be enabled first */
1882 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1888 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1893 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1896 /* Ensure that the connectable and discoverable states
1897 * get correctly modified as this was a non-mgmt change.
1900 hci_update_scan_state(hdev, dr.dev_opt);
1904 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1908 case HCISETLINKMODE:
1909 hdev->link_mode = ((__u16) dr.dev_opt) &
1910 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1914 hdev->pkt_type = (__u16) dr.dev_opt;
1918 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1919 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1923 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1924 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1937 int hci_get_dev_list(void __user *arg)
1939 struct hci_dev *hdev;
1940 struct hci_dev_list_req *dl;
1941 struct hci_dev_req *dr;
1942 int n = 0, size, err;
1945 if (get_user(dev_num, (__u16 __user *) arg))
1948 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1951 size = sizeof(*dl) + dev_num * sizeof(*dr);
1953 dl = kzalloc(size, GFP_KERNEL);
1959 read_lock(&hci_dev_list_lock);
1960 list_for_each_entry(hdev, &hci_dev_list, list) {
1961 unsigned long flags = hdev->flags;
1963 /* When the auto-off is configured it means the transport
1964 * is running, but in that case still indicate that the
1965 * device is actually down.
1967 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1968 flags &= ~BIT(HCI_UP);
1970 (dr + n)->dev_id = hdev->id;
1971 (dr + n)->dev_opt = flags;
1976 read_unlock(&hci_dev_list_lock);
1979 size = sizeof(*dl) + n * sizeof(*dr);
1981 err = copy_to_user(arg, dl, size);
1984 return err ? -EFAULT : 0;
1987 int hci_get_dev_info(void __user *arg)
1989 struct hci_dev *hdev;
1990 struct hci_dev_info di;
1991 unsigned long flags;
1994 if (copy_from_user(&di, arg, sizeof(di)))
1997 hdev = hci_dev_get(di.dev_id);
2001 /* When the auto-off is configured it means the transport
2002 * is running, but in that case still indicate that the
2003 * device is actually down.
2005 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2006 flags = hdev->flags & ~BIT(HCI_UP);
2008 flags = hdev->flags;
2010 strcpy(di.name, hdev->name);
2011 di.bdaddr = hdev->bdaddr;
2012 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2014 di.pkt_type = hdev->pkt_type;
2015 if (lmp_bredr_capable(hdev)) {
2016 di.acl_mtu = hdev->acl_mtu;
2017 di.acl_pkts = hdev->acl_pkts;
2018 di.sco_mtu = hdev->sco_mtu;
2019 di.sco_pkts = hdev->sco_pkts;
2021 di.acl_mtu = hdev->le_mtu;
2022 di.acl_pkts = hdev->le_pkts;
2026 di.link_policy = hdev->link_policy;
2027 di.link_mode = hdev->link_mode;
2029 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2030 memcpy(&di.features, &hdev->features, sizeof(di.features));
2032 if (copy_to_user(arg, &di, sizeof(di)))
2040 /* ---- Interface to HCI drivers ---- */
2042 static int hci_rfkill_set_block(void *data, bool blocked)
2044 struct hci_dev *hdev = data;
2046 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2048 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2052 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2053 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2054 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2055 hci_dev_do_close(hdev);
2057 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2063 static const struct rfkill_ops hci_rfkill_ops = {
2064 .set_block = hci_rfkill_set_block,
2067 static void hci_power_on(struct work_struct *work)
2069 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2072 BT_DBG("%s", hdev->name);
2074 err = hci_dev_do_open(hdev);
2077 mgmt_set_powered_failed(hdev, err);
2078 hci_dev_unlock(hdev);
2082 /* During the HCI setup phase, a few error conditions are
2083 * ignored and they need to be checked now. If they are still
2084 * valid, it is important to turn the device back off.
2086 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2087 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2088 (hdev->dev_type == HCI_BREDR &&
2089 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2090 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2091 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2092 hci_dev_do_close(hdev);
2093 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2094 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2095 HCI_AUTO_OFF_TIMEOUT);
2098 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2099 /* For unconfigured devices, set the HCI_RAW flag
2100 * so that userspace can easily identify them.
2102 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2103 set_bit(HCI_RAW, &hdev->flags);
2105 /* For fully configured devices, this will send
2106 * the Index Added event. For unconfigured devices,
2107 * it will send Unconfigued Index Added event.
2109 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2110 * and no event will be send.
2112 mgmt_index_added(hdev);
2113 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2114 /* When the controller is now configured, then it
2115 * is important to clear the HCI_RAW flag.
2117 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2118 clear_bit(HCI_RAW, &hdev->flags);
2120 /* Powering on the controller with HCI_CONFIG set only
2121 * happens with the transition from unconfigured to
2122 * configured. This will send the Index Added event.
2124 mgmt_index_added(hdev);
2128 static void hci_power_off(struct work_struct *work)
2130 struct hci_dev *hdev = container_of(work, struct hci_dev,
2133 BT_DBG("%s", hdev->name);
2135 hci_dev_do_close(hdev);
2138 static void hci_discov_off(struct work_struct *work)
2140 struct hci_dev *hdev;
2142 hdev = container_of(work, struct hci_dev, discov_off.work);
2144 BT_DBG("%s", hdev->name);
2146 mgmt_discoverable_timeout(hdev);
2149 void hci_uuids_clear(struct hci_dev *hdev)
2151 struct bt_uuid *uuid, *tmp;
2153 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2154 list_del(&uuid->list);
2159 void hci_link_keys_clear(struct hci_dev *hdev)
2161 struct link_key *key;
2163 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2164 list_del_rcu(&key->list);
2165 kfree_rcu(key, rcu);
2169 void hci_smp_ltks_clear(struct hci_dev *hdev)
2173 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2174 list_del_rcu(&k->list);
2179 void hci_smp_irks_clear(struct hci_dev *hdev)
2183 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2184 list_del_rcu(&k->list);
2189 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2194 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2195 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2205 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2206 u8 key_type, u8 old_key_type)
2209 if (key_type < 0x03)
2212 /* Debug keys are insecure so don't store them persistently */
2213 if (key_type == HCI_LK_DEBUG_COMBINATION)
2216 /* Changed combination key and there's no previous one */
2217 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2220 /* Security mode 3 case */
2224 /* BR/EDR key derived using SC from an LE link */
2225 if (conn->type == LE_LINK)
2228 /* Neither local nor remote side had no-bonding as requirement */
2229 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2232 /* Local side had dedicated bonding as requirement */
2233 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2236 /* Remote side had dedicated bonding as requirement */
2237 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2240 /* If none of the above criteria match, then don't store the key
2245 static u8 ltk_role(u8 type)
2247 if (type == SMP_LTK)
2248 return HCI_ROLE_MASTER;
2250 return HCI_ROLE_SLAVE;
2253 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2254 u8 addr_type, u8 role)
2259 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2260 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2263 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2273 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2275 struct smp_irk *irk;
2278 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2279 if (!bacmp(&irk->rpa, rpa)) {
2285 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2286 if (smp_irk_matches(hdev, irk->val, rpa)) {
2287 bacpy(&irk->rpa, rpa);
2297 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2300 struct smp_irk *irk;
2302 /* Identity Address must be public or static random */
2303 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2307 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2308 if (addr_type == irk->addr_type &&
2309 bacmp(bdaddr, &irk->bdaddr) == 0) {
2319 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2320 bdaddr_t *bdaddr, u8 *val, u8 type,
2321 u8 pin_len, bool *persistent)
2323 struct link_key *key, *old_key;
2326 old_key = hci_find_link_key(hdev, bdaddr);
2328 old_key_type = old_key->type;
2331 old_key_type = conn ? conn->key_type : 0xff;
2332 key = kzalloc(sizeof(*key), GFP_KERNEL);
2335 list_add_rcu(&key->list, &hdev->link_keys);
2338 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2340 /* Some buggy controller combinations generate a changed
2341 * combination key for legacy pairing even when there's no
2343 if (type == HCI_LK_CHANGED_COMBINATION &&
2344 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2345 type = HCI_LK_COMBINATION;
2347 conn->key_type = type;
2350 bacpy(&key->bdaddr, bdaddr);
2351 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2352 key->pin_len = pin_len;
2354 if (type == HCI_LK_CHANGED_COMBINATION)
2355 key->type = old_key_type;
2360 *persistent = hci_persistent_key(hdev, conn, type,
2366 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2367 u8 addr_type, u8 type, u8 authenticated,
2368 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2370 struct smp_ltk *key, *old_key;
2371 u8 role = ltk_role(type);
2373 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2377 key = kzalloc(sizeof(*key), GFP_KERNEL);
2380 list_add_rcu(&key->list, &hdev->long_term_keys);
2383 bacpy(&key->bdaddr, bdaddr);
2384 key->bdaddr_type = addr_type;
2385 memcpy(key->val, tk, sizeof(key->val));
2386 key->authenticated = authenticated;
2389 key->enc_size = enc_size;
2395 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2396 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2398 struct smp_irk *irk;
2400 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2402 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2406 bacpy(&irk->bdaddr, bdaddr);
2407 irk->addr_type = addr_type;
2409 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2412 memcpy(irk->val, val, 16);
2413 bacpy(&irk->rpa, rpa);
2418 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2420 struct link_key *key;
2422 key = hci_find_link_key(hdev, bdaddr);
2426 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2428 list_del_rcu(&key->list);
2429 kfree_rcu(key, rcu);
2434 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2439 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2440 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2443 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2445 list_del_rcu(&k->list);
2450 return removed ? 0 : -ENOENT;
2453 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2457 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2458 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2461 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2463 list_del_rcu(&k->list);
2468 /* HCI command timer function */
2469 static void hci_cmd_timeout(struct work_struct *work)
2471 struct hci_dev *hdev = container_of(work, struct hci_dev,
2474 if (hdev->sent_cmd) {
2475 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2476 u16 opcode = __le16_to_cpu(sent->opcode);
2478 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2480 BT_ERR("%s command tx timeout", hdev->name);
2483 atomic_set(&hdev->cmd_cnt, 1);
2484 queue_work(hdev->workqueue, &hdev->cmd_work);
2487 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2488 bdaddr_t *bdaddr, u8 bdaddr_type)
2490 struct oob_data *data;
2492 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2493 if (bacmp(bdaddr, &data->bdaddr) != 0)
2495 if (data->bdaddr_type != bdaddr_type)
2503 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2506 struct oob_data *data;
2508 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2512 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2514 list_del(&data->list);
2520 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2522 struct oob_data *data, *n;
2524 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2525 list_del(&data->list);
2530 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2531 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2532 u8 *hash256, u8 *rand256)
2534 struct oob_data *data;
2536 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2538 data = kmalloc(sizeof(*data), GFP_KERNEL);
2542 bacpy(&data->bdaddr, bdaddr);
2543 data->bdaddr_type = bdaddr_type;
2544 list_add(&data->list, &hdev->remote_oob_data);
2547 if (hash192 && rand192) {
2548 memcpy(data->hash192, hash192, sizeof(data->hash192));
2549 memcpy(data->rand192, rand192, sizeof(data->rand192));
2551 memset(data->hash192, 0, sizeof(data->hash192));
2552 memset(data->rand192, 0, sizeof(data->rand192));
2555 if (hash256 && rand256) {
2556 memcpy(data->hash256, hash256, sizeof(data->hash256));
2557 memcpy(data->rand256, rand256, sizeof(data->rand256));
2559 memset(data->hash256, 0, sizeof(data->hash256));
2560 memset(data->rand256, 0, sizeof(data->rand256));
2563 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2568 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2569 bdaddr_t *bdaddr, u8 type)
2571 struct bdaddr_list *b;
2573 list_for_each_entry(b, bdaddr_list, list) {
2574 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2581 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2583 struct list_head *p, *n;
2585 list_for_each_safe(p, n, bdaddr_list) {
2586 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2593 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2595 struct bdaddr_list *entry;
2597 if (!bacmp(bdaddr, BDADDR_ANY))
2600 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2603 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2607 bacpy(&entry->bdaddr, bdaddr);
2608 entry->bdaddr_type = type;
2610 list_add(&entry->list, list);
2615 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2617 struct bdaddr_list *entry;
2619 if (!bacmp(bdaddr, BDADDR_ANY)) {
2620 hci_bdaddr_list_clear(list);
2624 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2628 list_del(&entry->list);
2634 /* This function requires the caller holds hdev->lock */
2635 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2636 bdaddr_t *addr, u8 addr_type)
2638 struct hci_conn_params *params;
2640 /* The conn params list only contains identity addresses */
2641 if (!hci_is_identity_address(addr, addr_type))
2644 list_for_each_entry(params, &hdev->le_conn_params, list) {
2645 if (bacmp(¶ms->addr, addr) == 0 &&
2646 params->addr_type == addr_type) {
2654 /* This function requires the caller holds hdev->lock */
2655 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2656 bdaddr_t *addr, u8 addr_type)
2658 struct hci_conn_params *param;
2660 /* The list only contains identity addresses */
2661 if (!hci_is_identity_address(addr, addr_type))
2664 list_for_each_entry(param, list, action) {
2665 if (bacmp(¶m->addr, addr) == 0 &&
2666 param->addr_type == addr_type)
2673 /* This function requires the caller holds hdev->lock */
2674 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2675 bdaddr_t *addr, u8 addr_type)
2677 struct hci_conn_params *params;
2679 if (!hci_is_identity_address(addr, addr_type))
2682 params = hci_conn_params_lookup(hdev, addr, addr_type);
2686 params = kzalloc(sizeof(*params), GFP_KERNEL);
2688 BT_ERR("Out of memory");
2692 bacpy(¶ms->addr, addr);
2693 params->addr_type = addr_type;
2695 list_add(¶ms->list, &hdev->le_conn_params);
2696 INIT_LIST_HEAD(¶ms->action);
2698 params->conn_min_interval = hdev->le_conn_min_interval;
2699 params->conn_max_interval = hdev->le_conn_max_interval;
2700 params->conn_latency = hdev->le_conn_latency;
2701 params->supervision_timeout = hdev->le_supv_timeout;
2702 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2704 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2709 static void hci_conn_params_free(struct hci_conn_params *params)
2712 hci_conn_drop(params->conn);
2713 hci_conn_put(params->conn);
2716 list_del(¶ms->action);
2717 list_del(¶ms->list);
2721 /* This function requires the caller holds hdev->lock */
2722 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2724 struct hci_conn_params *params;
2726 params = hci_conn_params_lookup(hdev, addr, addr_type);
2730 hci_conn_params_free(params);
2732 hci_update_background_scan(hdev);
2734 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2737 /* This function requires the caller holds hdev->lock */
2738 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2740 struct hci_conn_params *params, *tmp;
2742 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2743 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2745 list_del(¶ms->list);
2749 BT_DBG("All LE disabled connection parameters were removed");
2752 /* This function requires the caller holds hdev->lock */
2753 void hci_conn_params_clear_all(struct hci_dev *hdev)
2755 struct hci_conn_params *params, *tmp;
2757 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2758 hci_conn_params_free(params);
2760 hci_update_background_scan(hdev);
2762 BT_DBG("All LE connection parameters were removed");
2765 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2768 BT_ERR("Failed to start inquiry: status %d", status);
2771 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2772 hci_dev_unlock(hdev);
2777 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2780 /* General inquiry access code (GIAC) */
2781 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2782 struct hci_request req;
2783 struct hci_cp_inquiry cp;
2787 BT_ERR("Failed to disable LE scanning: status %d", status);
2791 switch (hdev->discovery.type) {
2792 case DISCOV_TYPE_LE:
2794 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2795 hci_dev_unlock(hdev);
2798 case DISCOV_TYPE_INTERLEAVED:
2799 hci_req_init(&req, hdev);
2801 memset(&cp, 0, sizeof(cp));
2802 memcpy(&cp.lap, lap, sizeof(cp.lap));
2803 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2804 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2808 hci_inquiry_cache_flush(hdev);
2810 err = hci_req_run(&req, inquiry_complete);
2812 BT_ERR("Inquiry request failed: err %d", err);
2813 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2816 hci_dev_unlock(hdev);
2821 static void le_scan_disable_work(struct work_struct *work)
2823 struct hci_dev *hdev = container_of(work, struct hci_dev,
2824 le_scan_disable.work);
2825 struct hci_request req;
2828 BT_DBG("%s", hdev->name);
2830 hci_req_init(&req, hdev);
2832 hci_req_add_le_scan_disable(&req);
2834 err = hci_req_run(&req, le_scan_disable_work_complete);
2836 BT_ERR("Disable LE scanning request failed: err %d", err);
2839 /* Copy the Identity Address of the controller.
2841 * If the controller has a public BD_ADDR, then by default use that one.
2842 * If this is a LE only controller without a public address, default to
2843 * the static random address.
2845 * For debugging purposes it is possible to force controllers with a
2846 * public address to use the static random address instead.
2848 * In case BR/EDR has been disabled on a dual-mode controller and
2849 * userspace has configured a static address, then that address
2850 * becomes the identity address instead of the public BR/EDR address.
2852 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2855 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2856 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2857 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2858 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2859 bacpy(bdaddr, &hdev->static_addr);
2860 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2862 bacpy(bdaddr, &hdev->bdaddr);
2863 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2867 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev with protocol defaults:
 * packet types, timing parameters, LE defaults, lists, work items and
 * queues.  Returns the new device (NULL-check on allocation failure is
 * elided in this excerpt).  The caller eventually releases it through
 * hci_free_dev().
 */
2868 struct hci_dev *hci_alloc_dev(void)
2870 	struct hci_dev *hdev;
2872 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	/* Baseline BR/EDR capabilities and identification defaults. */
2876 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2877 	hdev->esco_type = (ESCO_HV1);
2878 	hdev->link_mode = (HCI_LM_ACCEPT);
2879 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2880 	hdev->io_capability = 0x03;	/* No Input No Output */
2881 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2882 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2883 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	/* Sniff intervals in baseband slots (0.625 ms units). */
2885 	hdev->sniff_max_interval = 800;
2886 	hdev->sniff_min_interval = 80;
	/* LE advertising / scanning / connection defaults, in the units
	 * used by the corresponding HCI commands.
	 */
2888 	hdev->le_adv_channel_map = 0x07;
2889 	hdev->le_adv_min_interval = 0x0800;
2890 	hdev->le_adv_max_interval = 0x0800;
2891 	hdev->le_scan_interval = 0x0060;
2892 	hdev->le_scan_window = 0x0030;
2893 	hdev->le_conn_min_interval = 0x0028;
2894 	hdev->le_conn_max_interval = 0x0038;
2895 	hdev->le_conn_latency = 0x0000;
2896 	hdev->le_supv_timeout = 0x002a;
2897 	hdev->le_def_tx_len = 0x001b;
2898 	hdev->le_def_tx_time = 0x0148;
2899 	hdev->le_max_tx_len = 0x001b;
2900 	hdev->le_max_tx_time = 0x0148;
2901 	hdev->le_max_rx_len = 0x001b;
2902 	hdev->le_max_rx_time = 0x0148;
2904 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2905 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2906 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2907 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2909 	mutex_init(&hdev->lock);
2910 	mutex_init(&hdev->req_lock);
	/* Device-private lists; all protected by hdev->lock at runtime. */
2912 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2913 	INIT_LIST_HEAD(&hdev->blacklist);
2914 	INIT_LIST_HEAD(&hdev->whitelist);
2915 	INIT_LIST_HEAD(&hdev->uuids);
2916 	INIT_LIST_HEAD(&hdev->link_keys);
2917 	INIT_LIST_HEAD(&hdev->long_term_keys);
2918 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2919 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2920 	INIT_LIST_HEAD(&hdev->le_white_list);
2921 	INIT_LIST_HEAD(&hdev->le_conn_params);
2922 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2923 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2924 	INIT_LIST_HEAD(&hdev->conn_hash.list);
	/* Deferred work: RX/TX/command processing and power management. */
2926 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2927 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2928 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2929 	INIT_WORK(&hdev->power_on, hci_power_on);
2931 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2932 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2933 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2935 	skb_queue_head_init(&hdev->rx_q);
2936 	skb_queue_head_init(&hdev->cmd_q);
2937 	skb_queue_head_init(&hdev->raw_q);
2939 	init_waitqueue_head(&hdev->req_wait_q);
2941 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2943 	hci_init_sysfs(hdev);
2944 	discovery_init(hdev);
2948 EXPORT_SYMBOL(hci_alloc_dev);
2950 /* Free HCI device */
/* Drop the device reference; the actual kfree happens in the device's
 * release callback once the last reference is gone.
 */
2951 void hci_free_dev(struct hci_dev *hdev)
2953 	/* will free via device release */
2954 	put_device(&hdev->dev);
2956 EXPORT_SYMBOL(hci_free_dev);
2958 /* Register HCI device */
/* Register a driver-provided hci_dev with the core: assign an index,
 * create workqueues, debugfs dir, sysfs device and rfkill switch, add
 * it to hci_dev_list, and kick off asynchronous power-on.  Error paths
 * unwind in reverse order (several unwind labels are elided in this
 * excerpt).  Returns the assigned id on success, negative errno on
 * failure.
 */
2959 int hci_register_dev(struct hci_dev *hdev)
	/* A driver must at minimum provide open/close/send callbacks. */
2963 	if (!hdev->open || !hdev->close || !hdev->send)
2966 	/* Do not allow HCI_AMP devices to register at index 0,
2967 	 * so the index can be used as the AMP controller ID.
2969 	switch (hdev->dev_type) {
2971 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2974 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2983 	sprintf(hdev->name, "hci%d", id);
2986 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
	/* Per-device ordered workqueues (max_active = 1): one for general
	 * RX/TX/cmd work, one dedicated to request processing.
	 */
2988 	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2989 					  WQ_MEM_RECLAIM, 1, hdev->name);
2990 	if (!hdev->workqueue) {
2995 	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2996 					      WQ_MEM_RECLAIM, 1, hdev->name);
2997 	if (!hdev->req_workqueue) {
2998 		destroy_workqueue(hdev->workqueue);
	/* debugfs is optional; skip silently if bluetooth root is absent. */
3003 	if (!IS_ERR_OR_NULL(bt_debugfs))
3004 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3006 	dev_set_name(&hdev->dev, "%s", hdev->name);
3008 	error = device_add(&hdev->dev);
3012 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3013 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
	/* rfkill registration failure is non-fatal: run without rfkill. */
3016 		if (rfkill_register(hdev->rfkill) < 0) {
3017 			rfkill_destroy(hdev->rfkill);
3018 			hdev->rfkill = NULL;
3022 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3023 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
3025 	set_bit(HCI_SETUP, &hdev->dev_flags);
3026 	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3028 	if (hdev->dev_type == HCI_BREDR) {
3029 		/* Assume BR/EDR support until proven otherwise (such as
3030 		 * through reading supported features during init.
3032 		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3035 	write_lock(&hci_dev_list_lock);
3036 	list_add(&hdev->list, &hci_dev_list);
3037 	write_unlock(&hci_dev_list_lock);
3039 	/* Devices that are marked for raw-only usage are unconfigured
3040 	 * and should not be included in normal operation.
3042 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3043 		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3045 	hci_notify(hdev, HCI_DEV_REG);
	/* Power-on runs asynchronously on the request workqueue. */
3048 	queue_work(hdev->req_workqueue, &hdev->power_on);
	/* Error unwind: free workqueues and release the allocated index. */
3053 	destroy_workqueue(hdev->workqueue);
3054 	destroy_workqueue(hdev->req_workqueue);
3056 	ida_simple_remove(&hci_index_ida, hdev->id);
3060 EXPORT_SYMBOL(hci_register_dev);
3062 /* Unregister HCI device */
/* Tear down a registered hci_dev: remove it from the global list,
 * close it, cancel pending work, notify mgmt/userspace, unregister
 * rfkill/sysfs/debugfs, free all device-private lists, and release the
 * index.  Must be called by the driver before freeing the device.
 */
3063 void hci_unregister_dev(struct hci_dev *hdev)
3067 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
	/* Mark unregistering first so concurrent paths can bail out. */
3069 	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3073 	write_lock(&hci_dev_list_lock);
3074 	list_del(&hdev->list);
3075 	write_unlock(&hci_dev_list_lock);
3077 	hci_dev_do_close(hdev);
	/* Free any partially reassembled packets still held. */
3079 	for (i = 0; i < NUM_REASSEMBLY; i++)
3080 		kfree_skb(hdev->reassembly[i]);
3082 	cancel_work_sync(&hdev->power_on);
	/* Tell mgmt the index is gone unless the device never finished
	 * setup/config (userspace never saw it in that case).
	 */
3084 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3085 	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3086 	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3088 		mgmt_index_removed(hdev);
3089 		hci_dev_unlock(hdev);
3092 	/* mgmt_index_removed should take care of emptying the
3094 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3096 	hci_notify(hdev, HCI_DEV_UNREG);
3099 		rfkill_unregister(hdev->rfkill);
3100 		rfkill_destroy(hdev->rfkill);
3103 	smp_unregister(hdev);
3105 	device_del(&hdev->dev);
3107 	debugfs_remove_recursive(hdev->debugfs);
3109 	destroy_workqueue(hdev->workqueue);
3110 	destroy_workqueue(hdev->req_workqueue);
	/* Drop all stored keys, filters and address lists. */
3113 	hci_bdaddr_list_clear(&hdev->blacklist);
3114 	hci_bdaddr_list_clear(&hdev->whitelist);
3115 	hci_uuids_clear(hdev);
3116 	hci_link_keys_clear(hdev);
3117 	hci_smp_ltks_clear(hdev);
3118 	hci_smp_irks_clear(hdev);
3119 	hci_remote_oob_data_clear(hdev);
3120 	hci_bdaddr_list_clear(&hdev->le_white_list);
3121 	hci_conn_params_clear_all(hdev);
3122 	hci_discovery_filter_clear(hdev);
3123 	hci_dev_unlock(hdev);
3127 	ida_simple_remove(&hci_index_ida, id);
3129 EXPORT_SYMBOL(hci_unregister_dev);
3131 /* Suspend HCI device */
/* Broadcast a suspend notification for this device to HCI listeners. */
3132 int hci_suspend_dev(struct hci_dev *hdev)
3134 	hci_notify(hdev, HCI_DEV_SUSPEND);
3137 EXPORT_SYMBOL(hci_suspend_dev);
3139 /* Resume HCI device */
/* Broadcast a resume notification for this device to HCI listeners. */
3140 int hci_resume_dev(struct hci_dev *hdev)
3142 	hci_notify(hdev, HCI_DEV_RESUME);
3145 EXPORT_SYMBOL(hci_resume_dev);
3147 /* Reset HCI device */
/* Inject a synthetic HCI Hardware Error event (error code 0x00) into
 * the RX path, which makes the upper stack reset the controller.
 * Intended for drivers that detect a dead controller.
 */
3148 int hci_reset_dev(struct hci_dev *hdev)
	/* { event = HCI_EV_HARDWARE_ERROR, plen = 1, code = 0x00 } */
3150 	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3151 	struct sk_buff *skb;
3153 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3157 	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3158 	memcpy(skb_put(skb, 3), hw_err, 3);
3160 	/* Send Hardware Error to upper stack */
3161 	return hci_recv_frame(hdev, skb);
3163 EXPORT_SYMBOL(hci_reset_dev);
3165 /* Receive frame from HCI drivers */
/* Entry point for drivers delivering a complete frame: tag it as
 * incoming, timestamp it, queue it on rx_q and schedule rx_work.
 * Frames are rejected unless the device is UP or in INIT.
 */
3166 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3168 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3169 		      && !test_bit(HCI_INIT, &hdev->flags))) {
	/* Incoming skb (marker used by the socket/monitor delivery). */
3175 	bt_cb(skb)->incoming = 1;
3178 	__net_timestamp(skb);
3180 	skb_queue_tail(&hdev->rx_q, skb);
3181 	queue_work(hdev->workqueue, &hdev->rx_work);
3185 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a fragmented HCI packet of the given type from a byte
 * stream.  Accumulates data into hdev->reassembly[index]; once the
 * expected length is reached the complete frame is handed to
 * hci_recv_frame().  Returns the number of unconsumed bytes (return
 * statements are elided in this excerpt) or a negative error.
 */
3187 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3188 			  int count, __u8 index)
3193 	struct sk_buff *skb;
3194 	struct bt_skb_cb *scb;
	/* Only ACL/SCO/event packets can be reassembled; index must be a
	 * valid reassembly slot.
	 */
3196 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3197 	    index >= NUM_REASSEMBLY)
3200 	skb = hdev->reassembly[index];
	/* No partial packet yet: allocate one sized for the worst case
	 * of this packet type.
	 */
3204 		case HCI_ACLDATA_PKT:
3205 			len = HCI_MAX_FRAME_SIZE;
3206 			hlen = HCI_ACL_HDR_SIZE;
3209 			len = HCI_MAX_EVENT_SIZE;
3210 			hlen = HCI_EVENT_HDR_SIZE;
3212 		case HCI_SCODATA_PKT:
3213 			len = HCI_MAX_SCO_SIZE;
3214 			hlen = HCI_SCO_HDR_SIZE;
3218 		skb = bt_skb_alloc(len, GFP_ATOMIC);
3222 		scb = (void *) skb->cb;
3224 		scb->pkt_type = type;
3226 		hdev->reassembly[index] = skb;
	/* Copy as much as is available, bounded by what is still expected. */
3230 	scb = (void *) skb->cb;
3231 	len = min_t(uint, scb->expect, count);
3233 	memcpy(skb_put(skb, len), data, len);
	/* Once the header is complete, learn the payload length from it
	 * and sanity-check it against the skb's tailroom; on overflow
	 * the partial packet is dropped and the slot cleared.
	 */
3242 		if (skb->len == HCI_EVENT_HDR_SIZE) {
3243 			struct hci_event_hdr *h = hci_event_hdr(skb);
3244 			scb->expect = h->plen;
3246 			if (skb_tailroom(skb) < scb->expect) {
3248 				hdev->reassembly[index] = NULL;
3254 	case HCI_ACLDATA_PKT:
3255 		if (skb->len == HCI_ACL_HDR_SIZE) {
3256 			struct hci_acl_hdr *h = hci_acl_hdr(skb);
3257 			scb->expect = __le16_to_cpu(h->dlen);
3259 			if (skb_tailroom(skb) < scb->expect) {
3261 				hdev->reassembly[index] = NULL;
3267 	case HCI_SCODATA_PKT:
3268 		if (skb->len == HCI_SCO_HDR_SIZE) {
3269 			struct hci_sco_hdr *h = hci_sco_hdr(skb);
3270 			scb->expect = h->dlen;
3272 			if (skb_tailroom(skb) < scb->expect) {
3274 				hdev->reassembly[index] = NULL;
3281 	if (scb->expect == 0) {
3282 		/* Complete frame */
3284 		bt_cb(skb)->pkt_type = type;
3285 		hci_recv_frame(hdev, skb);
3287 		hdev->reassembly[index] = NULL;
3295 #define STREAM_REASSEMBLY 0
/* Feed raw stream bytes (e.g. from a UART driver) into reassembly.
 * When no packet is in progress, the first byte is the packet-type
 * indicator; remaining bytes are passed to hci_reassembly() on the
 * dedicated STREAM_REASSEMBLY slot until all input is consumed.
 */
3297 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3303 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
	/* One-byte view onto the start of the buffer: the type octet. */
3306 			struct { char type; } *pkt;
3308 			/* Start of the frame */
3315 			type = bt_cb(skb)->pkt_type;
3317 		rem = hci_reassembly(hdev, type, data, count,
	/* Advance past the bytes hci_reassembly() consumed. */
3322 		data += (count - rem);
3328 EXPORT_SYMBOL(hci_recv_stream_fragment);
3330 /* ---- Interface to upper protocols ---- */
/* Add an upper-protocol callback descriptor to the global hci_cb_list. */
3332 int hci_register_cb(struct hci_cb *cb)
3334 	BT_DBG("%p name %s", cb, cb->name);
3336 	write_lock(&hci_cb_list_lock);
3337 	list_add(&cb->list, &hci_cb_list);
3338 	write_unlock(&hci_cb_list_lock);
3342 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback descriptor from hci_cb_list. */
3344 int hci_unregister_cb(struct hci_cb *cb)
3346 	BT_DBG("%p name %s", cb, cb->name);
3348 	write_lock(&hci_cb_list_lock);
3349 	list_del(&cb->list);
3350 	write_unlock(&hci_cb_list_lock);
3354 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver: timestamp it, mirror it to
 * the monitor (and to raw sockets when in promiscuous mode), then call
 * the driver's send callback.
 */
3356 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3360 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3363 	__net_timestamp(skb);
3365 	/* Send copy to monitor */
3366 	hci_send_to_monitor(hdev, skb);
3368 	if (atomic_read(&hdev->promisc)) {
3369 		/* Send copy to the sockets */
3370 		hci_send_to_sock(hdev, skb);
3373 	/* Get rid of skb owner, prior to sending to the driver. */
3376 	err = hdev->send(hdev, skb);
3378 		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* True while a synchronous HCI request is awaiting its completion. */
3383 bool hci_req_pending(struct hci_dev *hdev)
3385 	return (hdev->req_status == HCI_REQ_PEND);
3388 /* Send HCI command */
/* Build an HCI command packet for the given opcode/parameters, mark it
 * as a single-command request, queue it on cmd_q and schedule cmd_work
 * to transmit it.
 */
3389 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3392 	struct sk_buff *skb;
3394 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3396 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3398 		BT_ERR("%s no memory for command", hdev->name);
3402 	/* Stand-alone HCI commands must be flagged as
3403 	 * single-command requests.
3405 	bt_cb(skb)->req.start = true;
3407 	skb_queue_tail(&hdev->cmd_q, skb);
3408 	queue_work(hdev->workqueue, &hdev->cmd_work);
3413 /* Get data from the previously sent command */
/* Return a pointer to the parameter payload of the last sent command,
 * or NULL when nothing was sent or the opcode does not match (NULL
 * returns are elided in this excerpt).
 */
3414 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3416 	struct hci_command_hdr *hdr;
3418 	if (!hdev->sent_cmd)
3421 	hdr = (void *) hdev->sent_cmd->data;
3423 	if (hdr->opcode != cpu_to_le16(opcode))
3426 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
	/* Payload starts right after the command header. */
3428 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle + packet-boundary/broadcast flags
 * packed into one 16-bit field, plus data length) to the skb.
 */
3432 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3434 	struct hci_acl_hdr *hdr;
3437 	skb_push(skb, HCI_ACL_HDR_SIZE);
3438 	skb_reset_transport_header(skb);
3439 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3440 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3441 	hdr->dlen   = cpu_to_le16(len);
/* Add ACL headers to an (optionally fragmented) skb and queue it (and
 * every fragment in its frag_list) onto the given channel queue.  All
 * fragments are queued atomically under the queue lock.
 */
3444 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3445 			  struct sk_buff *skb, __u16 flags)
3447 	struct hci_conn *conn = chan->conn;
3448 	struct hci_dev *hdev = conn->hdev;
3449 	struct sk_buff *list;
3451 	skb->len = skb_headlen(skb);
3454 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	/* BR/EDR uses the connection handle; AMP uses the channel
	 * handle (case labels are elided in this excerpt).
	 */
3456 	switch (hdev->dev_type) {
3458 		hci_add_acl_hdr(skb, conn->handle, flags);
3461 		hci_add_acl_hdr(skb, chan->handle, flags);
3464 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3468 	list = skb_shinfo(skb)->frag_list;
3470 		/* Non fragmented */
3471 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3473 		skb_queue_tail(queue, skb);
3476 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3478 		skb_shinfo(skb)->frag_list = NULL;
3480 		/* Queue all fragments atomically. We need to use spin_lock_bh
3481 		 * here because of 6LoWPAN links, as there this function is
3482 		 * called from softirq and using normal spin lock could cause
3485 		spin_lock_bh(&queue->lock);
3487 		__skb_queue_tail(queue, skb);
	/* Continuation fragments do not carry the ACL_START flag. */
3489 		flags &= ~ACL_START;
3492 			skb = list; list = list->next;
3494 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3495 			hci_add_acl_hdr(skb, conn->handle, flags);
3497 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3499 			__skb_queue_tail(queue, skb);
3502 		spin_unlock_bh(&queue->lock);
/* Queue outgoing ACL data on the channel's data queue and schedule the
 * TX worker to transmit it.
 */
3506 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3508 	struct hci_dev *hdev = chan->conn->hdev;
3510 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3512 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3514 	queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (connection handle + length) to the skb, queue
 * it on the connection's data queue and schedule the TX worker.
 */
3518 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3520 	struct hci_dev *hdev = conn->hdev;
3521 	struct hci_sco_hdr hdr;
3523 	BT_DBG("%s len %d", hdev->name, skb->len);
3525 	hdr.handle = cpu_to_le16(conn->handle);
3526 	hdr.dlen   = skb->len;
3528 	skb_push(skb, HCI_SCO_HDR_SIZE);
3529 	skb_reset_transport_header(skb);
3530 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3532 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3534 	skb_queue_tail(&conn->data_q, skb);
3535 	queue_work(hdev->workqueue, &hdev->tx_work);
3538 /* ---- HCI TX task (outgoing data) ---- */
3540 /* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (fair scheduling), and compute its TX quota
 * from the controller's free buffer count.  Quota is written through
 * the out-parameter (its declaration is elided in this excerpt).
 */
3541 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3544 	struct hci_conn_hash *h = &hdev->conn_hash;
3545 	struct hci_conn *conn = NULL, *c;
3546 	unsigned int num = 0, min = ~0;
3548 	/* We don't have to lock device here. Connections are always
3549 	 * added and removed with TX task disabled. */
3553 	list_for_each_entry_rcu(c, &h->list, list) {
3554 		if (c->type != type || skb_queue_empty(&c->data_q))
3557 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
	/* Track the eligible connection with the least packets in flight. */
3562 		if (c->sent < min) {
	/* Early exit once all connections of this type were inspected. */
3567 		if (hci_conn_num(hdev, type) == num)
	/* Quota source depends on link type; LE falls back to the ACL
	 * pool when the controller has no dedicated LE buffers.
	 */
3576 	switch (conn->type) {
3578 		cnt = hdev->acl_cnt;
3582 		cnt = hdev->sco_cnt;
3585 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3589 		BT_ERR("Unknown link type");
3597 	BT_DBG("conn %p quote %d", conn, *quote);
/* TX-timeout handler: disconnect every connection of the given type
 * that still has unacknowledged packets outstanding.
 */
3601 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3603 	struct hci_conn_hash *h = &hdev->conn_hash;
3606 	BT_ERR("%s link tx timeout", hdev->name);
3610 	/* Kill stalled connections */
3611 	list_for_each_entry_rcu(c, &h->list, list) {
3612 		if (c->type == type && c->sent) {
3613 			BT_ERR("%s killing stalled connection %pMR",
3614 			       hdev->name, &c->dst);
3615 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: across all connections of the given type,
 * pick the channel whose head-of-queue skb has the highest priority;
 * ties are broken by the fewest packets in flight on the connection.
 * Writes the TX quota through the out-parameter.
 */
3622 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3625 	struct hci_conn_hash *h = &hdev->conn_hash;
3626 	struct hci_chan *chan = NULL;
3627 	unsigned int num = 0, min = ~0, cur_prio = 0;
3628 	struct hci_conn *conn;
3629 	int cnt, q, conn_num = 0;
3631 	BT_DBG("%s", hdev->name);
3635 	list_for_each_entry_rcu(conn, &h->list, list) {
3636 		struct hci_chan *tmp;
3638 		if (conn->type != type)
3641 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3646 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3647 			struct sk_buff *skb;
3649 			if (skb_queue_empty(&tmp->data_q))
	/* Priority of the channel = priority of its head skb. */
3652 			skb = skb_peek(&tmp->data_q);
3653 			if (skb->priority < cur_prio)
	/* A strictly higher priority resets the fairness tracking. */
3656 			if (skb->priority > cur_prio) {
3659 				cur_prio = skb->priority;
3664 			if (conn->sent < min) {
3670 		if (hci_conn_num(hdev, type) == conn_num)
	/* Quota from the pool matching the link/flow-control type. */
3679 	switch (chan->conn->type) {
3681 		cnt = hdev->acl_cnt;
3684 		cnt = hdev->block_cnt;
3688 		cnt = hdev->sco_cnt;
3691 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3695 		BT_ERR("Unknown link type");
3700 	BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation: after a TX round, bump the priority of skbs that
 * were skipped so lower-priority channels eventually get served.
 * Promoted skbs are capped at HCI_PRIO_MAX - 1.
 */
3704 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3706 	struct hci_conn_hash *h = &hdev->conn_hash;
3707 	struct hci_conn *conn;
3710 	BT_DBG("%s", hdev->name);
3714 	list_for_each_entry_rcu(conn, &h->list, list) {
3715 		struct hci_chan *chan;
3717 		if (conn->type != type)
3720 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3725 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3726 			struct sk_buff *skb;
3733 			if (skb_queue_empty(&chan->data_q))
3736 			skb = skb_peek(&chan->data_q);
3737 			if (skb->priority >= HCI_PRIO_MAX - 1)
3740 			skb->priority = HCI_PRIO_MAX - 1;
3742 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3746 		if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by this ACL packet's
 * payload (header excluded), rounded up to whole blocks.
 */
3754 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3756 	/* Calculate count of blocks used by this packet */
3757 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has no free buffers (cnt == 0) and no ACL packet
 * has completed within HCI_ACL_TX_TIMEOUT, treat the link as stalled.
 * Skipped for unconfigured devices.
 */
3760 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3762 	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3763 		/* ACL tx timeout must be longer than maximum
3764 		 * link supervision timeout (40.9 seconds) */
3765 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3766 				       HCI_ACL_TX_TIMEOUT))
3767 			hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while buffer credits remain, drain the
 * highest-priority channel up to its quota, stopping early if the
 * head-of-queue priority drops.  Recalculates priorities afterwards if
 * anything was sent.
 */
3771 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3773 	unsigned int cnt = hdev->acl_cnt;
3774 	struct hci_chan *chan;
3775 	struct sk_buff *skb;
3778 	__check_timeout(hdev, cnt);
3780 	while (hdev->acl_cnt &&
3781 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
	/* Priority is fixed at loop entry from the head skb. */
3782 		u32 priority = (skb_peek(&chan->data_q))->priority;
3783 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3784 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3785 			       skb->len, skb->priority);
3787 			/* Stop if priority has changed */
3788 			if (skb->priority < priority)
3791 			skb = skb_dequeue(&chan->data_q);
3793 			hci_conn_enter_active_mode(chan->conn,
3794 						   bt_cb(skb)->force_active);
3796 			hci_send_frame(hdev, skb);
3797 			hdev->acl_last_tx = jiffies;
	/* If anything was sent, promote starved channels. */
3805 	if (cnt != hdev->acl_cnt)
3806 		hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control counted in data blocks
 * rather than packets).  Like hci_sched_acl_pkt(), but each packet
 * consumes __get_blocks() credits and a packet larger than the
 * remaining block budget stops the inner loop.
 */
3809 static void hci_sched_acl_blk(struct hci_dev *hdev)
3811 	unsigned int cnt = hdev->block_cnt;
3812 	struct hci_chan *chan;
3813 	struct sk_buff *skb;
3817 	__check_timeout(hdev, cnt);
3819 	BT_DBG("%s", hdev->name);
	/* AMP controllers schedule AMP links here (the type assignment
	 * is elided in this excerpt).
	 */
3821 	if (hdev->dev_type == HCI_AMP)
3826 	while (hdev->block_cnt > 0 &&
3827 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3828 		u32 priority = (skb_peek(&chan->data_q))->priority;
3829 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3832 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3833 			       skb->len, skb->priority);
3835 			/* Stop if priority has changed */
3836 			if (skb->priority < priority)
3839 			skb = skb_dequeue(&chan->data_q);
3841 			blocks = __get_blocks(hdev, skb);
3842 			if (blocks > hdev->block_cnt)
3845 			hci_conn_enter_active_mode(chan->conn,
3846 						   bt_cb(skb)->force_active);
3848 			hci_send_frame(hdev, skb);
3849 			hdev->acl_last_tx = jiffies;
3851 			hdev->block_cnt -= blocks;
	/* Account blocks, not packets, against the channel/connection. */
3854 			chan->sent += blocks;
3855 			chan->conn->sent += blocks;
3859 	if (cnt != hdev->block_cnt)
3860 		hci_prio_recalculate(hdev, type);
/* ACL scheduling dispatcher: skip when there are no links of the type
 * this controller carries, then select packet- or block-based
 * scheduling according to the controller's flow-control mode.
 */
3863 static void hci_sched_acl(struct hci_dev *hdev)
3865 	BT_DBG("%s", hdev->name);
3867 	/* No ACL link over BR/EDR controller */
3868 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3871 	/* No AMP link over AMP controller */
3872 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3875 	switch (hdev->flow_ctl_mode) {
3876 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
3877 		hci_sched_acl_pkt(hdev);
3880 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3881 		hci_sched_acl_blk(hdev);
/* SCO scheduler: round-robin over SCO connections via hci_low_sent(),
 * sending up to the quota from each.  A conn->sent value of ~0 is
 * reset (reset statement elided in this excerpt) since SCO completions
 * are not reported.
 */
3887 static void hci_sched_sco(struct hci_dev *hdev)
3889 	struct hci_conn *conn;
3890 	struct sk_buff *skb;
3893 	BT_DBG("%s", hdev->name);
3895 	if (!hci_conn_num(hdev, SCO_LINK))
3898 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3899 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3900 			BT_DBG("skb %p len %d", skb, skb->len);
3901 			hci_send_frame(hdev, skb);
3904 			if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco(), operating on
 * ESCO_LINK connections and sharing the sco_cnt buffer pool.
 */
3910 static void hci_sched_esco(struct hci_dev *hdev)
3912 	struct hci_conn *conn;
3913 	struct sk_buff *skb;
3916 	BT_DBG("%s", hdev->name);
3918 	if (!hci_conn_num(hdev, ESCO_LINK))
3921 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3923 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3924 			BT_DBG("skb %p len %d", skb, skb->len);
3925 			hci_send_frame(hdev, skb);
3928 			if (conn->sent == ~0)
/* LE scheduler: same priority/quota model as the ACL packet scheduler,
 * with its own stall detection (45 s) and a credit pool that falls
 * back to the ACL pool on controllers without dedicated LE buffers.
 */
3934 static void hci_sched_le(struct hci_dev *hdev)
3936 	struct hci_chan *chan;
3937 	struct sk_buff *skb;
3938 	int quote, cnt, tmp;
3940 	BT_DBG("%s", hdev->name);
3942 	if (!hci_conn_num(hdev, LE_LINK))
3945 	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3946 		/* LE tx timeout must be longer than maximum
3947 		 * link supervision timeout (40.9 seconds) */
3948 		if (!hdev->le_cnt && hdev->le_pkts &&
3949 		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
3950 			hci_link_tx_to(hdev, LE_LINK);
	/* Dedicated LE credits when the controller reports them,
	 * otherwise share the ACL credit pool.
	 */
3953 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3955 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3956 		u32 priority = (skb_peek(&chan->data_q))->priority;
3957 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3958 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3959 			       skb->len, skb->priority);
3961 			/* Stop if priority has changed */
3962 			if (skb->priority < priority)
3965 			skb = skb_dequeue(&chan->data_q);
3967 			hci_send_frame(hdev, skb);
3968 			hdev->le_last_tx = jiffies;
	/* Write the remaining credits back to whichever pool was used
	 * (this branch is shown for the shared ACL pool case).
	 */
3979 		hdev->acl_cnt = cnt;
3982 		hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run each link-type scheduler (unless the device is bound
 * to a user channel, which bypasses kernel scheduling) and then flush
 * any raw-queued packets straight to the driver.
 */
3985 static void hci_tx_work(struct work_struct *work)
3987 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3988 	struct sk_buff *skb;
3990 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3991 	       hdev->sco_cnt, hdev->le_cnt);
3993 	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3994 		/* Schedule queues and send stuff to HCI driver */
3995 		hci_sched_acl(hdev);
3996 		hci_sched_sco(hdev);
3997 		hci_sched_esco(hdev);
4001 	/* Send next queued raw (unknown type) packet */
4002 	while ((skb = skb_dequeue(&hdev->raw_q)))
4003 		hci_send_frame(hdev, skb);
4006 /* ----- HCI RX task (incoming data processing) ----- */
4008 /* ACL data packet */
/* RX path for ACL data: unpack handle/flags from the header, look up
 * the connection, and forward the payload to L2CAP; log and drop
 * (kfree_skb elided in this excerpt) when the handle is unknown.
 */
4009 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4011 	struct hci_acl_hdr *hdr = (void *) skb->data;
4012 	struct hci_conn *conn;
4013 	__u16 handle, flags;
4015 	skb_pull(skb, HCI_ACL_HDR_SIZE);
	/* Header's 16-bit field packs the handle and the PB/BC flags. */
4017 	handle = __le16_to_cpu(hdr->handle);
4018 	flags  = hci_flags(handle);
4019 	handle = hci_handle(handle);
4021 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4024 	hdev->stat.acl_rx++;
4027 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4028 	hci_dev_unlock(hdev);
4031 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4033 		/* Send to upper protocol */
4034 		l2cap_recv_acldata(conn, skb, flags);
4037 		BT_ERR("%s ACL packet for unknown connection handle %d",
4038 		       hdev->name, handle);
4044 /* SCO data packet */
/* RX path for SCO data: unpack the handle, look up the connection and
 * forward the payload to the SCO layer; log and drop on an unknown
 * handle.
 */
4045 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4047 	struct hci_sco_hdr *hdr = (void *) skb->data;
4048 	struct hci_conn *conn;
4051 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4053 	handle = __le16_to_cpu(hdr->handle);
4055 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4057 	hdev->stat.sco_rx++;
4060 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4061 	hci_dev_unlock(hdev);
4064 		/* Send to upper protocol */
4065 		sco_recv_scodata(conn, skb);
4068 		BT_ERR("%s SCO packet for unknown connection handle %d",
4069 		       hdev->name, handle);
/* A request is complete when the next queued command starts a new
 * request (or the command queue is empty — that branch is elided in
 * this excerpt).
 */
4075 static bool hci_req_is_complete(struct hci_dev *hdev)
4077 	struct sk_buff *skb;
4079 	skb = skb_peek(&hdev->cmd_q);
4083 	return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q and
 * reschedule cmd_work.  HCI_OP_RESET is never resent (its completion
 * is handled specially).
 */
4086 static void hci_resend_last(struct hci_dev *hdev)
4088 	struct hci_command_hdr *sent;
4089 	struct sk_buff *skb;
4092 	if (!hdev->sent_cmd)
4095 	sent = (void *) hdev->sent_cmd->data;
4096 	opcode = __le16_to_cpu(sent->opcode);
4097 	if (opcode == HCI_OP_RESET)
4100 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	/* Head of the queue so it goes out before anything else. */
4104 	skb_queue_head(&hdev->cmd_q, skb);
4105 	queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle completion of a command belonging to a request: decide
 * whether the whole request is finished, find its completion callback
 * (either on the sent command or further down cmd_q), purge the rest
 * of a failed request from the queue, and invoke the callback.
 */
4108 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4110 	hci_req_complete_t req_complete = NULL;
4111 	struct sk_buff *skb;
4112 	unsigned long flags;
4114 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4116 	/* If the completed command doesn't match the last one that was
4117 	 * sent we need to do special handling of it.
4119 	if (!hci_sent_cmd_data(hdev, opcode)) {
4120 		/* Some CSR based controllers generate a spontaneous
4121 		 * reset complete event during init and any pending
4122 		 * command will never be completed. In such a case we
4123 		 * need to resend whatever was the last sent
4126 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4127 			hci_resend_last(hdev);
4132 	/* If the command succeeded and there's still more commands in
4133 	 * this request the request is not yet complete.
4135 	if (!status && !hci_req_is_complete(hdev))
4138 	/* If this was the last command in a request the complete
4139 	 * callback would be found in hdev->sent_cmd instead of the
4140 	 * command queue (hdev->cmd_q).
4142 	if (hdev->sent_cmd) {
4143 		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4146 			/* We must set the complete callback to NULL to
4147 			 * avoid calling the callback more than once if
4148 			 * this function gets called again.
4150 			bt_cb(hdev->sent_cmd)->req.complete = NULL;
4156 	/* Remove all pending commands belonging to this request */
4157 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4158 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
	/* Stop at the start of the next request; put it back. */
4159 		if (bt_cb(skb)->req.start) {
4160 			__skb_queue_head(&hdev->cmd_q, skb);
4164 		req_complete = bt_cb(skb)->req.complete;
4167 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
	/* On failure report the failing opcode; on success, HCI_OP_NOP. */
4171 		req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
/* RX worker: drain rx_q, mirroring each frame to the monitor and (in
 * promiscuous mode) to raw sockets, then dispatch by packet type to
 * the event/ACL/SCO handlers.  Data packets are dropped during INIT,
 * and everything is diverted when a user channel owns the device.
 */
4174 static void hci_rx_work(struct work_struct *work)
4176 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4177 	struct sk_buff *skb;
4179 	BT_DBG("%s", hdev->name);
4181 	while ((skb = skb_dequeue(&hdev->rx_q))) {
4182 		/* Send copy to monitor */
4183 		hci_send_to_monitor(hdev, skb);
4185 		if (atomic_read(&hdev->promisc)) {
4186 			/* Send copy to the sockets */
4187 			hci_send_to_sock(hdev, skb);
	/* User channel owns the device: kernel does not process frames. */
4190 		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4195 		if (test_bit(HCI_INIT, &hdev->flags)) {
4196 			/* Don't process data packets in this states. */
4197 			switch (bt_cb(skb)->pkt_type) {
4198 			case HCI_ACLDATA_PKT:
4199 			case HCI_SCODATA_PKT:
	/* Normal dispatch by packet type. */
4206 		switch (bt_cb(skb)->pkt_type) {
4208 			BT_DBG("%s Event packet", hdev->name);
4209 			hci_event_packet(hdev, skb);
4212 		case HCI_ACLDATA_PKT:
4213 			BT_DBG("%s ACL data packet", hdev->name);
4214 			hci_acldata_packet(hdev, skb);
4217 		case HCI_SCODATA_PKT:
4218 			BT_DBG("%s SCO data packet", hdev->name);
4219 			hci_scodata_packet(hdev, skb);
4229 static void hci_cmd_work(struct work_struct *work)
4231 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4232 struct sk_buff *skb;
4234 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4235 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4237 /* Send queued commands */
4238 if (atomic_read(&hdev->cmd_cnt)) {
4239 skb = skb_dequeue(&hdev->cmd_q);
4243 kfree_skb(hdev->sent_cmd);
4245 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4246 if (hdev->sent_cmd) {
4247 atomic_dec(&hdev->cmd_cnt);
4248 hci_send_frame(hdev, skb);
4249 if (test_bit(HCI_RESET, &hdev->flags))
4250 cancel_delayed_work(&hdev->cmd_timer);
4252 schedule_delayed_work(&hdev->cmd_timer,
4255 skb_queue_head(&hdev->cmd_q, skb);
4256 queue_work(hdev->workqueue, &hdev->cmd_work);