/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

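/* Usage sketch (illustrative only, not part of this file): a caller that
 * needs a synchronous command round-trip can use __hci_cmd_sync() to fetch
 * the controller's BD_ADDR. The returned skb holds the Command Complete
 * parameters (here a struct hci_rp_read_bd_addr) and must be freed by the
 * caller:
 *
 *	struct hci_rp_read_bd_addr *rp;
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_bd_addr *) skb->data;
 *	bacpy(&hdev->bdaddr, &rp->bdaddr);
 *	kfree_skb(skb);
 */
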
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

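/* Usage sketch (illustrative only): hci_req_sync() drives a builder
 * function such as hci_reset_req() below. A hypothetical caller that
 * wants a synchronous controller reset would do:
 *
 *	err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 *
 * The builder queues commands with hci_req_add(), and hci_req_sync()
 * blocks until the request completes, times out or is interrupted.
 */
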
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Only send HCI_Delete_Stored_Link_Key if it is supported */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

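/* Userspace view (illustrative only, not kernel code): the HCIINQUIRY
 * ioctl served above is typically driven from a raw HCI socket fd ("dd"
 * here), with the buffer sized for the request structure plus num_rsp
 * inquiry_info entries:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;			// hci0
 *	buf.ir.length  = 8;			// 8 * 1.28s inquiry
 *	buf.ir.num_rsp = 8;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	buf.ir.lap[0]  = 0x33;			// GIAC 0x9e8b33,
 *	buf.ir.lap[1]  = 0x8b;			// little endian
 *	buf.ir.lap[2]  = 0x9e;
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */
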
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

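/* For reference, every block emitted by create_ad() is a standard
 * "length, type, data" AD structure. A hypothetical LE-only controller
 * in general-discoverable mode, 0 dBm advertising TX power and the name
 * "hci0" would end up with these bytes:
 *
 *	02 01 06		len 2, EIR_FLAGS,
 *				LE_AD_GENERAL | LE_AD_NO_BREDR
 *	02 0a 00		len 2, EIR_TX_POWER, 0 dBm
 *	05 09 68 63 69 30	len 5, EIR_NAME_COMPLETE, "hci0"
 */
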
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   timeout);

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

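/* Usage sketch (illustrative only): a caller such as mgmt could start a
 * 10 second active LE scan with a 10 ms window out of every 60 ms roughly
 * like this (interval and window are in units of 0.625 ms):
 *
 *	err = hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0060, 0x0010,
 *			  msecs_to_jiffies(10000));
 *
 * The actual scan then runs from the system_long_wq work item and is
 * torn down by le_scan_disable_work() when the timeout fires.
 */
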
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

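/* Driver-side sketch (illustrative only, all names hypothetical): a
 * transport driver registers with the core roughly as follows.
 *
 *	static int example_probe(...)
 *	{
 *		struct hci_dev *hdev;
 *
 *		hdev = hci_alloc_dev();
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus   = HCI_USB;
 *		hdev->open  = example_open;
 *		hdev->close = example_close;
 *		hdev->flush = example_flush;
 *		hdev->send  = example_send;
 *
 *		if (hci_register_dev(hdev) < 0) {
 *			hci_free_dev(hdev);
 *			return -ENODEV;
 *		}
 *
 *		return 0;
 *	}
 */
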
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

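/* Driver-side sketch (illustrative only): a driver that already holds a
 * complete frame hands it to the core like this, tagging the packet type
 * and the owning hdev first:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 *
 * On error the skb has already been consumed, so the driver must not
 * free it again.
 */
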
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;
				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);
				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;
				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

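/* Driver-side sketch (illustrative only): transports that deliver data
 * in arbitrary chunks but already know the packet type (say, a
 * hypothetical SDIO function with a separate type register) feed the
 * reassembler chunk by chunk:
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 *
 * Pure byte streams with in-band type octets (UART/H4 style) use
 * hci_recv_stream_fragment() below instead.
 */
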
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

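/* Usage sketch (illustrative only, names hypothetical): an upper
 * protocol supplies an hci_cb with the connection hooks it cares about
 * and registers it once at module init, e.g.:
 *
 *	static struct hci_cb example_cb = {
 *		.name		= "example",
 *		.security_cfm	= example_security_cfm,
 *	};
 *
 *	hci_register_cb(&example_cb);
 */
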
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
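
/* Selection above is two-level: only channels whose head skb carries
 * the highest priority seen this pass stay candidates (num and min are
 * reset whenever a higher priority shows up), and among those the
 * channel on the least-sent connection is returned. E.g. with queued
 * head priorities { A: 5, B: 7, C: 7 }, only B and C compete.
 */
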
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
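
/* Starvation control: a channel that sent nothing in the last round
 * gets its head skb promoted to HCI_PRIO_MAX - 1, while a channel that
 * did send only has chan->sent cleared and must compete again at its
 * own priority next round.
 */
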
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
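
/* Worked example (illustrative values): with hdev->block_len == 64, a
 * 4-byte ACL header plus 300 bytes of payload costs
 * DIV_ROUND_UP(300, 64) == 5 blocks of hdev->block_cnt in
 * hci_sched_acl_blk() below.
 */
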
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
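
/* The 40.9 s figure is the largest possible link supervision timeout:
 * 0xffff slots * 0.625 ms = 40.959 s. HCI_ACL_TX_TIMEOUT (45 s,
 * assuming the definition in hci.h) is chosen larger so a
 * stalled-connection kill cannot race a still-legal supervision
 * timeout.
 */
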
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
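
/* Packet-based controllers (BR/EDR) report buffer credits per packet
 * via Number Of Completed Packets events; block-based ones (AMP)
 * report data block credits via Number Of Completed Data Blocks,
 * hence the two scheduler flavours above.
 */
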
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
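
/* Framing example: bt_cb(skb)->req.start marks the first command of
 * each request, so with cmd_q = [A(start), B, C(start), D] commands A
 * and B form one request; once both have been dequeued, C sitting at
 * the head with start set tells us the running request is complete.
 */
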
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
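
/* Note: hdev->cmd_cnt models the controller's Num_HCI_Command_Packets
 * credit (almost always 1), so at most one command is normally in
 * flight; cmd_timer fires after HCI_CMD_TIMEOUT if the controller
 * never acknowledges it.
 */
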
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
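
/* The GIAC LAP is 0x9e8b33; HCI carries the LAP little-endian on the
 * wire, hence the { 0x33, 0x8b, 0x9e } initializer above.
 */
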
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}