/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
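/* The synchronous request machinery above is a three-state handshake:
 * a waiter sets req_status to HCI_REQ_PEND and sleeps on req_wait_q;
 * hci_req_sync_complete() moves the state to HCI_REQ_DONE (with the
 * controller status in req_result) and wakes the waiter, while
 * hci_req_cancel() moves it to HCI_REQ_CANCELED with a plain errno
 * instead. Anything still pending after the sleep is treated as a
 * timeout by the waiters below.
 */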
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
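/* A minimal sketch of how a caller might use __hci_cmd_sync(): issue
 * HCI_Read_Local_Version, block until the matching Command Complete
 * arrives, then parse the returned parameters. This is only an
 * illustration (error handling kept minimal), assuming it runs from a
 * context such as a driver's hdev->setup() callback, where the request
 * lock is already held:
 *
 *	struct sk_buff *skb;
 *	struct hci_rp_read_local_version *rp;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_local_version *) skb->data;
 *	BT_DBG("hci_ver 0x%2.2x", rp->hci_ver);
 *	kfree_skb(skb);
 */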
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
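/* The event mask is a little-endian 64-bit bitfield where, for the
 * events set above, byte n bit m enables the event with code
 * (8 * n) + m + 1; for example events[4] |= 0x04 enables event 0x23,
 * Read Remote Extended Features Complete. A small sketch of that
 * mapping (illustrative only, valid for the event codes used here):
 *
 *	static bool event_enabled(const u8 events[8], u8 evt)
 *	{
 *		return events[(evt - 1) / 8] & BIT((evt - 1) % 8);
 *	}
 */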
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Only send HCI_Delete_Stored_Link_Key if it is supported */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
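/* Controller bring-up is thus a three-stage pipeline: stage one
 * (hci_init1_req) resets the controller and reads its basic
 * capabilities, stage two (hci_init2_req) configures event masks, SSP
 * and inquiry behaviour based on what stage one discovered, and stage
 * three (hci_init3_req) applies policy that depends on the feature and
 * command bits read in stage two. Each stage is a separate
 * __hci_req_sync() precisely so the replies of one stage can steer the
 * commands of the next.
 */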
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
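/* The discovery cache keeps every inquiry result on the "all" list and
 * additionally threads entries onto "unknown" (name not yet known) or
 * "resolve" (name resolution pending). hci_inquiry_cache_update_resolve()
 * keeps the resolve list sorted by RSSI magnitude, so results with a
 * stronger signal are positioned to have their names resolved first,
 * and candidates can be picked cheaply without rescanning the cache.
 */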
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
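/* Advertising data is a sequence of length/type/value elements: each
 * element starts with a length byte counting the type byte plus the
 * payload. The flags element built above is therefore
 *
 *	0x02, EIR_FLAGS, <flags>
 *
 * and a name of n bytes is
 *
 *	n + 1, EIR_NAME_SHORT or EIR_NAME_COMPLETE, <n name bytes>
 *
 * which is why create_ad() advances ptr by payload length + 2 and why
 * max_len reserves two header bytes out of HCI_MAX_AD_LENGTH.
 */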
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Check for rfkill but allow the HCI setup stage to proceed
	 * (which in itself doesn't cause any RF activity).
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
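/* The teardown order above matters: works are flushed before queues
 * are purged so nothing re-queues in between; the optional reset runs
 * with HCI_INIT set so that hci_recv_frame() still accepts the
 * controller's reply even though HCI_UP is already cleared; and the
 * last sent command is dropped only after the command work has been
 * flushed, at which point no timer or worker can still touch it.
 */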
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
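/* In summary, hci_persistent_key() stores a key permanently when it is
 * a legacy key (type < 0x03), when there is no connection to consult
 * (the security mode 3 case), or when at least one side required
 * bonding; it rejects debug keys outright and changed-combination keys
 * that have no predecessor, since those cannot belong to an existing
 * bond.
 */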
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
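/* Starting an LE scan is thus a two-command sequence: the scan
 * parameters are written first and scanning is enabled second, both
 * under the request lock so they cannot interleave with another
 * initiator. The delayed le_scan_disable work queued at the end is
 * what bounds the scan to the caller-supplied timeout.
 */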
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
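/* A minimal sketch of how a UART-style driver might feed this
 * reassembler from its receive path; my_driver_rx, my_buf and my_len
 * are hypothetical names. hci_recv_fragment() consumes the buffer in
 * header-then-payload steps and emits each completed packet through
 * hci_recv_frame() internally:
 *
 *	static void my_driver_rx(struct hci_dev *hdev, u8 *my_buf,
 *				 int my_len, int pkt_type)
 *	{
 *		int err = hci_recv_fragment(hdev, pkt_type, my_buf, my_len);
 *		if (err < 0)
 *			BT_ERR("%s reassembly failed", hdev->name);
 *	}
 */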
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
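/* A minimal sketch of building and running an asynchronous request
 * with these helpers, using a hypothetical my_req_complete callback
 * with the hci_req_complete_t signature (struct hci_dev *, u8 status).
 * Any queueing failure is latched in req->err and reported by
 * hci_req_run(), so intermediate hci_req_add() calls need no checks:
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *	err = hci_req_run(&req, my_req_complete);
 */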
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
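
/* Channel-level variant of the scheduler used for ACL, AMP and LE
 * traffic. It walks every channel of every connection of the given
 * type and considers only channels whose head-of-queue skb carries the
 * highest priority seen so far; among those candidates the channel on
 * the least-busy connection wins. The quota is then computed from the
 * matching controller buffer count, exactly as in hci_low_sent().
 */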
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
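
/* Anti-starvation pass run after a TX round that actually sent data.
 * Channels that transmitted merely have their per-round counter
 * reset, while the head skb of every channel that was left idle is
 * promoted to HCI_PRIO_MAX - 1, so low-priority queues cannot be
 * starved indefinitely by a constant stream of high-priority traffic.
 */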
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
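
/* Block-based flow control accounts for data in controller-sized
 * blocks rather than whole packets: only the ACL payload counts, and
 * the result is rounded up. For example, a queued skb of 704 bytes
 * (4-byte ACL header plus a 700-byte payload) on a controller with
 * block_len = 339 occupies DIV_ROUND_UP(700, 339) = 3 blocks.
 */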
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
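
/* ACL scheduler for packet-based flow control: each transmitted frame
 * consumes one acl_cnt credit, refilled when the controller reports
 * Number of Completed Packets. A channel may drain up to its quota but
 * stops early once the priority at the head of its queue drops below
 * the priority the round started with, so the scheduler re-evaluates
 * which channel deserves the remaining credits.
 */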
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
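
/* The TX work item is the single point where queued traffic actually
 * reaches the driver. It runs the per-link-type schedulers in a fixed
 * order (ACL, SCO, eSCO, then LE) and finally flushes raw_q, which
 * bypasses scheduling entirely and carries packets of unknown type as
 * well as traffic from devices in raw mode.
 */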
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */
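
/* Inbound frames are demultiplexed by handle: the 12-bit connection
 * handle recovered from the ACL (or SCO) header selects the hci_conn,
 * and the payload is handed to the matching upper protocol, L2CAP for
 * ACL data and SCO for synchronous data. Frames for handles the stack
 * does not know about are logged and dropped.
 */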
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
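
/* Commands issued through the hci_request framework are queued as a
 * contiguous run of skbs in cmd_q, with bt_cb(skb)->req.start set only
 * on the first command of each request. A request is therefore
 * complete when the head of cmd_q is empty or already marks the start
 * of the next request.
 */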
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
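
/* Command flow control mirrors the data path: cmd_cnt holds the
 * controller's advertised command credits, a clone of the in-flight
 * command is kept in sent_cmd so its parameters and completion
 * callback can be recovered when the Command Complete/Status event
 * arrives, and cmd_timer fires if the controller never answers.
 */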
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}