/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* ---- HCI notifications ---- */

/* Forward a device state-change event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
82 /* Execute request and wait for completion. */
83 static int __hci_req_sync(struct hci_dev *hdev,
84 void (*func)(struct hci_request *req,
86 unsigned long opt, __u32 timeout)
88 struct hci_request req;
89 DECLARE_WAITQUEUE(wait, current);
92 BT_DBG("%s start", hdev->name);
94 hci_req_init(&req, hdev);
96 hdev->req_status = HCI_REQ_PEND;
98 add_wait_queue(&hdev->req_wait_q, &wait);
99 set_current_state(TASK_INTERRUPTIBLE);
103 err = hci_req_run(&req, hci_req_sync_complete);
105 hdev->req_status = 0;
106 remove_wait_queue(&hdev->req_wait_q, &wait);
107 /* req_run will fail if the request did not add any
108 * commands to the queue, something that can happen when
109 * a request with conditionals doesn't trigger any
110 * commands to be sent. This is normal behavior and
111 * should not trigger an error return.
116 schedule_timeout(timeout);
118 remove_wait_queue(&hdev->req_wait_q, &wait);
120 if (signal_pending(current))
123 switch (hdev->req_status) {
125 err = -bt_to_errno(hdev->req_result);
128 case HCI_REQ_CANCELED:
129 err = -hdev->req_result;
137 hdev->req_status = hdev->req_result = 0;
139 BT_DBG("%s end: err %d", hdev->name, err);
144 static int hci_req_sync(struct hci_dev *hdev,
145 void (*req)(struct hci_request *req,
147 unsigned long opt, __u32 timeout)
151 if (!test_bit(HCI_UP, &hdev->flags))
154 /* Serialize all requests */
156 ret = __hci_req_sync(hdev, req, opt, timeout);
157 hci_req_unlock(hdev);
162 static void hci_reset_req(struct hci_request *req, unsigned long opt)
164 BT_DBG("%s %ld", req->hdev->name, opt);
167 set_bit(HCI_RESET, &req->hdev->flags);
168 hci_req_add(req, HCI_OP_RESET, 0, NULL);
171 static void bredr_init(struct hci_request *req)
173 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
175 /* Read Local Supported Features */
176 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
178 /* Read Local Version */
179 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
181 /* Read BD Address */
182 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
185 static void amp_init(struct hci_request *req)
187 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
189 /* Read Local Version */
190 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
192 /* Read Local AMP Info */
193 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
195 /* Read Data Blk size */
196 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
199 static void hci_init1_req(struct hci_request *req, unsigned long opt)
201 struct hci_dev *hdev = req->hdev;
202 struct hci_request init_req;
205 BT_DBG("%s %ld", hdev->name, opt);
207 /* Driver initialization */
209 hci_req_init(&init_req, hdev);
211 /* Special commands */
212 while ((skb = skb_dequeue(&hdev->driver_init))) {
213 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
214 skb->dev = (void *) hdev;
216 if (skb_queue_empty(&init_req.cmd_q))
217 bt_cb(skb)->req.start = true;
219 skb_queue_tail(&init_req.cmd_q, skb);
221 skb_queue_purge(&hdev->driver_init);
223 hci_req_run(&init_req, NULL);
226 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
227 hci_reset_req(req, 0);
229 switch (hdev->dev_type) {
239 BT_ERR("Unknown device type %d", hdev->dev_type);
244 static void bredr_setup(struct hci_request *req)
246 struct hci_cp_delete_stored_link_key cp;
250 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
251 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
253 /* Read Class of Device */
254 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
256 /* Read Local Name */
257 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
259 /* Read Voice Setting */
260 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
262 /* Clear Event Filters */
263 flt_type = HCI_FLT_CLEAR_ALL;
264 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
266 /* Connection accept timeout ~20 secs */
267 param = __constant_cpu_to_le16(0x7d00);
268 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
270 bacpy(&cp.bdaddr, BDADDR_ANY);
271 cp.delete_all = 0x01;
272 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
275 static void le_setup(struct hci_request *req)
277 /* Read LE Buffer Size */
278 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
280 /* Read LE Local Supported Features */
281 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
283 /* Read LE Advertising Channel TX Power */
284 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
286 /* Read LE White List Size */
287 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
289 /* Read LE Supported States */
290 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
293 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
295 if (lmp_ext_inq_capable(hdev))
298 if (lmp_inq_rssi_capable(hdev))
301 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
302 hdev->lmp_subver == 0x0757)
305 if (hdev->manufacturer == 15) {
306 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
308 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
310 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
314 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
315 hdev->lmp_subver == 0x1805)
321 static void hci_setup_inquiry_mode(struct hci_request *req)
325 mode = hci_get_inquiry_mode(req->hdev);
327 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
330 static void hci_setup_event_mask(struct hci_request *req)
332 struct hci_dev *hdev = req->hdev;
334 /* The second byte is 0xff instead of 0x9f (two reserved bits
335 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
338 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
340 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
341 * any event mask for pre 1.2 devices.
343 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
346 if (lmp_bredr_capable(hdev)) {
347 events[4] |= 0x01; /* Flow Specification Complete */
348 events[4] |= 0x02; /* Inquiry Result with RSSI */
349 events[4] |= 0x04; /* Read Remote Extended Features Complete */
350 events[5] |= 0x08; /* Synchronous Connection Complete */
351 events[5] |= 0x10; /* Synchronous Connection Changed */
354 if (lmp_inq_rssi_capable(hdev))
355 events[4] |= 0x02; /* Inquiry Result with RSSI */
357 if (lmp_sniffsubr_capable(hdev))
358 events[5] |= 0x20; /* Sniff Subrating */
360 if (lmp_pause_enc_capable(hdev))
361 events[5] |= 0x80; /* Encryption Key Refresh Complete */
363 if (lmp_ext_inq_capable(hdev))
364 events[5] |= 0x40; /* Extended Inquiry Result */
366 if (lmp_no_flush_capable(hdev))
367 events[7] |= 0x01; /* Enhanced Flush Complete */
369 if (lmp_lsto_capable(hdev))
370 events[6] |= 0x80; /* Link Supervision Timeout Changed */
372 if (lmp_ssp_capable(hdev)) {
373 events[6] |= 0x01; /* IO Capability Request */
374 events[6] |= 0x02; /* IO Capability Response */
375 events[6] |= 0x04; /* User Confirmation Request */
376 events[6] |= 0x08; /* User Passkey Request */
377 events[6] |= 0x10; /* Remote OOB Data Request */
378 events[6] |= 0x20; /* Simple Pairing Complete */
379 events[7] |= 0x04; /* User Passkey Notification */
380 events[7] |= 0x08; /* Keypress Notification */
381 events[7] |= 0x10; /* Remote Host Supported
382 * Features Notification
386 if (lmp_le_capable(hdev))
387 events[7] |= 0x20; /* LE Meta-Event */
389 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
391 if (lmp_le_capable(hdev)) {
392 memset(events, 0, sizeof(events));
394 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
395 sizeof(events), events);
399 static void hci_init2_req(struct hci_request *req, unsigned long opt)
401 struct hci_dev *hdev = req->hdev;
403 if (lmp_bredr_capable(hdev))
406 if (lmp_le_capable(hdev))
409 hci_setup_event_mask(req);
411 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
412 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
414 if (lmp_ssp_capable(hdev)) {
415 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
417 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
418 sizeof(mode), &mode);
420 struct hci_cp_write_eir cp;
422 memset(hdev->eir, 0, sizeof(hdev->eir));
423 memset(&cp, 0, sizeof(cp));
425 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
429 if (lmp_inq_rssi_capable(hdev))
430 hci_setup_inquiry_mode(req);
432 if (lmp_inq_tx_pwr_capable(hdev))
433 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
435 if (lmp_ext_feat_capable(hdev)) {
436 struct hci_cp_read_local_ext_features cp;
439 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
443 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
445 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
450 static void hci_setup_link_policy(struct hci_request *req)
452 struct hci_dev *hdev = req->hdev;
453 struct hci_cp_write_def_link_policy cp;
456 if (lmp_rswitch_capable(hdev))
457 link_policy |= HCI_LP_RSWITCH;
458 if (lmp_hold_capable(hdev))
459 link_policy |= HCI_LP_HOLD;
460 if (lmp_sniff_capable(hdev))
461 link_policy |= HCI_LP_SNIFF;
462 if (lmp_park_capable(hdev))
463 link_policy |= HCI_LP_PARK;
465 cp.policy = cpu_to_le16(link_policy);
466 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
469 static void hci_set_le_support(struct hci_request *req)
471 struct hci_dev *hdev = req->hdev;
472 struct hci_cp_write_le_host_supported cp;
474 memset(&cp, 0, sizeof(cp));
476 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
478 cp.simul = lmp_le_br_capable(hdev);
481 if (cp.le != lmp_host_le_capable(hdev))
482 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
486 static void hci_init3_req(struct hci_request *req, unsigned long opt)
488 struct hci_dev *hdev = req->hdev;
490 if (hdev->commands[5] & 0x10)
491 hci_setup_link_policy(req);
493 if (lmp_le_capable(hdev))
494 hci_set_le_support(req);
497 static int __hci_init(struct hci_dev *hdev)
501 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
505 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
506 * BR/EDR/LE type controllers. AMP controllers only need the
509 if (hdev->dev_type != HCI_BREDR)
512 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
516 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
519 static void hci_scan_req(struct hci_request *req, unsigned long opt)
523 BT_DBG("%s %x", req->hdev->name, scan);
525 /* Inquiry and Page scans */
526 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
529 static void hci_auth_req(struct hci_request *req, unsigned long opt)
533 BT_DBG("%s %x", req->hdev->name, auth);
536 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
539 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
543 BT_DBG("%s %x", req->hdev->name, encrypt);
546 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
549 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
551 __le16 policy = cpu_to_le16(opt);
553 BT_DBG("%s %x", req->hdev->name, policy);
555 /* Default link policy */
556 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
559 /* Get HCI device by index.
560 * Device is held on return. */
561 struct hci_dev *hci_dev_get(int index)
563 struct hci_dev *hdev = NULL, *d;
570 read_lock(&hci_dev_list_lock);
571 list_for_each_entry(d, &hci_dev_list, list) {
572 if (d->id == index) {
573 hdev = hci_dev_hold(d);
577 read_unlock(&hci_dev_list_lock);
581 /* ---- Inquiry support ---- */
583 bool hci_discovery_active(struct hci_dev *hdev)
585 struct discovery_state *discov = &hdev->discovery;
587 switch (discov->state) {
588 case DISCOVERY_FINDING:
589 case DISCOVERY_RESOLVING:
597 void hci_discovery_set_state(struct hci_dev *hdev, int state)
599 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
601 if (hdev->discovery.state == state)
605 case DISCOVERY_STOPPED:
606 if (hdev->discovery.state != DISCOVERY_STARTING)
607 mgmt_discovering(hdev, 0);
609 case DISCOVERY_STARTING:
611 case DISCOVERY_FINDING:
612 mgmt_discovering(hdev, 1);
614 case DISCOVERY_RESOLVING:
616 case DISCOVERY_STOPPING:
620 hdev->discovery.state = state;
623 static void inquiry_cache_flush(struct hci_dev *hdev)
625 struct discovery_state *cache = &hdev->discovery;
626 struct inquiry_entry *p, *n;
628 list_for_each_entry_safe(p, n, &cache->all, all) {
633 INIT_LIST_HEAD(&cache->unknown);
634 INIT_LIST_HEAD(&cache->resolve);
637 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
640 struct discovery_state *cache = &hdev->discovery;
641 struct inquiry_entry *e;
643 BT_DBG("cache %p, %pMR", cache, bdaddr);
645 list_for_each_entry(e, &cache->all, all) {
646 if (!bacmp(&e->data.bdaddr, bdaddr))
653 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
656 struct discovery_state *cache = &hdev->discovery;
657 struct inquiry_entry *e;
659 BT_DBG("cache %p, %pMR", cache, bdaddr);
661 list_for_each_entry(e, &cache->unknown, list) {
662 if (!bacmp(&e->data.bdaddr, bdaddr))
669 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
673 struct discovery_state *cache = &hdev->discovery;
674 struct inquiry_entry *e;
676 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
678 list_for_each_entry(e, &cache->resolve, list) {
679 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
681 if (!bacmp(&e->data.bdaddr, bdaddr))
688 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
689 struct inquiry_entry *ie)
691 struct discovery_state *cache = &hdev->discovery;
692 struct list_head *pos = &cache->resolve;
693 struct inquiry_entry *p;
697 list_for_each_entry(p, &cache->resolve, list) {
698 if (p->name_state != NAME_PENDING &&
699 abs(p->data.rssi) >= abs(ie->data.rssi))
704 list_add(&ie->list, pos);
707 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
708 bool name_known, bool *ssp)
710 struct discovery_state *cache = &hdev->discovery;
711 struct inquiry_entry *ie;
713 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
715 hci_remove_remote_oob_data(hdev, &data->bdaddr);
718 *ssp = data->ssp_mode;
720 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
722 if (ie->data.ssp_mode && ssp)
725 if (ie->name_state == NAME_NEEDED &&
726 data->rssi != ie->data.rssi) {
727 ie->data.rssi = data->rssi;
728 hci_inquiry_cache_update_resolve(hdev, ie);
734 /* Entry not in the cache. Add new one. */
735 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
739 list_add(&ie->all, &cache->all);
742 ie->name_state = NAME_KNOWN;
744 ie->name_state = NAME_NOT_KNOWN;
745 list_add(&ie->list, &cache->unknown);
749 if (name_known && ie->name_state != NAME_KNOWN &&
750 ie->name_state != NAME_PENDING) {
751 ie->name_state = NAME_KNOWN;
755 memcpy(&ie->data, data, sizeof(*data));
756 ie->timestamp = jiffies;
757 cache->timestamp = jiffies;
759 if (ie->name_state == NAME_NOT_KNOWN)
765 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
767 struct discovery_state *cache = &hdev->discovery;
768 struct inquiry_info *info = (struct inquiry_info *) buf;
769 struct inquiry_entry *e;
772 list_for_each_entry(e, &cache->all, all) {
773 struct inquiry_data *data = &e->data;
778 bacpy(&info->bdaddr, &data->bdaddr);
779 info->pscan_rep_mode = data->pscan_rep_mode;
780 info->pscan_period_mode = data->pscan_period_mode;
781 info->pscan_mode = data->pscan_mode;
782 memcpy(info->dev_class, data->dev_class, 3);
783 info->clock_offset = data->clock_offset;
789 BT_DBG("cache %p, copied %d", cache, copied);
793 static void hci_inq_req(struct hci_request *req, unsigned long opt)
795 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
796 struct hci_dev *hdev = req->hdev;
797 struct hci_cp_inquiry cp;
799 BT_DBG("%s", hdev->name);
801 if (test_bit(HCI_INQUIRY, &hdev->flags))
805 memcpy(&cp.lap, &ir->lap, 3);
806 cp.length = ir->length;
807 cp.num_rsp = ir->num_rsp;
808 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
811 int hci_inquiry(void __user *arg)
813 __u8 __user *ptr = arg;
814 struct hci_inquiry_req ir;
815 struct hci_dev *hdev;
816 int err = 0, do_inquiry = 0, max_rsp;
820 if (copy_from_user(&ir, ptr, sizeof(ir)))
823 hdev = hci_dev_get(ir.dev_id);
828 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
829 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
830 inquiry_cache_flush(hdev);
833 hci_dev_unlock(hdev);
835 timeo = ir.length * msecs_to_jiffies(2000);
838 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
844 /* for unlimited number of responses we will use buffer with
847 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
849 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
850 * copy it to the user space.
852 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
859 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
860 hci_dev_unlock(hdev);
862 BT_DBG("num_rsp %d", ir.num_rsp);
864 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
866 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
879 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
881 u8 ad_len = 0, flags = 0;
884 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
885 flags |= LE_AD_GENERAL;
887 if (!lmp_bredr_capable(hdev))
888 flags |= LE_AD_NO_BREDR;
890 if (lmp_le_br_capable(hdev))
891 flags |= LE_AD_SIM_LE_BREDR_CTRL;
893 if (lmp_host_le_br_capable(hdev))
894 flags |= LE_AD_SIM_LE_BREDR_HOST;
897 BT_DBG("adv flags 0x%02x", flags);
907 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
909 ptr[1] = EIR_TX_POWER;
910 ptr[2] = (u8) hdev->adv_tx_power;
916 name_len = strlen(hdev->dev_name);
918 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
920 if (name_len > max_len) {
922 ptr[1] = EIR_NAME_SHORT;
924 ptr[1] = EIR_NAME_COMPLETE;
926 ptr[0] = name_len + 1;
928 memcpy(ptr + 2, hdev->dev_name, name_len);
930 ad_len += (name_len + 2);
931 ptr += (name_len + 2);
937 int hci_update_ad(struct hci_dev *hdev)
939 struct hci_cp_le_set_adv_data cp;
945 if (!lmp_le_capable(hdev)) {
950 memset(&cp, 0, sizeof(cp));
952 len = create_ad(hdev, cp.data);
954 if (hdev->adv_data_len == len &&
955 memcmp(cp.data, hdev->adv_data, len) == 0) {
960 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
961 hdev->adv_data_len = len;
964 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
967 hci_dev_unlock(hdev);
972 /* ---- HCI ioctl helpers ---- */
974 int hci_dev_open(__u16 dev)
976 struct hci_dev *hdev;
979 hdev = hci_dev_get(dev);
983 BT_DBG("%s %p", hdev->name, hdev);
987 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
992 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
997 if (test_bit(HCI_UP, &hdev->flags)) {
1002 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1003 set_bit(HCI_RAW, &hdev->flags);
1005 /* Treat all non BR/EDR controllers as raw devices if
1006 enable_hs is not set */
1007 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1008 set_bit(HCI_RAW, &hdev->flags);
1010 if (hdev->open(hdev)) {
1015 if (!test_bit(HCI_RAW, &hdev->flags)) {
1016 atomic_set(&hdev->cmd_cnt, 1);
1017 set_bit(HCI_INIT, &hdev->flags);
1018 hdev->init_last_cmd = 0;
1020 ret = __hci_init(hdev);
1022 clear_bit(HCI_INIT, &hdev->flags);
1027 set_bit(HCI_UP, &hdev->flags);
1028 hci_notify(hdev, HCI_DEV_UP);
1029 hci_update_ad(hdev);
1030 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1031 mgmt_valid_hdev(hdev)) {
1033 mgmt_powered(hdev, 1);
1034 hci_dev_unlock(hdev);
1037 /* Init failed, cleanup */
1038 flush_work(&hdev->tx_work);
1039 flush_work(&hdev->cmd_work);
1040 flush_work(&hdev->rx_work);
1042 skb_queue_purge(&hdev->cmd_q);
1043 skb_queue_purge(&hdev->rx_q);
1048 if (hdev->sent_cmd) {
1049 kfree_skb(hdev->sent_cmd);
1050 hdev->sent_cmd = NULL;
1058 hci_req_unlock(hdev);
1063 static int hci_dev_do_close(struct hci_dev *hdev)
1065 BT_DBG("%s %p", hdev->name, hdev);
1067 cancel_work_sync(&hdev->le_scan);
1069 cancel_delayed_work(&hdev->power_off);
1071 hci_req_cancel(hdev, ENODEV);
1074 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1075 del_timer_sync(&hdev->cmd_timer);
1076 hci_req_unlock(hdev);
1080 /* Flush RX and TX works */
1081 flush_work(&hdev->tx_work);
1082 flush_work(&hdev->rx_work);
1084 if (hdev->discov_timeout > 0) {
1085 cancel_delayed_work(&hdev->discov_off);
1086 hdev->discov_timeout = 0;
1087 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1090 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1091 cancel_delayed_work(&hdev->service_cache);
1093 cancel_delayed_work_sync(&hdev->le_scan_disable);
1096 inquiry_cache_flush(hdev);
1097 hci_conn_hash_flush(hdev);
1098 hci_dev_unlock(hdev);
1100 hci_notify(hdev, HCI_DEV_DOWN);
1106 skb_queue_purge(&hdev->cmd_q);
1107 atomic_set(&hdev->cmd_cnt, 1);
1108 if (!test_bit(HCI_RAW, &hdev->flags) &&
1109 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1110 set_bit(HCI_INIT, &hdev->flags);
1111 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1112 clear_bit(HCI_INIT, &hdev->flags);
1115 /* flush cmd work */
1116 flush_work(&hdev->cmd_work);
1119 skb_queue_purge(&hdev->rx_q);
1120 skb_queue_purge(&hdev->cmd_q);
1121 skb_queue_purge(&hdev->raw_q);
1123 /* Drop last sent command */
1124 if (hdev->sent_cmd) {
1125 del_timer_sync(&hdev->cmd_timer);
1126 kfree_skb(hdev->sent_cmd);
1127 hdev->sent_cmd = NULL;
1130 /* After this point our queues are empty
1131 * and no tasks are scheduled. */
1134 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1135 mgmt_valid_hdev(hdev)) {
1137 mgmt_powered(hdev, 0);
1138 hci_dev_unlock(hdev);
1144 /* Controller radio is available but is currently powered down */
1145 hdev->amp_status = 0;
1147 memset(hdev->eir, 0, sizeof(hdev->eir));
1148 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1150 hci_req_unlock(hdev);
1156 int hci_dev_close(__u16 dev)
1158 struct hci_dev *hdev;
1161 hdev = hci_dev_get(dev);
1165 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1166 cancel_delayed_work(&hdev->power_off);
1168 err = hci_dev_do_close(hdev);
1174 int hci_dev_reset(__u16 dev)
1176 struct hci_dev *hdev;
1179 hdev = hci_dev_get(dev);
1185 if (!test_bit(HCI_UP, &hdev->flags))
1189 skb_queue_purge(&hdev->rx_q);
1190 skb_queue_purge(&hdev->cmd_q);
1193 inquiry_cache_flush(hdev);
1194 hci_conn_hash_flush(hdev);
1195 hci_dev_unlock(hdev);
1200 atomic_set(&hdev->cmd_cnt, 1);
1201 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1203 if (!test_bit(HCI_RAW, &hdev->flags))
1204 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1207 hci_req_unlock(hdev);
1212 int hci_dev_reset_stat(__u16 dev)
1214 struct hci_dev *hdev;
1217 hdev = hci_dev_get(dev);
1221 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1228 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1230 struct hci_dev *hdev;
1231 struct hci_dev_req dr;
1234 if (copy_from_user(&dr, arg, sizeof(dr)))
1237 hdev = hci_dev_get(dr.dev_id);
1243 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1248 if (!lmp_encrypt_capable(hdev)) {
1253 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1254 /* Auth must be enabled first */
1255 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1261 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1266 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1271 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1275 case HCISETLINKMODE:
1276 hdev->link_mode = ((__u16) dr.dev_opt) &
1277 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1281 hdev->pkt_type = (__u16) dr.dev_opt;
1285 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1286 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1290 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1291 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1303 int hci_get_dev_list(void __user *arg)
1305 struct hci_dev *hdev;
1306 struct hci_dev_list_req *dl;
1307 struct hci_dev_req *dr;
1308 int n = 0, size, err;
1311 if (get_user(dev_num, (__u16 __user *) arg))
1314 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1317 size = sizeof(*dl) + dev_num * sizeof(*dr);
1319 dl = kzalloc(size, GFP_KERNEL);
1325 read_lock(&hci_dev_list_lock);
1326 list_for_each_entry(hdev, &hci_dev_list, list) {
1327 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1328 cancel_delayed_work(&hdev->power_off);
1330 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1331 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1333 (dr + n)->dev_id = hdev->id;
1334 (dr + n)->dev_opt = hdev->flags;
1339 read_unlock(&hci_dev_list_lock);
1342 size = sizeof(*dl) + n * sizeof(*dr);
1344 err = copy_to_user(arg, dl, size);
1347 return err ? -EFAULT : 0;
1350 int hci_get_dev_info(void __user *arg)
1352 struct hci_dev *hdev;
1353 struct hci_dev_info di;
1356 if (copy_from_user(&di, arg, sizeof(di)))
1359 hdev = hci_dev_get(di.dev_id);
1363 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1364 cancel_delayed_work_sync(&hdev->power_off);
1366 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1367 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1369 strcpy(di.name, hdev->name);
1370 di.bdaddr = hdev->bdaddr;
1371 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1372 di.flags = hdev->flags;
1373 di.pkt_type = hdev->pkt_type;
1374 if (lmp_bredr_capable(hdev)) {
1375 di.acl_mtu = hdev->acl_mtu;
1376 di.acl_pkts = hdev->acl_pkts;
1377 di.sco_mtu = hdev->sco_mtu;
1378 di.sco_pkts = hdev->sco_pkts;
1380 di.acl_mtu = hdev->le_mtu;
1381 di.acl_pkts = hdev->le_pkts;
1385 di.link_policy = hdev->link_policy;
1386 di.link_mode = hdev->link_mode;
1388 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1389 memcpy(&di.features, &hdev->features, sizeof(di.features));
1391 if (copy_to_user(arg, &di, sizeof(di)))
1399 /* ---- Interface to HCI drivers ---- */
1401 static int hci_rfkill_set_block(void *data, bool blocked)
1403 struct hci_dev *hdev = data;
1405 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1410 hci_dev_do_close(hdev);
1415 static const struct rfkill_ops hci_rfkill_ops = {
1416 .set_block = hci_rfkill_set_block,
1419 static void hci_power_on(struct work_struct *work)
1421 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1423 BT_DBG("%s", hdev->name);
1425 if (hci_dev_open(hdev->id) < 0)
1428 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1429 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1430 HCI_AUTO_OFF_TIMEOUT);
1432 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1433 mgmt_index_added(hdev);
1436 static void hci_power_off(struct work_struct *work)
1438 struct hci_dev *hdev = container_of(work, struct hci_dev,
1441 BT_DBG("%s", hdev->name);
1443 hci_dev_do_close(hdev);
1446 static void hci_discov_off(struct work_struct *work)
1448 struct hci_dev *hdev;
1449 u8 scan = SCAN_PAGE;
1451 hdev = container_of(work, struct hci_dev, discov_off.work);
1453 BT_DBG("%s", hdev->name);
1457 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1459 hdev->discov_timeout = 0;
1461 hci_dev_unlock(hdev);
1464 int hci_uuids_clear(struct hci_dev *hdev)
1466 struct bt_uuid *uuid, *tmp;
1468 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1469 list_del(&uuid->list);
1476 int hci_link_keys_clear(struct hci_dev *hdev)
1478 struct list_head *p, *n;
1480 list_for_each_safe(p, n, &hdev->link_keys) {
1481 struct link_key *key;
1483 key = list_entry(p, struct link_key, list);
1492 int hci_smp_ltks_clear(struct hci_dev *hdev)
1494 struct smp_ltk *k, *tmp;
1496 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1504 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1508 list_for_each_entry(k, &hdev->link_keys, list)
1509 if (bacmp(bdaddr, &k->bdaddr) == 0)
1515 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1516 u8 key_type, u8 old_key_type)
1519 if (key_type < 0x03)
1522 /* Debug keys are insecure so don't store them persistently */
1523 if (key_type == HCI_LK_DEBUG_COMBINATION)
1526 /* Changed combination key and there's no previous one */
1527 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1530 /* Security mode 3 case */
1534 /* Neither local nor remote side had no-bonding as requirement */
1535 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1538 /* Local side had dedicated bonding as requirement */
1539 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1542 /* Remote side had dedicated bonding as requirement */
1543 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1546 /* If none of the above criteria match, then don't store the key
1551 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1555 list_for_each_entry(k, &hdev->long_term_keys, list) {
1556 if (k->ediv != ediv ||
1557 memcmp(rand, k->rand, sizeof(k->rand)))
1566 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1571 list_for_each_entry(k, &hdev->long_term_keys, list)
1572 if (addr_type == k->bdaddr_type &&
1573 bacmp(bdaddr, &k->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for @bdaddr.
 * Reuses an existing entry when one is found, otherwise allocates a new
 * one and links it into hdev->link_keys. Notifies the management
 * interface of new keys and records whether the key is persistent. */
1579 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1580 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1582 struct link_key *key, *old_key;
1586 old_key = hci_find_link_key(hdev, bdaddr);
1588 old_key_type = old_key->type;
/* No stored key: derive the "old" type from the connection, or 0xff
 * (no previous key) when there is no connection context. */
1591 old_key_type = conn ? conn->key_type : 0xff;
1592 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1595 list_add(&key->list, &hdev->link_keys);
1598 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1600 /* Some buggy controller combinations generate a changed
1601 * combination key for legacy pairing even when there's no
/* Workaround: downgrade a spurious "changed combination" key to a plain
 * combination key when there was no prior key (old_key_type == 0xff). */
1603 if (type == HCI_LK_CHANGED_COMBINATION &&
1604 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1605 type = HCI_LK_COMBINATION;
1607 conn->key_type = type;
1610 bacpy(&key->bdaddr, bdaddr);
1611 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1612 key->pin_len = pin_len;
/* A changed-combination event does not change the key's class; keep
 * the previously stored type. */
1614 if (type == HCI_LK_CHANGED_COMBINATION)
1615 key->type = old_key_type;
1622 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1624 mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent keys are flushed when the connection goes down. */
1627 conn->flush_key = !persistent;
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 * Rejects types that are neither HCI_SMP_STK nor HCI_SMP_LTK; reuses an
 * existing entry for the address when present, otherwise allocates one. */
1632 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1633 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1636 struct smp_ltk *key, *old_key;
/* Only STK and LTK key types are stored here. */
1638 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1641 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1645 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1648 list_add(&key->list, &hdev->long_term_keys);
1651 bacpy(&key->bdaddr, bdaddr);
1652 key->bdaddr_type = addr_type;
1653 memcpy(key->val, tk, sizeof(key->val));
1654 key->authenticated = authenticated;
1656 key->enc_size = enc_size;
1658 memcpy(key->rand, rand, sizeof(key->rand));
/* Only long-term keys (not short-term keys) are reported to mgmt. */
1663 if (type & HCI_SMP_LTK)
1664 mgmt_new_ltk(hdev, key, 1);
/* Remove the stored BR/EDR link key for @bdaddr, if any. */
1669 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1671 struct link_key *key;
1673 key = hci_find_link_key(hdev, bdaddr);
1677 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1679 list_del(&key->list);
/* Remove all stored SMP LTKs matching @bdaddr. Uses the _safe list
 * iterator because entries are deleted while walking. */
1685 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1687 struct smp_ltk *k, *tmp;
1689 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1690 if (bacmp(bdaddr, &k->bdaddr))
1693 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1702 /* HCI command timer function */
/* Fires when a sent HCI command got no response in time. Logs the stuck
 * opcode (if a command is outstanding), then resets cmd_cnt to 1 and
 * kicks cmd_work so command processing can resume. */
1703 static void hci_cmd_timeout(unsigned long arg)
1705 struct hci_dev *hdev = (void *) arg;
1707 if (hdev->sent_cmd) {
1708 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1709 u16 opcode = __le16_to_cpu(sent->opcode);
1711 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1713 BT_ERR("%s command tx timeout", hdev->name);
/* Allow one more command to be sent and restart the command worker. */
1716 atomic_set(&hdev->cmd_cnt, 1);
1717 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote Out-of-Band pairing data by remote address. */
1720 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1723 struct oob_data *data;
1725 list_for_each_entry(data, &hdev->remote_oob_data, list)
1726 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove stored remote OOB data for @bdaddr, if any. */
1732 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1734 struct oob_data *data;
1736 data = hci_find_remote_oob_data(hdev, bdaddr);
1740 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1742 list_del(&data->list);
/* Drop all stored remote OOB data entries for this controller. */
1748 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1750 struct oob_data *data, *n;
1752 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1753 list_del(&data->list);
/* Store (or replace) remote OOB pairing data (hash + randomizer) for
 * @bdaddr. Reuses an existing entry when present, otherwise allocates
 * one with GFP_ATOMIC and links it into hdev->remote_oob_data. */
1760 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1763 struct oob_data *data;
1765 data = hci_find_remote_oob_data(hdev, bdaddr);
1768 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1772 bacpy(&data->bdaddr, bdaddr);
1773 list_add(&data->list, &hdev->remote_oob_data);
1776 memcpy(data->hash, hash, sizeof(data->hash));
1777 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1779 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Return the blacklist entry for @bdaddr, if the address is blocked. */
1784 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1786 struct bdaddr_list *b;
1788 list_for_each_entry(b, &hdev->blacklist, list)
1789 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Remove every entry from the device blacklist. Uses the _safe
 * iterator since entries are freed while walking. */
1795 int hci_blacklist_clear(struct hci_dev *hdev)
1797 struct list_head *p, *n;
1799 list_for_each_safe(p, n, &hdev->blacklist) {
1800 struct bdaddr_list *b;
1802 b = list_entry(p, struct bdaddr_list, list);
/* Block @bdaddr: add it to the blacklist and notify mgmt.
 * Rejects BDADDR_ANY and addresses that are already blacklisted. */
1811 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1813 struct bdaddr_list *entry;
1815 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1818 if (hci_blacklist_lookup(hdev, bdaddr))
1821 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1825 bacpy(&entry->bdaddr, bdaddr);
1827 list_add(&entry->list, &hdev->blacklist);
1829 return mgmt_device_blocked(hdev, bdaddr, type);
/* Unblock @bdaddr and notify mgmt. BDADDR_ANY acts as a wildcard and
 * clears the entire blacklist instead. */
1832 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1834 struct bdaddr_list *entry;
1836 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1837 return hci_blacklist_clear(hdev);
1839 entry = hci_blacklist_lookup(hdev, bdaddr);
1843 list_del(&entry->list);
1846 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request builder: queue an LE Set Scan Parameters command built from
 * the le_scan_params passed through @opt. */
1849 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1851 struct le_scan_params *param = (struct le_scan_params *) opt;
1852 struct hci_cp_le_set_scan_param cp;
1854 memset(&cp, 0, sizeof(cp));
1855 cp.type = param->type;
1856 cp.interval = cpu_to_le16(param->interval);
1857 cp.window = cpu_to_le16(param->window);
1859 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
/* Request builder: queue an LE Set Scan Enable command.
 * NOTE(review): the elided lines presumably set cp.enable — confirm. */
1862 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1864 struct hci_cp_le_set_scan_enable cp;
1866 memset(&cp, 0, sizeof(cp));
1870 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Start an LE scan synchronously: set scan parameters, enable scanning,
 * then (for a finite @timeout, in ms) schedule le_scan_disable to stop
 * the scan later. Returns -EINPROGRESS if a scan is already running. */
1873 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1874 u16 window, int timeout)
/* 3s bound for each synchronous request below. */
1876 long timeo = msecs_to_jiffies(3000);
1877 struct le_scan_params param;
1880 BT_DBG("%s", hdev->name);
1882 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1883 return -EINPROGRESS;
1886 param.interval = interval;
1887 param.window = window;
1891 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1894 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1896 hci_req_unlock(hdev);
1901 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1902 msecs_to_jiffies(timeout));
/* Cancel a running LE scan. If the scheduled auto-disable work was
 * still pending, cancel it and send the disable command directly
 * (cp zeroed => enable = 0). No-op when no scan is active. */
1907 int hci_cancel_le_scan(struct hci_dev *hdev)
1909 BT_DBG("%s", hdev->name);
1911 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1914 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1915 struct hci_cp_le_set_scan_enable cp;
1917 /* Send HCI command to disable LE Scan */
1918 memset(&cp, 0, sizeof(cp));
1919 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed-work handler that stops an LE scan after its timeout:
 * sends LE Set Scan Enable with a zeroed parameter block (enable = 0). */
1925 static void le_scan_disable_work(struct work_struct *work)
1927 struct hci_dev *hdev = container_of(work, struct hci_dev,
1928 le_scan_disable.work);
1929 struct hci_cp_le_set_scan_enable cp;
1931 BT_DBG("%s", hdev->name);
1933 memset(&cp, 0, sizeof(cp));
1935 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Workqueue handler that performs the LE scan queued by hci_le_scan(),
 * using the parameters stashed in hdev->le_scan_params. */
1938 static void le_scan_work(struct work_struct *work)
1940 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1941 struct le_scan_params *param = &hdev->le_scan_params;
1943 BT_DBG("%s", hdev->name);
1945 hci_do_le_scan(hdev, param->type, param->interval, param->window,
/* Asynchronously start an LE scan: record the parameters on hdev and
 * queue le_scan_work on system_long_wq. Refused while the controller
 * is in LE peripheral mode, or while a previous scan work is busy. */
1949 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1952 struct le_scan_params *param = &hdev->le_scan_params;
1954 BT_DBG("%s", hdev->name);
1956 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1959 if (work_busy(&hdev->le_scan))
1960 return -EINPROGRESS;
1963 param->interval = interval;
1964 param->window = window;
1965 param->timeout = timeout;
/* Scans can run long; use the system long-running workqueue. */
1967 queue_work(system_long_wq, &hdev->le_scan);
1972 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev: sets protocol defaults
 * (packet types, IO capability, sniff intervals), initializes all the
 * lists, work items, skb queues, the command timer, sysfs state and
 * discovery state. Counterpart of hci_free_dev(). */
1973 struct hci_dev *hci_alloc_dev(void)
1975 struct hci_dev *hdev;
1977 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Baseline BR/EDR packet types and link policy defaults. */
1981 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1982 hdev->esco_type = (ESCO_HV1);
1983 hdev->link_mode = (HCI_LM_ACCEPT);
1984 hdev->io_capability = 0x03; /* No Input No Output */
1985 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1986 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff interval bounds in baseband slots. */
1988 hdev->sniff_max_interval = 800;
1989 hdev->sniff_min_interval = 80;
1991 mutex_init(&hdev->lock);
1992 mutex_init(&hdev->req_lock);
1994 INIT_LIST_HEAD(&hdev->mgmt_pending);
1995 INIT_LIST_HEAD(&hdev->blacklist);
1996 INIT_LIST_HEAD(&hdev->uuids);
1997 INIT_LIST_HEAD(&hdev->link_keys);
1998 INIT_LIST_HEAD(&hdev->long_term_keys);
1999 INIT_LIST_HEAD(&hdev->remote_oob_data);
2000 INIT_LIST_HEAD(&hdev->conn_hash.list);
2002 INIT_WORK(&hdev->rx_work, hci_rx_work);
2003 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2004 INIT_WORK(&hdev->tx_work, hci_tx_work);
2005 INIT_WORK(&hdev->power_on, hci_power_on);
2006 INIT_WORK(&hdev->le_scan, le_scan_work);
2008 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2009 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2010 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2012 skb_queue_head_init(&hdev->driver_init);
2013 skb_queue_head_init(&hdev->rx_q);
2014 skb_queue_head_init(&hdev->cmd_q);
2015 skb_queue_head_init(&hdev->raw_q);
2017 init_waitqueue_head(&hdev->req_wait_q);
/* Command timeout timer; hdev is passed back as the timer argument. */
2019 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2021 hci_init_sysfs(hdev);
2022 discovery_init(hdev);
2026 EXPORT_SYMBOL(hci_alloc_dev);
2028 /* Free HCI device */
/* Release an hci_dev: purge the driver-init queue and drop the device
 * reference; the actual kfree happens in the device release callback. */
2029 void hci_free_dev(struct hci_dev *hdev)
2031 skb_queue_purge(&hdev->driver_init);
2033 /* will free via device release */
2034 put_device(&hdev->dev);
2036 EXPORT_SYMBOL(hci_free_dev);
2038 /* Register HCI device */
/* Register a controller with the core: allocate an index (hciN name),
 * add it to hci_dev_list, create its workqueues, sysfs entry and rfkill
 * switch, then queue the initial power-on. On failure the workqueues,
 * index and list membership are torn down again (error-path labels are
 * elided in this excerpt). */
2039 int hci_register_dev(struct hci_dev *hdev)
2043 if (!hdev->open || !hdev->close)
2046 /* Do not allow HCI_AMP devices to register at index 0,
2047 * so the index can be used as the AMP controller ID.
2049 switch (hdev->dev_type) {
2051 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
/* AMP controllers start at index 1 (see comment above). */
2054 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2063 sprintf(hdev->name, "hci%d", id);
2066 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2068 write_lock(&hci_dev_list_lock);
2069 list_add(&hdev->list, &hci_dev_list);
2070 write_unlock(&hci_dev_list_lock);
2072 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2074 if (!hdev->workqueue) {
/* Separate workqueue for synchronous HCI requests. */
2079 hdev->req_workqueue = alloc_workqueue(hdev->name,
2080 WQ_HIGHPRI | WQ_UNBOUND |
2082 if (!hdev->req_workqueue) {
2083 destroy_workqueue(hdev->workqueue);
2088 error = hci_add_sysfs(hdev);
2092 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2093 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill is optional: on registration failure just drop it. */
2096 if (rfkill_register(hdev->rfkill) < 0) {
2097 rfkill_destroy(hdev->rfkill);
2098 hdev->rfkill = NULL;
2102 set_bit(HCI_SETUP, &hdev->dev_flags);
2104 if (hdev->dev_type != HCI_AMP)
2105 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2107 hci_notify(hdev, HCI_DEV_REG);
2110 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwinding below: free workqueues, index and list entry. */
2115 destroy_workqueue(hdev->workqueue);
2116 destroy_workqueue(hdev->req_workqueue);
2118 ida_simple_remove(&hci_index_ida, hdev->id);
2119 write_lock(&hci_dev_list_lock);
2120 list_del(&hdev->list);
2121 write_unlock(&hci_dev_list_lock);
2125 EXPORT_SYMBOL(hci_register_dev);
2127 /* Unregister HCI device */
/* Tear down a registered controller: mark it unregistering, unlink it
 * from hci_dev_list, close it, flush pending work, notify mgmt (unless
 * still in init/setup), remove rfkill and sysfs, destroy workqueues,
 * clear all stored key/blacklist/OOB state and release the index. */
2128 void hci_unregister_dev(struct hci_dev *hdev)
2132 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2134 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2138 write_lock(&hci_dev_list_lock);
2139 list_del(&hdev->list);
2140 write_unlock(&hci_dev_list_lock);
2142 hci_dev_do_close(hdev);
/* Drop any partially reassembled packets. */
2144 for (i = 0; i < NUM_REASSEMBLY; i++)
2145 kfree_skb(hdev->reassembly[i]);
2147 cancel_work_sync(&hdev->power_on);
/* Only announce removal to mgmt for fully set-up devices. */
2149 if (!test_bit(HCI_INIT, &hdev->flags) &&
2150 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2152 mgmt_index_removed(hdev);
2153 hci_dev_unlock(hdev);
2156 /* mgmt_index_removed should take care of emptying the
2158 BUG_ON(!list_empty(&hdev->mgmt_pending));
2160 hci_notify(hdev, HCI_DEV_UNREG);
2163 rfkill_unregister(hdev->rfkill);
2164 rfkill_destroy(hdev->rfkill);
2167 hci_del_sysfs(hdev);
2169 destroy_workqueue(hdev->workqueue);
2170 destroy_workqueue(hdev->req_workqueue);
2173 hci_blacklist_clear(hdev);
2174 hci_uuids_clear(hdev);
2175 hci_link_keys_clear(hdev);
2176 hci_smp_ltks_clear(hdev);
2177 hci_remote_oob_data_clear(hdev);
2178 hci_dev_unlock(hdev);
2182 ida_simple_remove(&hci_index_ida, id);
2186 /* Suspend HCI device */
/* Notify listeners (HCI sockets) that the device is suspending. */
2187 int hci_suspend_dev(struct hci_dev *hdev)
2189 hci_notify(hdev, HCI_DEV_SUSPEND);
2192 EXPORT_SYMBOL(hci_suspend_dev);
2194 /* Resume HCI device */
/* Notify listeners (HCI sockets) that the device has resumed. */
2195 int hci_resume_dev(struct hci_dev *hdev)
2197 hci_notify(hdev, HCI_DEV_RESUME);
2200 EXPORT_SYMBOL(hci_resume_dev);
2202 /* Receive frame from HCI drivers */
/* Entry point for frames arriving from a driver. Drops the frame unless
 * the device exists and is UP or initializing; otherwise timestamps it,
 * marks it incoming, queues it on rx_q and schedules rx_work. */
2203 int hci_recv_frame(struct sk_buff *skb)
2205 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2206 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2207 && !test_bit(HCI_INIT, &hdev->flags))) {
2213 bt_cb(skb)->incoming = 1;
2216 __net_timestamp(skb);
2218 skb_queue_tail(&hdev->rx_q, skb);
2219 queue_work(hdev->workqueue, &hdev->rx_work);
2223 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a fragmented HCI packet of @type from @count bytes at
 * @data, using the per-device reassembly slot @index. Allocates an skb
 * sized for the packet type on the first fragment, appends subsequent
 * fragments, and hands the skb to hci_recv_frame() once the expected
 * length (from the type-specific header) has been received.
 * NOTE(review): return-value lines are elided in this excerpt. */
2225 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2226 int count, __u8 index)
2231 struct sk_buff *skb;
2232 struct bt_skb_cb *scb;
/* Reject unknown packet types and out-of-range slot indices. */
2234 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2235 index >= NUM_REASSEMBLY)
2238 skb = hdev->reassembly[index];
/* Per-type maximum payload and header sizes for the new skb. */
2242 case HCI_ACLDATA_PKT:
2243 len = HCI_MAX_FRAME_SIZE;
2244 hlen = HCI_ACL_HDR_SIZE;
2247 len = HCI_MAX_EVENT_SIZE;
2248 hlen = HCI_EVENT_HDR_SIZE;
2250 case HCI_SCODATA_PKT:
2251 len = HCI_MAX_SCO_SIZE;
2252 hlen = HCI_SCO_HDR_SIZE;
2256 skb = bt_skb_alloc(len, GFP_ATOMIC);
2260 scb = (void *) skb->cb;
2262 scb->pkt_type = type;
2264 skb->dev = (void *) hdev;
2265 hdev->reassembly[index] = skb;
2269 scb = (void *) skb->cb;
/* Copy no more than is still expected for this packet. */
2270 len = min_t(uint, scb->expect, count);
2272 memcpy(skb_put(skb, len), data, len);
/* Once the header is complete, read the payload length from it and
 * verify the skb has room; otherwise drop the reassembly slot. */
2281 if (skb->len == HCI_EVENT_HDR_SIZE) {
2282 struct hci_event_hdr *h = hci_event_hdr(skb);
2283 scb->expect = h->plen;
2285 if (skb_tailroom(skb) < scb->expect) {
2287 hdev->reassembly[index] = NULL;
2293 case HCI_ACLDATA_PKT:
2294 if (skb->len == HCI_ACL_HDR_SIZE) {
2295 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2296 scb->expect = __le16_to_cpu(h->dlen);
2298 if (skb_tailroom(skb) < scb->expect) {
2300 hdev->reassembly[index] = NULL;
2306 case HCI_SCODATA_PKT:
2307 if (skb->len == HCI_SCO_HDR_SIZE) {
2308 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2309 scb->expect = h->dlen;
2311 if (skb_tailroom(skb) < scb->expect) {
2313 hdev->reassembly[index] = NULL;
2320 if (scb->expect == 0) {
2321 /* Complete frame */
2323 bt_cb(skb)->pkt_type = type;
2324 hci_recv_frame(skb);
2326 hdev->reassembly[index] = NULL;
/* Feed driver data into reassembly for packets whose type is known up
 * front; slot (type - 1) is used per packet type. Advances @data past
 * the bytes consumed on each pass. */
2334 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2338 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2342 rem = hci_reassembly(hdev, type, data, count, type - 1);
2346 data += (count - rem);
2352 EXPORT_SYMBOL(hci_recv_fragment);
2354 #define STREAM_REASSEMBLY 0
/* Feed a raw byte stream (type byte embedded in the data, as on UART
 * transports) into reassembly using the dedicated stream slot. The
 * packet type is read from the stream / the in-progress skb. */
2356 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2362 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2365 struct { char type; } *pkt;
2367 /* Start of the frame */
/* Continuation: reuse the type recorded on the in-progress skb. */
2374 type = bt_cb(skb)->pkt_type;
2376 rem = hci_reassembly(hdev, type, data, count,
2381 data += (count - rem);
2387 EXPORT_SYMBOL(hci_recv_stream_fragment);
2389 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback set on the global hci_cb_list. */
2391 int hci_register_cb(struct hci_cb *cb)
2393 BT_DBG("%p name %s", cb, cb->name);
2395 write_lock(&hci_cb_list_lock);
2396 list_add(&cb->list, &hci_cb_list);
2397 write_unlock(&hci_cb_list_lock);
2401 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback set from the global hci_cb_list. */
2403 int hci_unregister_cb(struct hci_cb *cb)
2405 BT_DBG("%p name %s", cb, cb->name);
2407 write_lock(&hci_cb_list_lock);
2408 list_del(&cb->list);
2409 write_unlock(&hci_cb_list_lock);
2413 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver. Timestamps it and sends copies
 * to the monitor channel and, in promiscuous mode, to raw sockets,
 * before invoking the driver's send callback. */
2415 static int hci_send_frame(struct sk_buff *skb)
2417 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2424 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2427 __net_timestamp(skb);
2429 /* Send copy to monitor */
2430 hci_send_to_monitor(hdev, skb);
2432 if (atomic_read(&hdev->promisc)) {
2433 /* Send copy to the sockets */
2434 hci_send_to_sock(hdev, skb);
2437 /* Get rid of skb owner, prior to sending to the driver. */
2440 return hdev->send(skb);
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to @hdev. */
2443 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2445 skb_queue_head_init(&req->cmd_q);
/* Submit an assembled request: attach @complete to the last queued
 * command, splice the request's commands onto hdev->cmd_q under the
 * queue lock, and kick cmd_work. Empty requests are rejected. */
2449 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2451 struct hci_dev *hdev = req->hdev;
2452 struct sk_buff *skb;
2453 unsigned long flags;
2455 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2457 /* Do not allow empty requests */
2458 if (skb_queue_empty(&req->cmd_q))
/* The completion callback rides on the final command of the request. */
2461 skb = skb_peek_tail(&req->cmd_q);
2462 bt_cb(skb)->req.complete = complete;
2464 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2465 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2466 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2468 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command: command header (opcode +
 * length) followed by @plen bytes of @param, tagged HCI_COMMAND_PKT
 * and owned by @hdev. Returns the skb (NULL path elided here). */
2473 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2474 u32 plen, void *param)
2476 int len = HCI_COMMAND_HDR_SIZE + plen;
2477 struct hci_command_hdr *hdr;
2478 struct sk_buff *skb;
2480 skb = bt_skb_alloc(len, GFP_ATOMIC);
2484 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2485 hdr->opcode = cpu_to_le16(opcode);
2489 memcpy(skb_put(skb, plen), param, plen);
2491 BT_DBG("skb len %d", skb->len);
2493 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2494 skb->dev = (void *) hdev;
2499 /* Send HCI command */
/* Queue a stand-alone HCI command for transmission: build the command
 * skb, mark it as the start of a single-command request, append it to
 * hdev->cmd_q and schedule cmd_work. */
2500 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2502 struct sk_buff *skb;
2504 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2506 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2508 BT_ERR("%s no memory for command", hdev->name);
/* During init, remember the last command issued. */
2512 if (test_bit(HCI_INIT, &hdev->flags))
2513 hdev->init_last_cmd = opcode;
2515 /* Stand-alone HCI commands must be flaged as
2516 * single-command requests.
2518 bt_cb(skb)->req.start = true;
2520 skb_queue_tail(&hdev->cmd_q, skb);
2521 queue_work(hdev->workqueue, &hdev->cmd_work);
2526 /* Queue a command to an asynchronous HCI request */
/* Append one command to @req's private queue (not yet submitted).
 * The first command queued is flagged as the start of the request. */
2527 int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2529 struct hci_dev *hdev = req->hdev;
2530 struct sk_buff *skb;
2532 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2534 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2536 BT_ERR("%s no memory for command", hdev->name);
2540 if (skb_queue_empty(&req->cmd_q))
2541 bt_cb(skb)->req.start = true;
2543 skb_queue_tail(&req->cmd_q, skb);
2548 /* Get data from the previously sent command */
/* Return a pointer to the parameter payload of the last sent command,
 * but only if its opcode matches @opcode; NULL paths are elided. */
2549 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2551 struct hci_command_hdr *hdr;
2553 if (!hdev->sent_cmd)
2556 hdr = (void *) hdev->sent_cmd->data;
2558 if (hdr->opcode != cpu_to_le16(opcode))
2561 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Payload starts right after the command header. */
2563 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle+flags packed, little-endian data
 * length) to an outgoing skb. */
2567 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2569 struct hci_acl_hdr *hdr;
2572 skb_push(skb, HCI_ACL_HDR_SIZE);
2573 skb_reset_transport_header(skb);
2574 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2575 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2576 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to an outgoing skb (and its frag_list fragments, if
 * any) and queue everything on @queue. For BR/EDR the connection handle
 * is used; for AMP channels the channel handle. Fragments after the
 * first lose the ACL_START flag and are queued atomically under the
 * queue lock. */
2579 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2580 struct sk_buff *skb, __u16 flags)
2582 struct hci_conn *conn = chan->conn;
2583 struct hci_dev *hdev = conn->hdev;
2584 struct sk_buff *list;
2586 skb->len = skb_headlen(skb);
2589 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* Pick the handle to pack into the ACL header by controller type. */
2591 switch (hdev->dev_type) {
2593 hci_add_acl_hdr(skb, conn->handle, flags);
2596 hci_add_acl_hdr(skb, chan->handle, flags);
2599 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2603 list = skb_shinfo(skb)->frag_list;
2605 /* Non fragmented */
2606 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2608 skb_queue_tail(queue, skb);
2611 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; each fragment is queued individually. */
2613 skb_shinfo(skb)->frag_list = NULL;
2615 /* Queue all fragments atomically */
2616 spin_lock(&queue->lock);
2618 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the start-of-packet flag. */
2620 flags &= ~ACL_START;
2623 skb = list; list = list->next;
2625 skb->dev = (void *) hdev;
2626 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2627 hci_add_acl_hdr(skb, conn->handle, flags);
2629 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2631 __skb_queue_tail(queue, skb);
2634 spin_unlock(&queue->lock);
/* Queue ACL data on a channel's data queue and schedule the TX worker. */
2638 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2640 struct hci_dev *hdev = chan->conn->hdev;
2642 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2644 skb->dev = (void *) hdev;
2646 hci_queue_acl(chan, &chan->data_q, skb, flags);
2648 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle + length) to @skb, queue it on the
 * connection's data queue and schedule the TX worker. */
2652 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2654 struct hci_dev *hdev = conn->hdev;
2655 struct hci_sco_hdr hdr;
2657 BT_DBG("%s len %d", hdev->name, skb->len);
2659 hdr.handle = cpu_to_le16(conn->handle);
2660 hdr.dlen = skb->len;
2662 skb_push(skb, HCI_SCO_HDR_SIZE);
2663 skb_reset_transport_header(skb);
2664 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2666 skb->dev = (void *) hdev;
2667 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2669 skb_queue_tail(&conn->data_q, skb);
2670 queue_work(hdev->workqueue, &hdev->tx_work);
2673 /* ---- HCI TX task (outgoing data) ---- */
2675 /* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * in-flight packets (fairness), and compute its TX quota from the
 * controller's per-link-type credit counter. Iterates the connection
 * hash under RCU. */
2676 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2679 struct hci_conn_hash *h = &hdev->conn_hash;
2680 struct hci_conn *conn = NULL, *c;
2681 unsigned int num = 0, min = ~0;
2683 /* We don't have to lock device here. Connections are always
2684 * added and removed with TX task disabled. */
2688 list_for_each_entry_rcu(c, &h->list, list) {
2689 if (c->type != type || skb_queue_empty(&c->data_q))
2692 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacked packets. */
2697 if (c->sent < min) {
/* Early exit once every connection of this type was visited. */
2702 if (hci_conn_num(hdev, type) == num)
/* Select the credit pool for the chosen connection's link type;
 * LE falls back to the ACL pool when no separate LE buffers exist. */
2711 switch (conn->type) {
2713 cnt = hdev->acl_cnt;
2717 cnt = hdev->sco_cnt;
2720 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2724 BT_ERR("Unknown link type");
2732 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handling for link type @type: disconnect every connection
 * of that type that still has unacknowledged packets outstanding. */
2736 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2738 struct hci_conn_hash *h = &hdev->conn_hash;
2741 BT_ERR("%s link tx timeout", hdev->name);
2745 /* Kill stalled connections */
2746 list_for_each_entry_rcu(c, &h->list, list) {
2747 if (c->type == type && c->sent) {
2748 BT_ERR("%s killing stalled connection %pMR",
2749 hdev->name, &c->dst);
2750 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels on connections of @type
 * with queued data, pick one, preferring higher skb priority first and
 * then the connection with the fewest in-flight packets. The quota is
 * derived from the appropriate controller credit pool. RCU-walks the
 * connection hash and each connection's channel list. */
2757 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2760 struct hci_conn_hash *h = &hdev->conn_hash;
2761 struct hci_chan *chan = NULL;
2762 unsigned int num = 0, min = ~0, cur_prio = 0;
2763 struct hci_conn *conn;
2764 int cnt, q, conn_num = 0;
2766 BT_DBG("%s", hdev->name);
2770 list_for_each_entry_rcu(conn, &h->list, list) {
2771 struct hci_chan *tmp;
2773 if (conn->type != type)
2776 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2781 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2782 struct sk_buff *skb;
2784 if (skb_queue_empty(&tmp->data_q))
/* Priority is taken from the head skb of each channel queue. */
2787 skb = skb_peek(&tmp->data_q);
2788 if (skb->priority < cur_prio)
/* A strictly higher priority resets the fairness search. */
2791 if (skb->priority > cur_prio) {
2794 cur_prio = skb->priority;
2799 if (conn->sent < min) {
2805 if (hci_conn_num(hdev, type) == conn_num)
/* Map the chosen channel's link type to its credit pool. */
2814 switch (chan->conn->type) {
2816 cnt = hdev->acl_cnt;
2819 cnt = hdev->block_cnt;
2823 cnt = hdev->sco_cnt;
2826 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2830 BT_ERR("Unknown link type");
2835 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a TX round, bump the priority of skbs
 * waiting on channels of @type toward HCI_PRIO_MAX - 1 so low-priority
 * traffic eventually gets scheduled. RCU-walks connections/channels. */
2839 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2841 struct hci_conn_hash *h = &hdev->conn_hash;
2842 struct hci_conn *conn;
2845 BT_DBG("%s", hdev->name);
2849 list_for_each_entry_rcu(conn, &h->list, list) {
2850 struct hci_chan *chan;
2852 if (conn->type != type)
2855 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2860 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2861 struct sk_buff *skb;
2868 if (skb_queue_empty(&chan->data_q))
2871 skb = skb_peek(&chan->data_q);
/* Already at (or above) the promotion ceiling — skip. */
2872 if (skb->priority >= HCI_PRIO_MAX - 1)
2875 skb->priority = HCI_PRIO_MAX - 1;
2877 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2881 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by one ACL packet
 * (payload length divided by block size, rounded up). */
2889 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2891 /* Calculate count of blocks used by this packet */
2892 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has no ACL credits left (@cnt == 0) and nothing was
 * transmitted within HCI_ACL_TX_TIMEOUT, treat the ACL link as stalled.
 * Skipped entirely for raw-mode devices. */
2895 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2897 if (!test_bit(HCI_RAW, &hdev->flags)) {
2898 /* ACL tx timeout must be longer than maximum
2899 * link supervision timeout (40.9 seconds) */
2900 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2901 HCI_ACL_TX_TIMEOUT))
2902 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while ACL credits remain, repeatedly pick
 * a channel via hci_chan_sent() and drain up to its quota of skbs,
 * stopping a channel early if the head skb's priority drops. Promotes
 * starved traffic afterwards if anything was sent. */
2906 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2908 unsigned int cnt = hdev->acl_cnt;
2909 struct hci_chan *chan;
2910 struct sk_buff *skb;
2913 __check_timeout(hdev, cnt);
2915 while (hdev->acl_cnt &&
2916 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
/* Priority of the first skb defines this channel's batch. */
2917 u32 priority = (skb_peek(&chan->data_q))->priority;
2918 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2919 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2920 skb->len, skb->priority);
2922 /* Stop if priority has changed */
2923 if (skb->priority < priority)
2926 skb = skb_dequeue(&chan->data_q);
2928 hci_conn_enter_active_mode(chan->conn,
2929 bt_cb(skb)->force_active);
2931 hci_send_frame(skb);
2932 hdev->acl_last_tx = jiffies;
/* Only rebalance priorities if credits were actually consumed. */
2940 if (cnt != hdev->acl_cnt)
2941 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control in data blocks rather than
 * packets). Like hci_sched_acl_pkt(), but each skb consumes
 * __get_blocks() credits and a packet larger than the remaining block
 * budget stops the loop. AMP controllers schedule AMP_LINK traffic. */
2944 static void hci_sched_acl_blk(struct hci_dev *hdev)
2946 unsigned int cnt = hdev->block_cnt;
2947 struct hci_chan *chan;
2948 struct sk_buff *skb;
2952 __check_timeout(hdev, cnt);
2954 BT_DBG("%s", hdev->name);
2956 if (hdev->dev_type == HCI_AMP)
2961 while (hdev->block_cnt > 0 &&
2962 (chan = hci_chan_sent(hdev, type, &quote))) {
2963 u32 priority = (skb_peek(&chan->data_q))->priority;
2964 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2967 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2968 skb->len, skb->priority);
2970 /* Stop if priority has changed */
2971 if (skb->priority < priority)
2974 skb = skb_dequeue(&chan->data_q);
/* Don't send a packet that exceeds the remaining block budget. */
2976 blocks = __get_blocks(hdev, skb);
2977 if (blocks > hdev->block_cnt)
2980 hci_conn_enter_active_mode(chan->conn,
2981 bt_cb(skb)->force_active);
2983 hci_send_frame(skb);
2984 hdev->acl_last_tx = jiffies;
2986 hdev->block_cnt -= blocks;
/* Accounting is in blocks, not packets, for this mode. */
2989 chan->sent += blocks;
2990 chan->conn->sent += blocks;
2994 if (cnt != hdev->block_cnt)
2995 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet-based or block-based variant
 * according to the controller's flow-control mode. Bails out when the
 * controller type has no matching links to serve. */
2998 static void hci_sched_acl(struct hci_dev *hdev)
3000 BT_DBG("%s", hdev->name);
3002 /* No ACL link over BR/EDR controller */
3003 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3006 /* No AMP link over AMP controller */
3007 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3010 switch (hdev->flow_ctl_mode) {
3011 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3012 hci_sched_acl_pkt(hdev);
3015 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3016 hci_sched_acl_blk(hdev);
3022 static void hci_sched_sco(struct hci_dev *hdev)
3024 struct hci_conn *conn;
3025 struct sk_buff *skb;
3028 BT_DBG("%s", hdev->name);
3030 if (!hci_conn_num(hdev, SCO_LINK))
3033 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3034 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3035 BT_DBG("skb %p len %d", skb, skb->len);
3036 hci_send_frame(skb);
3039 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco() but serves
 * ESCO_LINK connections (shares the sco_cnt credit pool). */
3045 static void hci_sched_esco(struct hci_dev *hdev)
3047 struct hci_conn *conn;
3048 struct sk_buff *skb;
3051 BT_DBG("%s", hdev->name);
3053 if (!hci_conn_num(hdev, ESCO_LINK))
3056 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3058 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3059 BT_DBG("skb %p len %d", skb, skb->len);
3060 hci_send_frame(skb);
3063 if (conn->sent == ~0)
/* LE scheduler: drains LE channels like the ACL packet scheduler, with
 * its own stall detection (45 s). Controllers without dedicated LE
 * buffers (le_pkts == 0) share the ACL credit pool, so the consumed
 * count is written back to acl_cnt in that case. */
3069 static void hci_sched_le(struct hci_dev *hdev)
3071 struct hci_chan *chan;
3072 struct sk_buff *skb;
3073 int quote, cnt, tmp;
3075 BT_DBG("%s", hdev->name);
3077 if (!hci_conn_num(hdev, LE_LINK))
3080 if (!test_bit(HCI_RAW, &hdev->flags)) {
3081 /* LE tx timeout must be longer than maximum
3082 * link supervision timeout (40.9 seconds) */
3083 if (!hdev->le_cnt && hdev->le_pkts &&
3084 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3085 hci_link_tx_to(hdev, LE_LINK);
/* Fall back to the ACL pool when no LE buffers are advertised. */
3088 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3090 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3091 u32 priority = (skb_peek(&chan->data_q))->priority;
3092 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3093 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3094 skb->len, skb->priority);
3096 /* Stop if priority has changed */
3097 if (skb->priority < priority)
3100 skb = skb_dequeue(&chan->data_q);
3102 hci_send_frame(skb);
3103 hdev->le_last_tx = jiffies;
/* Shared-pool case: reflect consumption back into acl_cnt. */
3114 hdev->acl_cnt = cnt;
3117 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run all per-link-type schedulers, then flush any raw
 * (unknown type) packets straight to the driver. */
3120 static void hci_tx_work(struct work_struct *work)
3122 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3123 struct sk_buff *skb;
3125 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3126 hdev->sco_cnt, hdev->le_cnt);
3128 /* Schedule queues and send stuff to HCI driver */
3130 hci_sched_acl(hdev);
3132 hci_sched_sco(hdev);
3134 hci_sched_esco(hdev);
3138 /* Send next queued raw (unknown type) packet */
3139 while ((skb = skb_dequeue(&hdev->raw_q)))
3140 hci_send_frame(skb);
3143 /* ----- HCI RX task (incoming data processing) ----- */
3145 /* ACL data packet */
/* Process one incoming ACL data packet: strip the ACL header, split the
 * handle field into handle + flags, look up the connection and pass the
 * payload up to L2CAP. Unknown handles are logged and the skb dropped
 * (drop path elided in this excerpt). */
3146 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3148 struct hci_acl_hdr *hdr = (void *) skb->data;
3149 struct hci_conn *conn;
3150 __u16 handle, flags;
3152 skb_pull(skb, HCI_ACL_HDR_SIZE);
3154 handle = __le16_to_cpu(hdr->handle);
/* The 16-bit field packs flags in the top bits, handle in the rest. */
3155 flags = hci_flags(handle);
3156 handle = hci_handle(handle);
3158 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3161 hdev->stat.acl_rx++;
3164 conn = hci_conn_hash_lookup_handle(hdev, handle);
3165 hci_dev_unlock(hdev);
3168 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3170 /* Send to upper protocol */
3171 l2cap_recv_acldata(conn, skb, flags);
3174 BT_ERR("%s ACL packet for unknown connection handle %d",
3175 hdev->name, handle);
3181 /* SCO data packet */
/* Process one incoming SCO data packet: strip the SCO header, look up
 * the connection by handle and pass the payload up to the SCO layer.
 * Unknown handles are logged (drop path elided in this excerpt). */
3182 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3184 struct hci_sco_hdr *hdr = (void *) skb->data;
3185 struct hci_conn *conn;
3188 skb_pull(skb, HCI_SCO_HDR_SIZE);
3190 handle = __le16_to_cpu(hdr->handle);
3192 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3194 hdev->stat.sco_rx++;
3197 conn = hci_conn_hash_lookup_handle(hdev, handle);
3198 hci_dev_unlock(hdev);
3201 /* Send to upper protocol */
3202 sco_recv_scodata(conn, skb);
3205 BT_ERR("%s SCO packet for unknown connection handle %d",
3206 hdev->name, handle);
/* A request is complete when the next command waiting in cmd_q is
 * flagged as the start of a new request (or, per elided lines, when
 * the queue is empty). */
3212 static bool hci_req_is_complete(struct hci_dev *hdev)
3214 struct sk_buff *skb;
3216 skb = skb_peek(&hdev->cmd_q);
3220 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q and
 * kick the command worker. Used to recover when a controller loses a
 * command; HCI_Reset itself is never resent. */
3223 static void hci_resend_last(struct hci_dev *hdev)
3225 struct hci_command_hdr *sent;
3226 struct sk_buff *skb;
3229 if (!hdev->sent_cmd)
3232 sent = (void *) hdev->sent_cmd->data;
3233 opcode = __le16_to_cpu(sent->opcode);
3234 if (opcode == HCI_OP_RESET)
3237 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3241 skb_queue_head(&hdev->cmd_q, skb);
3242 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle completion of a command with respect to its enclosing request.
 * Mismatched opcodes trigger special handling (CSR spurious-reset
 * workaround); a successful but non-final command leaves the request
 * running; otherwise the request's completion callback is located (on
 * sent_cmd or in cmd_q), remaining queued commands of the request are
 * discarded, and the callback is invoked with @status. */
3245 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3247 hci_req_complete_t req_complete = NULL;
3248 struct sk_buff *skb;
3249 unsigned long flags;
3251 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3253 /* If the completed command doesn't match the last one that was
3254 * sent we need to do special handling of it.
3256 if (!hci_sent_cmd_data(hdev, opcode)) {
3257 /* Some CSR based controllers generate a spontaneous
3258 * reset complete event during init and any pending
3259 * command will never be completed. In such a case we
3260 * need to resend whatever was the last sent
3263 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3264 hci_resend_last(hdev);
3269 /* If the command succeeded and there's still more commands in
3270 * this request the request is not yet complete.
3272 if (!status && !hci_req_is_complete(hdev))
3275 /* If this was the last command in a request the complete
3276 * callback would be found in hdev->sent_cmd instead of the
3277 * command queue (hdev->cmd_q).
3279 if (hdev->sent_cmd) {
3280 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3285 /* Remove all pending commands belonging to this request */
3286 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3287 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Stop at the first command of the NEXT request; put it back. */
3288 if (bt_cb(skb)->req.start) {
3289 __skb_queue_head(&hdev->cmd_q, skb);
3293 req_complete = bt_cb(skb)->req.complete;
3296 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3300 req_complete(hdev, status);
/* Called on a Command Status event for @opcode.  A failure status, or a
 * request with no completion callback, is treated as request completion
 * and forwarded to hci_req_cmd_complete(); a success status with more
 * commands still pending needs no handling here.
 * NOTE(review): the guard/early-return lines between the calls appear
 * elided in this extract.
 */
3303 void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3305 hci_req_complete_t req_complete = NULL;
3307 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
/* a non-zero status fails the whole request immediately */
3310 hci_req_cmd_complete(hdev, opcode, status);
3314 /* No need to handle success status if there are more commands */
3315 if (!hci_req_is_complete(hdev))
3319 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3321 /* If the request doesn't have a complete callback or there
3322 * are other commands/requests in the hdev queue we consider
3323 * this request as completed.
3325 if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3326 hci_req_cmd_complete(hdev, opcode, status);
/* RX workqueue handler: drain hdev->rx_q and dispatch each packet.
 * Every packet is mirrored to the monitor socket (and, in promiscuous
 * mode, to user sockets) before being processed by type.
 * NOTE(review): the kfree_skb/continue/break lines between the cases are
 * elided in this extract; comments describe only the visible flow.
 */
3329 static void hci_rx_work(struct work_struct *work)
3331 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3332 struct sk_buff *skb;
3334 BT_DBG("%s", hdev->name);
3336 while ((skb = skb_dequeue(&hdev->rx_q))) {
3337 /* Send copy to monitor */
3338 hci_send_to_monitor(hdev, skb);
3340 if (atomic_read(&hdev->promisc)) {
3341 /* Send copy to the sockets */
3342 hci_send_to_sock(hdev, skb);
/* raw mode: user space handles the packet, skip kernel processing */
3345 if (test_bit(HCI_RAW, &hdev->flags)) {
3350 if (test_bit(HCI_INIT, &hdev->flags)) {
3351 /* Don't process data packets in this states. */
3352 switch (bt_cb(skb)->pkt_type) {
3353 case HCI_ACLDATA_PKT:
3354 case HCI_SCODATA_PKT:
/* normal dispatch by packet type */
3361 switch (bt_cb(skb)->pkt_type) {
3363 BT_DBG("%s Event packet", hdev->name);
3364 hci_event_packet(hdev, skb);
3367 case HCI_ACLDATA_PKT:
3368 BT_DBG("%s ACL data packet", hdev->name);
3369 hci_acldata_packet(hdev, skb);
3372 case HCI_SCODATA_PKT:
3373 BT_DBG("%s SCO data packet", hdev->name);
3374 hci_scodata_packet(hdev, skb);
/* Command workqueue handler: if the controller has command credits
 * (cmd_cnt), take the next command off cmd_q, remember it in sent_cmd
 * and transmit it, arming the command timeout.  On clone failure the
 * command is put back and the work rescheduled.
 */
3384 static void hci_cmd_work(struct work_struct *work)
3386 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3387 struct sk_buff *skb;
3389 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3390 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3392 /* Send queued commands */
3393 if (atomic_read(&hdev->cmd_cnt)) {
3394 skb = skb_dequeue(&hdev->cmd_q);
/* drop the previously sent command before storing the new one */
3398 kfree_skb(hdev->sent_cmd);
/* keep a clone so the command can be matched/resent on completion */
3400 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3401 if (hdev->sent_cmd) {
/* consume one controller command credit */
3402 atomic_dec(&hdev->cmd_cnt);
3403 hci_send_frame(skb);
/* during reset only stop the timer; otherwise (re)arm the cmd timeout */
3404 if (test_bit(HCI_RESET, &hdev->flags))
3405 del_timer(&hdev->cmd_timer);
3407 mod_timer(&hdev->cmd_timer,
3408 jiffies + HCI_CMD_TIMEOUT);
/* clone failed: put the command back and try again later */
3410 skb_queue_head(&hdev->cmd_q, skb);
3411 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Start a general inquiry (GIAC) lasting @length.  Returns -EINPROGRESS
 * if an inquiry is already running, otherwise the result of sending the
 * HCI_Inquiry command.  The inquiry cache is flushed first.
 * NOTE(review): the assignment of @length into cp (cp.length) appears
 * elided in this extract — confirm against the full file.
 */
3416 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3418 /* General inquiry access code (GIAC) */
3419 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3420 struct hci_cp_inquiry cp;
3422 BT_DBG("%s", hdev->name);
3424 if (test_bit(HCI_INQUIRY, &hdev->flags))
3425 return -EINPROGRESS;
/* stale results from a previous inquiry must not leak into this one */
3427 inquiry_cache_flush(hdev);
3429 memset(&cp, 0, sizeof(cp));
3430 memcpy(&cp.lap, lap, sizeof(cp.lap));
3433 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancel a running inquiry.  If none is in progress this is a no-op
 * (NOTE(review): the early-return value for that case is elided here);
 * otherwise sends HCI_Inquiry_Cancel and returns hci_send_cmd's result.
 */
3436 int hci_cancel_inquiry(struct hci_dev *hdev)
3438 BT_DBG("%s", hdev->name);
3440 if (!test_bit(HCI_INQUIRY, &hdev->flags))
3443 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* Map a mgmt-layer BDADDR_LE_* address type to the corresponding
 * ADDR_LE_DEV_* HCI address type.  Anything that is not
 * BDADDR_LE_PUBLIC falls back to the random address type.
 */
3446 u8 bdaddr_to_le(u8 bdaddr_type)
3448 switch (bdaddr_type) {
3449 case BDADDR_LE_PUBLIC:
3450 return ADDR_LE_DEV_PUBLIC;
3453 /* Fallback to LE Random address type */
3454 return ADDR_LE_DEV_RANDOM;