2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
37 /* Handle HCI Event packets */
/* Command Complete handler for HCI Inquiry Cancel: clear HCI_INQUIRY,
 * wake bit-waiters, mark discovery stopped and kick pending connections.
 * NOTE(review): this dump dropped lines (braces/status guards); the
 * leading numbers are the original file's line numbers.
 */
39 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
41 __u8 status = *((__u8 *) skb->data);
43 BT_DBG("%s status 0x%2.2x", hdev->name, status);
48 clear_bit(HCI_INQUIRY, &hdev->flags);
49 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
50 wake_up_bit(&hdev->flags, HCI_INQUIRY);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
56 hci_conn_check_pending(hdev);
/* Periodic Inquiry Mode completed: record the mode in dev_flags. */
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
61 __u8 status = *((__u8 *) skb->data);
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
/* Exit Periodic Inquiry Mode completed: drop the flag and retry any
 * connection attempts that were deferred while inquiring. */
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73 __u8 status = *((__u8 *) skb->data);
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82 hci_conn_check_pending(hdev);
/* Remote Name Request Cancel completed: debug trace only, no state change
 * visible here. */
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 BT_DBG("%s", hdev->name);
/* Role Discovery completed: look up the connection by handle and update
 * its HCI_CONN_MASTER flag to match the reported role. */
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 clear_bit(HCI_CONN_MASTER, &conn->flags);
108 set_bit(HCI_CONN_MASTER, &conn->flags);
111 hci_dev_unlock(hdev);
/* Read Link Policy completed: store the reported policy on the matching
 * connection (looked up by handle, under hdev lock). */
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
128 conn->link_policy = __le16_to_cpu(rp->policy);
130 hci_dev_unlock(hdev);
/* Write Link Policy completed: the new policy is not echoed in the reply,
 * so re-read it from the command we sent (offset 2 past the handle). */
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
135 struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 struct hci_conn *conn;
139 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
144 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = get_unaligned_le16(sent + 2);
154 hci_dev_unlock(hdev);
/* Read Default Link Policy completed: cache the controller default. */
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
167 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Write Default Link Policy completed: mirror the value we sent into
 * hdev->link_policy (reply carries only a status). */
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 __u8 status = *((__u8 *) skb->data);
176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
181 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
185 hdev->link_policy = get_unaligned_le16(sent);
/* HCI Reset completed: drop HCI_RESET and return all non-persistent
 * per-device state (discovery, tx power, adv/scan-rsp data, LE scan
 * type, SSP debug mode) to its post-reset defaults. */
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
190 __u8 status = *((__u8 *) skb->data);
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
194 clear_bit(HCI_RESET, &hdev->flags);
196 /* Reset all non-persistent flags */
197 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
199 hdev->discovery.state = DISCOVERY_STOPPED;
200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
201 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
203 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
204 hdev->adv_data_len = 0;
206 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
207 hdev->scan_rsp_data_len = 0;
209 hdev->le_scan_type = LE_SCAN_PASSIVE;
211 hdev->ssp_debug_mode = 0;
/* Write Local Name completed: notify mgmt (when under mgmt control) and
 * copy the name we sent into hdev->dev_name. */
214 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
216 __u8 status = *((__u8 *) skb->data);
219 BT_DBG("%s status 0x%2.2x", hdev->name, status);
221 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
227 if (test_bit(HCI_MGMT, &hdev->dev_flags))
228 mgmt_set_local_name_complete(hdev, sent, status);
230 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
232 hci_dev_unlock(hdev);
/* Read Local Name completed: only cache the name during initial setup. */
235 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
237 struct hci_rp_read_local_name *rp = (void *) skb->data;
239 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
244 if (test_bit(HCI_SETUP, &hdev->dev_flags))
245 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Write Authentication Enable completed: sync HCI_AUTH with the value we
 * sent and inform mgmt of the outcome. */
248 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
250 __u8 status = *((__u8 *) skb->data);
253 BT_DBG("%s status 0x%2.2x", hdev->name, status);
255 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
260 __u8 param = *((__u8 *) sent);
262 if (param == AUTH_ENABLED)
263 set_bit(HCI_AUTH, &hdev->flags);
265 clear_bit(HCI_AUTH, &hdev->flags);
268 if (test_bit(HCI_MGMT, &hdev->dev_flags))
269 mgmt_auth_enable_complete(hdev, status);
/* Write Encryption Mode completed: sync HCI_ENCRYPT with the sent value. */
272 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
274 __u8 status = *((__u8 *) skb->data);
278 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
287 param = *((__u8 *) sent);
290 set_bit(HCI_ENCRYPT, &hdev->flags);
292 clear_bit(HCI_ENCRYPT, &hdev->flags);
295 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
297 __u8 status = *((__u8 *) skb->data);
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
307 param = *((__u8 *) sent);
312 hdev->discov_timeout = 0;
316 if (param & SCAN_INQUIRY)
317 set_bit(HCI_ISCAN, &hdev->flags);
319 clear_bit(HCI_ISCAN, &hdev->flags);
321 if (param & SCAN_PAGE)
322 set_bit(HCI_PSCAN, &hdev->flags);
324 clear_bit(HCI_ISCAN, &hdev->flags);
327 hci_dev_unlock(hdev);
/* Read Class of Device completed: cache the 3-byte CoD (little endian,
 * major/minor/service printed MSB first below). */
330 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
332 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
334 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339 memcpy(hdev->dev_class, rp->dev_class, 3);
341 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
342 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Write Class of Device completed: mirror the sent CoD into hdev and
 * report the result to mgmt when under mgmt control. */
345 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
347 __u8 status = *((__u8 *) skb->data);
350 BT_DBG("%s status 0x%2.2x", hdev->name, status);
352 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
359 memcpy(hdev->dev_class, sent, 3);
361 if (test_bit(HCI_MGMT, &hdev->dev_flags))
362 mgmt_set_class_of_dev_complete(hdev, sent, status);
364 hci_dev_unlock(hdev);
/* Read Voice Setting completed: update the cached setting and, when it
 * actually changed, notify the transport driver. */
367 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
369 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377 setting = __le16_to_cpu(rp->voice_setting);
379 if (hdev->voice_setting == setting)
382 hdev->voice_setting = setting;
384 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Write Voice Setting completed: take the value from the command we sent,
 * update the cache if changed and notify the transport driver. */
390 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393 __u8 status = *((__u8 *) skb->data);
397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
402 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
406 setting = get_unaligned_le16(sent);
408 if (hdev->voice_setting == setting)
411 hdev->voice_setting = setting;
413 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Read Number of Supported IAC completed: cache the count. */
419 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
422 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 hdev->num_iac = rp->num_iac;
431 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Write Simple Pairing Mode completed: keep the LMP host-feature bit,
 * the HCI_SSP_ENABLED flag and mgmt in sync with the mode we sent. */
434 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
436 __u8 status = *((__u8 *) skb->data);
437 struct hci_cp_write_ssp_mode *sent;
439 BT_DBG("%s status 0x%2.2x", hdev->name, status);
441 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
447 hdev->features[1][0] |= LMP_HOST_SSP;
449 hdev->features[1][0] &= ~LMP_HOST_SSP;
452 if (test_bit(HCI_MGMT, &hdev->dev_flags))
453 mgmt_ssp_enable_complete(hdev, sent->mode, status);
456 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
458 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Write Secure Connections Host Support completed: same pattern as SSP —
 * sync the LMP host-feature bit, HCI_SC_ENABLED and mgmt. */
462 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
464 u8 status = *((u8 *) skb->data);
465 struct hci_cp_write_sc_support *sent;
467 BT_DBG("%s status 0x%2.2x", hdev->name, status);
469 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
475 hdev->features[1][0] |= LMP_HOST_SC;
477 hdev->features[1][0] &= ~LMP_HOST_SC;
480 if (test_bit(HCI_MGMT, &hdev->dev_flags))
481 mgmt_sc_enable_complete(hdev, sent->support, status);
484 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
486 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* Read Local Version Information completed: only cached during setup so a
 * runtime re-read cannot change identity fields under us. */
490 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
492 struct hci_rp_read_local_version *rp = (void *) skb->data;
494 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
499 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
500 hdev->hci_ver = rp->hci_ver;
501 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
502 hdev->lmp_ver = rp->lmp_ver;
503 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
504 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Read Local Supported Commands completed: cache the bitmap during setup. */
508 static void hci_cc_read_local_commands(struct hci_dev *hdev,
511 struct hci_rp_read_local_commands *rp = (void *) skb->data;
513 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
518 if (test_bit(HCI_SETUP, &hdev->dev_flags))
519 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Read Local Supported Features completed: cache page 0 of the feature
 * mask and derive the default ACL packet types and (e)SCO link types
 * from the individual LMP feature bits. */
522 static void hci_cc_read_local_features(struct hci_dev *hdev,
525 struct hci_rp_read_local_features *rp = (void *) skb->data;
527 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
532 memcpy(hdev->features, rp->features, 8);
534 /* Adjust default settings according to features
535 * supported by device. */
537 if (hdev->features[0][0] & LMP_3SLOT)
538 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
540 if (hdev->features[0][0] & LMP_5SLOT)
541 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
543 if (hdev->features[0][1] & LMP_HV2) {
544 hdev->pkt_type |= (HCI_HV2);
545 hdev->esco_type |= (ESCO_HV2);
548 if (hdev->features[0][1] & LMP_HV3) {
549 hdev->pkt_type |= (HCI_HV3);
550 hdev->esco_type |= (ESCO_HV3);
553 if (lmp_esco_capable(hdev))
554 hdev->esco_type |= (ESCO_EV3);
556 if (hdev->features[0][4] & LMP_EV4)
557 hdev->esco_type |= (ESCO_EV4);
559 if (hdev->features[0][4] & LMP_EV5)
560 hdev->esco_type |= (ESCO_EV5);
562 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
563 hdev->esco_type |= (ESCO_2EV3);
565 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
566 hdev->esco_type |= (ESCO_3EV3);
568 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
569 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Read Local Extended Features completed: track the highest feature page
 * and cache the returned page (bounds-checked against HCI_MAX_PAGES). */
572 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
575 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
577 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
582 if (hdev->max_page < rp->max_page)
583 hdev->max_page = rp->max_page;
585 if (rp->page < HCI_MAX_PAGES)
586 memcpy(hdev->features[rp->page], rp->features, 8);
/* Read Flow Control Mode completed: cache the mode. */
589 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
592 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
594 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599 hdev->flow_ctl_mode = rp->mode;
/* Read Buffer Size completed: cache ACL/SCO MTUs and packet counts and
 * initialise the outstanding-packet counters from them. A quirk hook
 * exists for controllers that report bogus sizes. */
602 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
604 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
606 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
611 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
612 hdev->sco_mtu = rp->sco_mtu;
613 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
614 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
616 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
621 hdev->acl_cnt = hdev->acl_pkts;
622 hdev->sco_cnt = hdev->sco_pkts;
624 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
625 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Read BD_ADDR completed: record the controller address during init and
 * also as the immutable setup address during initial setup. */
628 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
630 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
637 if (test_bit(HCI_INIT, &hdev->flags))
638 bacpy(&hdev->bdaddr, &rp->bdaddr);
640 if (test_bit(HCI_SETUP, &hdev->dev_flags))
641 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Read Page Scan Activity completed: cache interval/window during init. */
644 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
647 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
654 if (test_bit(HCI_INIT, &hdev->flags)) {
655 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
656 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Write Page Scan Activity completed: mirror the sent interval/window. */
660 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
663 u8 status = *((u8 *) skb->data);
664 struct hci_cp_write_page_scan_activity *sent;
666 BT_DBG("%s status 0x%2.2x", hdev->name, status);
671 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
675 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
676 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Read Page Scan Type completed: cache the type during init only. */
679 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
682 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
684 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
689 if (test_bit(HCI_INIT, &hdev->flags))
690 hdev->page_scan_type = rp->type;
/* Write Page Scan Type completed: mirror the single-byte type we sent. */
693 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
696 u8 status = *((u8 *) skb->data);
699 BT_DBG("%s status 0x%2.2x", hdev->name, status);
704 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
706 hdev->page_scan_type = *type;
/* Read Data Block Size completed (block-based flow control): cache block
 * MTU/length/count and start the free-block counter at the maximum. */
709 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
712 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
714 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
719 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
720 hdev->block_len = __le16_to_cpu(rp->block_len);
721 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
723 hdev->block_cnt = hdev->num_blocks;
725 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
726 hdev->block_cnt, hdev->block_len);
/* Read Clock completed: length-checked reply; "which" from the sent
 * command selects local clock (stored on hdev) vs piconet clock (stored
 * on the connection along with its accuracy). */
729 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
731 struct hci_rp_read_clock *rp = (void *) skb->data;
732 struct hci_cp_read_clock *cp;
733 struct hci_conn *conn;
735 BT_DBG("%s", hdev->name);
737 if (skb->len < sizeof(*rp))
745 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
749 if (cp->which == 0x00) {
750 hdev->clock = le32_to_cpu(rp->clock);
754 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
756 conn->clock = le32_to_cpu(rp->clock);
757 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
761 hci_dev_unlock(hdev);
/* Read Local AMP Info completed: cache all AMP controller capabilities
 * and answer any pending A2MP Get Info request. */
764 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
767 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
769 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
774 hdev->amp_status = rp->amp_status;
775 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
776 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
777 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
778 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
779 hdev->amp_type = rp->amp_type;
780 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
781 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
782 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
783 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
786 a2mp_send_getinfo_rsp(hdev);
/* Read Local AMP ASSOC completed: accumulate the assoc data fragment by
 * fragment; when rem_len fits in this fragment the assoc is complete and
 * the A2MP responses are sent, otherwise the next fragment is requested. */
789 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
792 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
793 struct amp_assoc *assoc = &hdev->loc_assoc;
794 size_t rem_len, frag_len;
796 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
801 frag_len = skb->len - sizeof(*rp);
802 rem_len = __le16_to_cpu(rp->rem_len);
804 if (rem_len > frag_len) {
805 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
807 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
808 assoc->offset += frag_len;
810 /* Read other fragments */
811 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
816 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
817 assoc->len = assoc->offset + rem_len;
821 /* Send A2MP Rsp when all fragments are received */
822 a2mp_send_getampassoc_rsp(hdev, rp->status);
823 a2mp_send_create_phy_link_req(hdev, rp->status);
/* Read Inquiry Response TX Power completed: cache the power level. */
826 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
829 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
831 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
836 hdev->inq_tx_power = rp->tx_power;
/* PIN Code Reply completed: report to mgmt, then remember the PIN length
 * on the ACL connection (taken from the command we sent). */
839 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
841 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
842 struct hci_cp_pin_code_reply *cp;
843 struct hci_conn *conn;
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
849 if (test_bit(HCI_MGMT, &hdev->dev_flags))
850 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
855 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
859 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
861 conn->pin_length = cp->pin_len;
864 hci_dev_unlock(hdev);
/* PIN Code Negative Reply completed: forward the result to mgmt. */
867 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
869 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
871 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
875 if (test_bit(HCI_MGMT, &hdev->dev_flags))
876 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
879 hci_dev_unlock(hdev);
/* LE Read Buffer Size completed: cache LE MTU and packet count and seed
 * the LE outstanding-packet counter. */
882 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
885 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
887 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
892 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
893 hdev->le_pkts = rp->le_max_pkt;
895 hdev->le_cnt = hdev->le_pkts;
897 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* LE Read Local Supported Features completed: cache the 8-byte mask. */
900 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
903 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
905 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
910 memcpy(hdev->le_features, rp->features, 8);
/* LE Read Advertising Channel TX Power completed: cache the level. */
913 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
916 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
918 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
923 hdev->adv_tx_power = rp->tx_power;
/* User Confirmation Reply completed: forward the result to mgmt. */
926 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
928 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
930 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
934 if (test_bit(HCI_MGMT, &hdev->dev_flags))
935 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
938 hci_dev_unlock(hdev);
/* User Confirmation Negative Reply completed: forward result to mgmt. */
941 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
944 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
946 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
950 if (test_bit(HCI_MGMT, &hdev->dev_flags))
951 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
952 ACL_LINK, 0, rp->status);
954 hci_dev_unlock(hdev);
/* User Passkey Reply completed: forward the result to mgmt. */
957 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
959 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
961 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
965 if (test_bit(HCI_MGMT, &hdev->dev_flags))
966 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
969 hci_dev_unlock(hdev);
/* User Passkey Negative Reply completed: forward the result to mgmt. */
972 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
975 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
977 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
981 if (test_bit(HCI_MGMT, &hdev->dev_flags))
982 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
983 ACL_LINK, 0, rp->status);
985 hci_dev_unlock(hdev);
/* Read Local OOB Data completed: pass hash/randomizer (192-bit only; the
 * 256-bit pair is NULL here) to mgmt. */
988 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
991 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
993 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
996 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
997 NULL, NULL, rp->status);
998 hci_dev_unlock(hdev);
/* Read Local OOB Extended Data completed: pass both the 192- and 256-bit
 * hash/randomizer pairs to mgmt. */
1001 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1002 struct sk_buff *skb)
1004 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1006 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1009 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1010 rp->hash256, rp->randomizer256,
1012 hci_dev_unlock(hdev);
/* LE Set Random Address completed: remember the address we programmed. */
1016 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1018 __u8 status = *((__u8 *) skb->data);
1021 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1026 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1032 bacpy(&hdev->random_addr, sent);
1034 hci_dev_unlock(hdev);
/* LE Set Advertise Enable completed: track HCI_LE_ADV, and when we are
 * advertising to accept an incoming connection (peripheral-initiated
 * connect), arm the LE connection timeout as a safety net. */
1037 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1039 __u8 *sent, status = *((__u8 *) skb->data);
1041 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1046 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1052 /* If we're doing connection initation as peripheral. Set a
1053 * timeout in case something goes wrong.
1056 struct hci_conn *conn;
1058 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1060 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1062 queue_delayed_work(hdev->workqueue,
1063 &conn->le_conn_timeout,
1064 conn->conn_timeout);
1066 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1069 hci_dev_unlock(hdev);
/* LE Set Scan Parameters completed: remember the scan type we set. */
1072 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1074 struct hci_cp_le_set_scan_param *cp;
1075 __u8 status = *((__u8 *) skb->data);
1077 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1082 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1088 hdev->le_scan_type = cp->type;
1090 hci_dev_unlock(hdev);
/* True if an advertising report is buffered awaiting its scan response
 * (last_adv_addr differs from BDADDR_ANY). */
1093 static bool has_pending_adv_report(struct hci_dev *hdev)
1095 struct discovery_state *d = &hdev->discovery;
1097 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report. */
1100 static void clear_pending_adv_report(struct hci_dev *hdev)
1102 struct discovery_state *d = &hdev->discovery;
1104 bacpy(&d->last_adv_addr, BDADDR_ANY);
1105 d->last_adv_data_len = 0;
/* Buffer an advertising report in the discovery state until the matching
 * scan response arrives (or scanning stops). */
1108 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1109 u8 bdaddr_type, s8 rssi, u32 flags,
1112 struct discovery_state *d = &hdev->discovery;
1114 bacpy(&d->last_adv_addr, bdaddr);
1115 d->last_adv_addr_type = bdaddr_type;
1116 d->last_adv_rssi = rssi;
1117 d->last_adv_flags = flags;
1118 memcpy(d->last_adv_data, data, len);
1119 d->last_adv_data_len = len;
/* LE Set Scan Enable completed: on enable, set HCI_LE_SCAN and (for
 * active scans) drop any stale buffered report; on disable, flush a
 * still-pending report to mgmt, cancel the auto-disable timer, clear
 * HCI_LE_SCAN, and either mark discovery stopped or re-enable
 * advertising depending on why scanning ended. */
1122 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1123 struct sk_buff *skb)
1125 struct hci_cp_le_set_scan_enable *cp;
1126 __u8 status = *((__u8 *) skb->data);
1128 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1133 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1137 switch (cp->enable) {
1138 case LE_SCAN_ENABLE:
1139 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1140 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1141 clear_pending_adv_report(hdev);
1144 case LE_SCAN_DISABLE:
1145 /* We do this here instead of when setting DISCOVERY_STOPPED
1146 * since the latter would potentially require waiting for
1147 * inquiry to stop too.
1149 if (has_pending_adv_report(hdev)) {
1150 struct discovery_state *d = &hdev->discovery;
1152 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1153 d->last_adv_addr_type, NULL,
1154 d->last_adv_rssi, d->last_adv_flags,
1156 d->last_adv_data_len, NULL, 0);
1159 /* Cancel this timer so that we don't try to disable scanning
1160 * when it's already disabled.
1162 cancel_delayed_work(&hdev->le_scan_disable);
1164 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1166 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1167 * interrupted scanning due to a connect request. Mark
1168 * therefore discovery as stopped. If this was not
1169 * because of a connect request advertising might have
1170 * been disabled because of active scanning, so
1171 * re-enable it again if necessary.
1173 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1175 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1176 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1177 hdev->discovery.state == DISCOVERY_FINDING)
1178 mgmt_reenable_advertising(hdev);
1183 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
/* LE Read White List Size completed: cache the controller's capacity. */
1188 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1189 struct sk_buff *skb)
1191 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1193 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1198 hdev->le_white_list_size = rp->size;
/* LE Clear White List completed: empty our shadow copy of the list. */
1201 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1202 struct sk_buff *skb)
1204 __u8 status = *((__u8 *) skb->data);
1206 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1211 hci_bdaddr_list_clear(&hdev->le_white_list);
/* LE Add Device To White List completed: mirror the addition into our
 * shadow list using the address from the command we sent. */
1214 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1215 struct sk_buff *skb)
1217 struct hci_cp_le_add_to_white_list *sent;
1218 __u8 status = *((__u8 *) skb->data);
1220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1225 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1229 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
/* LE Remove Device From White List completed: mirror the removal into
 * our shadow list. */
1233 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1234 struct sk_buff *skb)
1236 struct hci_cp_le_del_from_white_list *sent;
1237 __u8 status = *((__u8 *) skb->data);
1239 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1244 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1248 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
/* LE Read Supported States completed: cache the 8-byte state bitmap. */
1252 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1253 struct sk_buff *skb)
1255 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1257 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1262 memcpy(hdev->le_states, rp->le_states, 8);
/* Write LE Host Supported completed: sync the LMP host-feature bits
 * (LE and simultaneous LE/BR-EDR) and the HCI_LE_ENABLED flag with what
 * we sent; disabling LE also clears HCI_ADVERTISING. */
1265 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1266 struct sk_buff *skb)
1268 struct hci_cp_write_le_host_supported *sent;
1269 __u8 status = *((__u8 *) skb->data);
1271 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1276 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1281 hdev->features[1][0] |= LMP_HOST_LE;
1282 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1284 hdev->features[1][0] &= ~LMP_HOST_LE;
1285 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1286 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1290 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1292 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
/* LE Set Advertising Parameters completed: remember which own-address
 * type advertising uses. */
1295 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1297 struct hci_cp_le_set_adv_param *cp;
1298 u8 status = *((u8 *) skb->data);
1300 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1305 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1310 hdev->adv_addr_type = cp->own_address_type;
1311 hci_dev_unlock(hdev);
/* Write Remote AMP ASSOC completed: continue streaming the next assoc
 * fragment for this physical link. */
1314 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1315 struct sk_buff *skb)
1317 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1319 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1320 hdev->name, rp->status, rp->phy_handle);
1325 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
/* Read RSSI completed: store the RSSI on the matching connection. */
1328 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1330 struct hci_rp_read_rssi *rp = (void *) skb->data;
1331 struct hci_conn *conn;
1333 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1340 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1342 conn->rssi = rp->rssi;
1344 hci_dev_unlock(hdev);
/* Read Transmit Power Level completed: the sent command's type selects
 * whether this is the current or maximum level, stored accordingly on
 * the matching connection. */
1347 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1349 struct hci_cp_read_tx_power *sent;
1350 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1351 struct hci_conn *conn;
1353 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1358 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1364 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1368 switch (sent->type) {
1370 conn->tx_power = rp->tx_power;
1373 conn->max_tx_power = rp->tx_power;
1378 hci_dev_unlock(hdev);
/* Command Status for HCI Inquiry: retry pending connections and mark the
 * device as actively inquiring. */
1381 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1383 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1386 hci_conn_check_pending(hdev);
1390 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for Create Connection. On failure for an outgoing
 * connection: close it unless the error is 0x0c (command disallowed) and
 * we still have attempts left, in which case it is parked in BT_CONNECT2
 * for a retry. On success with no hcon yet, create one as master. */
1393 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1395 struct hci_cp_create_conn *cp;
1396 struct hci_conn *conn;
1398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1400 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1406 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1408 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1411 if (conn && conn->state == BT_CONNECT) {
1412 if (status != 0x0c || conn->attempt > 2) {
1413 conn->state = BT_CLOSED;
1414 hci_proto_connect_cfm(conn, status);
1417 conn->state = BT_CONNECT2;
1421 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1424 set_bit(HCI_CONN_MASTER, &conn->flags);
1426 BT_ERR("No memory for new connection");
1430 hci_dev_unlock(hdev);
/* Command Status for Add SCO Connection: on failure, find the parent ACL
 * by handle and fail its attached SCO link. */
1433 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1435 struct hci_cp_add_sco *cp;
1436 struct hci_conn *acl, *sco;
1439 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1444 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1448 handle = __le16_to_cpu(cp->handle);
1450 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1454 acl = hci_conn_hash_lookup_handle(hdev, handle);
1458 sco->state = BT_CLOSED;
1460 hci_proto_connect_cfm(sco, status);
1465 hci_dev_unlock(hdev);
/* Command Status for Authentication Requested: on failure during the
 * config phase, confirm the failure upward and drop the reference. */
1468 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1470 struct hci_cp_auth_requested *cp;
1471 struct hci_conn *conn;
1473 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1478 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1484 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1486 if (conn->state == BT_CONFIG) {
1487 hci_proto_connect_cfm(conn, status);
1488 hci_conn_drop(conn);
1492 hci_dev_unlock(hdev);
/* Command Status for Set Connection Encryption: same failure handling as
 * authentication — confirm failure and drop during config. */
1495 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1497 struct hci_cp_set_conn_encrypt *cp;
1498 struct hci_conn *conn;
1500 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1505 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1511 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1513 if (conn->state == BT_CONFIG) {
1514 hci_proto_connect_cfm(conn, status);
1515 hci_conn_drop(conn);
1519 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication:
 * only outgoing links in BT_CONFIG above SDP security, and for non-SSP
 * peers without MITM only when the pending level is MEDIUM or higher. */
1522 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1523 struct hci_conn *conn)
1525 if (conn->state != BT_CONFIG || !conn->out)
1528 if (conn->pending_sec_level == BT_SECURITY_SDP)
1531 /* Only request authentication for SSP connections or non-SSP
1532 * devices with sec_level MEDIUM or HIGH or if MITM protection
1535 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1536 conn->pending_sec_level != BT_SECURITY_FIPS &&
1537 conn->pending_sec_level != BT_SECURITY_HIGH &&
1538 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan/clock-offset hints captured during inquiry. */
1544 static int hci_resolve_name(struct hci_dev *hdev,
1545 struct inquiry_entry *e)
1547 struct hci_cp_remote_name_req cp;
1549 memset(&cp, 0, sizeof(cp));
1551 bacpy(&cp.bdaddr, &e->data.bdaddr);
1552 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1553 cp.pscan_mode = e->data.pscan_mode;
1554 cp.clock_offset = e->data.clock_offset;
1556 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device that still
 * needs it.
 *
 * Returns false when the resolve list is empty (and presumably when no
 * NAME_NEEDED entry exists or sending fails — those paths are elided in
 * this view); on success marks the entry NAME_PENDING.
 */
1559 static bool hci_resolve_next_name(struct hci_dev *hdev)
1561 struct discovery_state *discov = &hdev->discovery;
1562 struct inquiry_entry *e;
1564 if (list_empty(&discov->resolve))
/* BDADDR_ANY: take any cache entry whose name is still needed. */
1567 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1571 if (hci_resolve_name(hdev, e) == 0) {
1572 e->name_state = NAME_PENDING;
/* Process the outcome of a remote-name lookup during discovery.
 *
 * If @conn exists and was not yet reported to mgmt, emit the connected
 * event (carrying the name when known).  Then advance the discovery
 * state machine: resolve the next pending name, or mark discovery
 * stopped when finishing/stopping.  @name may be NULL when the lookup
 * failed (name_len 0).
 */
1579 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1580 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1582 struct discovery_state *discov = &hdev->discovery;
1583 struct inquiry_entry *e;
1585 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1586 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1587 name_len, conn->dev_class);
1589 if (discov->state == DISCOVERY_STOPPED)
1592 if (discov->state == DISCOVERY_STOPPING)
1593 goto discov_complete;
1595 if (discov->state != DISCOVERY_RESOLVING)
1598 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1599 /* If the device was not found in a list of found devices names of which
1600 * are pending. there is no need to continue resolving a next name as it
1601 * will be done upon receiving another Remote Name Request Complete
/* Record the result on the cache entry and tell mgmt when known. */
1608 e->name_state = NAME_KNOWN;
1609 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1610 e->data.rssi, name, name_len);
1612 e->name_state = NAME_NOT_KNOWN;
1615 if (hci_resolve_next_name(hdev))
1619 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * On failure (success waits for the Remote Name Request Complete event
 * instead), finish any pending mgmt name lookup for the address and, if
 * an outgoing connection still needs authentication, send the Auth
 * Request now so connection setup can proceed.
 */
1622 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1624 struct hci_cp_remote_name_req *cp;
1625 struct hci_conn *conn;
1627 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1629 /* If successful wait for the name req complete event before
1630 * checking for the need to do authentication */
1634 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1640 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1642 if (test_bit(HCI_MGMT, &hdev->dev_flags))
/* NULL name / zero length: resolution failed for this address. */
1643 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1648 if (!hci_outgoing_auth_needed(hdev, conn))
1651 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1652 struct hci_cp_auth_requested auth_cp;
1654 auth_cp.handle = __cpu_to_le16(conn->handle);
1655 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1656 sizeof(auth_cp), &auth_cp);
1660 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.
 *
 * Looks up the connection from the sent command's handle; if it was
 * still in BT_CONFIG, confirm the connect attempt with the status and
 * drop the reference taken for the request.
 */
1663 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1665 struct hci_cp_read_remote_features *cp;
1666 struct hci_conn *conn;
1668 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1673 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1679 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1681 if (conn->state == BT_CONFIG) {
1682 hci_proto_connect_cfm(conn, status);
1683 hci_conn_drop(conn);
1687 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 *
 * Identical pattern to hci_cs_read_remote_features(), but for the
 * extended-features variant of the command.
 */
1690 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1692 struct hci_cp_read_remote_ext_features *cp;
1693 struct hci_conn *conn;
1695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1700 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1706 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1708 if (conn->state == BT_CONFIG) {
1709 hci_proto_connect_cfm(conn, status);
1710 hci_conn_drop(conn);
1714 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * The handle in the sent command identifies the ACL link the sync (SCO/
 * eSCO) connection hangs off.  On failure the linked SCO connection is
 * closed and the upper protocol is notified.  Lookup of the attached
 * sco connection from acl is on lines elided from this view.
 */
1717 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1719 struct hci_cp_setup_sync_conn *cp;
1720 struct hci_conn *acl, *sco;
1723 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1728 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1732 handle = __le16_to_cpu(cp->handle);
1734 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1738 acl = hci_conn_hash_lookup_handle(hdev, handle);
1742 sco->state = BT_CLOSED;
1744 hci_proto_connect_cfm(sco, status);
1749 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SNIFF_MODE.
 *
 * Clears the pending mode-change flag on the target connection and, if
 * a SCO setup was waiting on the mode change, runs it now with the
 * command status.
 */
1752 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1754 struct hci_cp_sniff_mode *cp;
1755 struct hci_conn *conn;
1757 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1762 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1768 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1770 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1772 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1773 hci_sco_setup(conn, status);
1776 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Mirror image of hci_cs_sniff_mode(): clear the pending mode-change
 * flag and resume any deferred SCO setup.
 */
1779 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1781 struct hci_cp_exit_sniff_mode *cp;
1782 struct hci_conn *conn;
1784 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1789 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1795 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1797 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1799 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1800 hci_sco_setup(conn, status);
1803 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_DISCONNECT.
 *
 * Reports a failed disconnect attempt to the mgmt layer so userspace
 * learns the link could not be torn down (the !status early-return is
 * elided from this view).
 */
1806 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1808 struct hci_cp_disconnect *cp;
1809 struct hci_conn *conn;
1814 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1820 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1822 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1823 conn->dst_type, status);
1825 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP physical link).
 *
 * On the error path (elided here) the hcon for the phy handle is looked
 * up and presumably cleaned up; otherwise the AMP remote assoc write is
 * started for the new physical link.
 */
1828 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1830 struct hci_cp_create_phy_link *cp;
1832 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1834 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1841 struct hci_conn *hcon;
/* phy_handle is a single byte, so no endian conversion needed. */
1843 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1847 amp_write_remote_assoc(hdev, cp->phy_handle);
1850 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK (AMP).
 *
 * After the controller accepts the physical link, continue the AMP
 * setup by writing the remote assoc for the phy handle.
 */
1853 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1855 struct hci_cp_accept_phy_link *cp;
1857 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1862 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1866 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 *
 * Failure handling lives in hci_le_conn_failed() via the request
 * completion callbacks, so this only records SMP-relevant address
 * information on the pending connection and arms a connection timeout
 * for direct (non-white-list) connection attempts.
 */
1869 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1871 struct hci_cp_le_create_conn *cp;
1872 struct hci_conn *conn;
1874 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1876 /* All connection failure handling is taken care of by the
1877 * hci_le_conn_failed function which is triggered by the HCI
1878 * request completion callbacks used for connecting.
1883 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1889 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1893 /* Store the initiator and responder address information which
1894 * is needed for SMP. These values will not change during the
1895 * lifetime of the connection.
/* Initiator address: our own address as used in the request. */
1897 conn->init_addr_type = cp->own_address_type;
1898 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1899 bacpy(&conn->init_addr, &hdev->random_addr);
1901 bacpy(&conn->init_addr, &hdev->bdaddr);
/* Responder address: the peer we are connecting to. */
1903 conn->resp_addr_type = cp->peer_addr_type;
1904 bacpy(&conn->resp_addr, &cp->peer_addr);
1906 /* We don't want the connection attempt to stick around
1907 * indefinitely since LE doesn't have a page timeout concept
1908 * like BR/EDR. Set a timer for any connection that doesn't use
1909 * the white list for connecting.
1911 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1912 queue_delayed_work(conn->hdev->workqueue,
1913 &conn->le_conn_timeout,
1914 conn->conn_timeout);
1917 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_START_ENC.
 *
 * On failure, if the link is still up, tear it down: a failed start of
 * LE encryption is treated as an authentication failure.
 */
1920 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1922 struct hci_cp_le_start_enc *cp;
1923 struct hci_conn *conn;
1925 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1932 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1936 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1940 if (conn->state != BT_CONNECTED)
1943 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1944 hci_conn_drop(conn);
1947 hci_dev_unlock(hdev);
/* Handle the Inquiry Complete event.
 *
 * Clears HCI_INQUIRY (waking any waiters), retries pending connection
 * attempts, and advances mgmt discovery: start resolving names if any
 * are outstanding, otherwise mark discovery stopped.
 */
1950 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1952 __u8 status = *((__u8 *) skb->data);
1953 struct discovery_state *discov = &hdev->discovery;
1954 struct inquiry_entry *e;
1956 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1958 hci_conn_check_pending(hdev);
1960 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1963 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
1964 wake_up_bit(&hdev->flags, HCI_INQUIRY)<
1966 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1971 if (discov->state != DISCOVERY_FINDING)
1974 if (list_empty(&discov->resolve)) {
1975 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names still unresolved: start resolving, else stop discovery. */
1979 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1980 if (e && hci_resolve_name(hdev, e) == 0) {
1981 e->name_state = NAME_PENDING;
1982 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1984 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1988 hci_dev_unlock(hdev);
/* Handle the (classic, non-RSSI) Inquiry Result event.
 *
 * The first byte of the event is the response count, followed by that
 * many inquiry_info records.  Each is copied into the inquiry cache and
 * reported to mgmt as a found device.  Periodic-inquiry results are
 * skipped for mgmt reporting.
 */
1991 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1993 struct inquiry_data data;
1994 struct inquiry_info *info = (void *) (skb->data + 1);
1995 int num_rsp = *((__u8 *) skb->data);
1997 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2002 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2007 for (; num_rsp; num_rsp--, info++) {
2010 bacpy(&data.bdaddr, &info->bdaddr);
2011 data.pscan_rep_mode = info->pscan_rep_mode;
2012 data.pscan_period_mode = info->pscan_period_mode;
2013 data.pscan_mode = info->pscan_mode;
2014 memcpy(data.dev_class, info->dev_class, 3);
2015 data.clock_offset = info->clock_offset;
/* This event variant carries no RSSI and no SSP/EIR data. */
2017 data.ssp_mode = 0x00;
2019 flags = hci_inquiry_cache_update(hdev, &data, false);
2021 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2022 info->dev_class, 0, flags, NULL, 0, NULL, 0);
2025 hci_dev_unlock(hdev);
/* Handle the Connection Complete event.
 *
 * Looks up the pending hci_conn by address (falling back from SCO to
 * ESCO for sync links), records the assigned handle, transitions the
 * connection to BT_CONFIG/BT_CONNECTED on success (kicking off remote
 * feature reads and optional packet-type setup) or BT_CLOSED on
 * failure, and notifies mgmt and the upper protocols.  Several guard
 * lines (status checks, lock, cleanup) are elided in this view.
 */
2028 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2030 struct hci_ev_conn_complete *ev = (void *) skb->data;
2031 struct hci_conn *conn;
2033 BT_DBG("%s", hdev->name);
2037 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2039 if (ev->link_type != SCO_LINK)
/* SCO request may have been sent as eSCO; retry under ESCO_LINK. */
2042 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr)<
2046 conn->type = SCO_LINK;
2050 conn->handle = __le16_to_cpu(ev->handle);
2052 if (conn->type == ACL_LINK) {
2053 conn->state = BT_CONFIG;
2054 hci_conn_hold(conn);
/* Incoming legacy (pre-SSP) link with no stored key: keep the
 * connection around long enough for pairing to happen. */
2056 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2057 !hci_find_link_key(hdev, &ev->bdaddr))
2058 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2060 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2062 conn->state = BT_CONNECTED;
2064 hci_conn_add_sysfs(conn);
2066 if (test_bit(HCI_AUTH, &hdev->flags))
2067 set_bit(HCI_CONN_AUTH, &conn->flags);
2069 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2070 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2072 /* Get remote features */
2073 if (conn->type == ACL_LINK) {
2074 struct hci_cp_read_remote_features cp;
2075 cp.handle = ev->handle;
2076 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2080 /* Set packet type for incoming connection */
2081 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2082 struct hci_cp_change_conn_ptype cp;
2083 cp.handle = ev->handle;
2084 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2085 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close the connection and tell mgmt for ACL links. */
2089 conn->state = BT_CLOSED;
2090 if (conn->type == ACL_LINK)
2091 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2092 conn->dst_type, ev->status);
2095 if (conn->type == ACL_LINK)
2096 hci_sco_setup(conn, ev->status);
2099 hci_proto_connect_cfm(conn, ev->status);
2101 } else if (ev->link_type != ACL_LINK)
2102 hci_proto_connect_cfm(conn, ev->status);
2105 hci_dev_unlock(hdev);
2107 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from @bdaddr with
 * "rejected due to unacceptable BD_ADDR" as the reason.
 */
2110 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2112 struct hci_cp_reject_conn_req cp;
2114 bacpy(&cp.bdaddr, bdaddr);
2115 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2116 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Handle the Connection Request event (incoming connection).
 *
 * Policy: the request is rejected if no protocol accepts it, if the
 * peer is blacklisted, or if connectable mode requires a whitelist
 * entry that is missing.  Otherwise the inquiry cache is refreshed, a
 * hci_conn is created if needed, and the request is accepted either
 * immediately (ACL, or SCO on non-eSCO controllers) or as a sync
 * connection; deferred setups go to BT_CONNECT2 for the protocol to
 * decide.
 */
2119 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2121 struct hci_ev_conn_request *ev = (void *) skb->data;
2122 int mask = hdev->link_mode;
2123 struct inquiry_entry *ie;
2124 struct hci_conn *conn;
2127 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Let registered protocols veto or adjust the link-mode mask. */
2130 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2133 if (!(mask & HCI_LM_ACCEPT)) {
2134 hci_reject_conn(hdev, &ev->bdaddr);
2138 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
2139 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2141 hci_reject_conn(hdev, &ev->bdaddr);
/* Not connectable: only whitelisted peers may connect. */
2145 if (!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2147 hci_reject_conn(hdev, &ev->bdaddr);
2152 /* Connection accepted */
2156 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2158 memcpy(ie->data.dev_class, ev->dev_class, 3);
2160 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2163 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2165 BT_ERR("No memory for new connection");
2166 hci_dev_unlock(hdev);
2171 memcpy(conn->dev_class, ev->dev_class, 3);
2173 hci_dev_unlock(hdev);
2175 if (ev->link_type == ACL_LINK ||
2176 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2177 struct hci_cp_accept_conn_req cp;
2178 conn->state = BT_CONNECT;
2180 bacpy(&cp.bdaddr, &ev->bdaddr);
2182 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2183 cp.role = 0x00; /* Become master */
2185 cp.role = 0x01; /* Remain slave */
2187 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2188 } else if (!(flags & HCI_PROTO_DEFER)) {
2189 struct hci_cp_accept_sync_conn_req cp;
2190 conn->state = BT_CONNECT;
2192 bacpy(&cp.bdaddr, &ev->bdaddr);
2193 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Fixed 8 kB/s bandwidth; latency/retransmission left to the
 * controller ("don't care" values). */
2195 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2196 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2197 cp.max_latency = cpu_to_le16(0xffff);
2198 cp.content_format = cpu_to_le16(hdev->voice_setting);
2199 cp.retrans_effort = 0xff;
2201 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* HCI_PROTO_DEFER: let the protocol accept/reject later. */
2204 conn->state = BT_CONNECT2;
2205 hci_proto_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code to the corresponding mgmt
 * MGMT_DEV_DISCONN_* constant reported to userspace.
 */
2209 static u8 hci_to_mgmt_reason(u8 err)
2212 case HCI_ERROR_CONNECTION_TIMEOUT:
2213 return MGMT_DEV_DISCONN_TIMEOUT;
2214 case HCI_ERROR_REMOTE_USER_TERM:
2215 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2216 case HCI_ERROR_REMOTE_POWER_OFF:
2217 return MGMT_DEV_DISCONN_REMOTE;
2218 case HCI_ERROR_LOCAL_HOST_TERM:
2219 return MGMT_DEV_DISCONN_LOCAL_HOST;
/* Anything else is reported as unknown. */
2221 return MGMT_DEV_DISCONN_UNKNOWN;
/* Handle the Disconnection Complete event.
 *
 * Marks the connection closed, reports the disconnect (or failure) to
 * mgmt, removes the link key when flagged, re-arms LE auto-connect
 * parameters where configured, notifies the protocols, and re-enables
 * LE advertising if the controller stopped it for this connection.
 */
2225 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2227 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2228 u8 reason = hci_to_mgmt_reason(ev->reason);
2229 struct hci_conn_params *params;
2230 struct hci_conn *conn;
2231 bool mgmt_connected;
2234 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2238 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status: the disconnect itself failed. */
2243 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2244 conn->dst_type, ev->status);
2248 conn->state = BT_CLOSED;
2250 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2251 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2252 reason, mgmt_connected);
2254 if (conn->type == ACL_LINK &&
2255 test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2256 hci_remove_link_key(hdev, &conn->dst);
2258 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2260 switch (params->auto_connect) {
2261 case HCI_AUTO_CONN_LINK_LOSS:
/* Only reconnect on link loss for actual supervision timeouts. */
2262 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2266 case HCI_AUTO_CONN_ALWAYS:
2267 list_del_init(&params->action);
2268 list_add(&params->action, &hdev->pend_le_conns);
2269 hci_update_background_scan(hdev);
2279 hci_proto_disconn_cfm(conn, ev->reason);
2282 /* Re-enable advertising if necessary, since it might
2283 * have been disabled by the connection. From the
2284 * HCI_LE_Set_Advertise_Enable command description in
2285 * the core specification (v4.0):
2286 * "The Controller shall continue advertising until the Host
2287 * issues an LE_Set_Advertise_Enable command with
2288 * Advertising_Enable set to 0x00 (Advertising is disabled)
2289 * or until a connection is created or until the Advertising
2290 * is timed out due to Directed Advertising."
2292 if (type == LE_LINK)
2293 mgmt_reenable_advertising(hdev);
2296 hci_dev_unlock(hdev);
/* Handle the Authentication Complete event.
 *
 * Updates the connection's auth state and security level, rejects
 * re-authentication attempts on legacy (non-SSP) devices, then either
 * continues connection setup (start encryption on SSP links) or reports
 * the auth result.  A pending Set Connection Encrypt request is issued
 * afterwards if one was queued.
 */
2299 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2301 struct hci_ev_auth_complete *ev = (void *) skb->data;
2302 struct hci_conn *conn;
2304 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2308 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Legacy devices cannot re-authenticate an existing link. */
2313 if (!hci_conn_ssp_enabled(conn) &&
2314 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2315 BT_INFO("re-auth of legacy device is not possible.");
2317 set_bit(HCI_CONN_AUTH, &conn->flags);
2318 conn->sec_level = conn->pending_sec_level;
2321 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2325 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2326 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2328 if (conn->state == BT_CONFIG) {
2329 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2330 struct hci_cp_set_conn_encrypt cp;
2331 cp.handle = ev->handle;
2333 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2336 conn->state = BT_CONNECTED;
2337 hci_proto_connect_cfm(conn, ev->status);
2338 hci_conn_drop(conn);
2341 hci_auth_cfm(conn, ev->status);
2343 hci_conn_hold(conn);
2344 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2345 hci_conn_drop(conn);
2348 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2350 struct hci_cp_set_conn_encrypt cp;
2351 cp.handle = ev->handle;
2353 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2356 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2357 hci_encrypt_cfm(conn, ev->status, 0x00);
2362 hci_dev_unlock(hdev);
/* Handle the Remote Name Request Complete event.
 *
 * Feeds the (possibly failed) name result into the discovery name
 * resolution machinery, then — like hci_cs_remote_name_req() — sends an
 * Authentication Request if the outgoing connection still needs it.
 */
2365 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2367 struct hci_ev_remote_name *ev = (void *) skb->data;
2368 struct hci_conn *conn;
2370 BT_DBG("%s", hdev->name);
2372 hci_conn_check_pending(hdev);
2376 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2378 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2381 if (ev->status == 0)
/* Name is not guaranteed NUL-terminated; bound the length. */
2382 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2383 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2385 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2391 if (!hci_outgoing_auth_needed(hdev, conn))
2394 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2395 struct hci_cp_auth_requested cp;
2396 cp.handle = __cpu_to_le16(conn->handle);
2397 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2401 hci_dev_unlock(hdev);
/* Handle the Encryption Change event.
 *
 * Updates the connection's auth/encrypt/FIPS/AES-CCM flags from the new
 * encryption state, disconnects on encryption failure of an established
 * link, enforces Secure Connections Only policy during setup, and
 * notifies the protocols of the new encryption state.
 */
2404 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2406 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2407 struct hci_conn *conn;
2409 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2413 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2419 /* Encryption implies authentication */
2420 set_bit(HCI_CONN_AUTH, &conn->flags);
2421 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2422 conn->sec_level = conn->pending_sec_level;
2424 /* P-256 authentication key implies FIPS */
2425 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2426 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM. */
2428 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2429 conn->type == LE_LINK)
2430 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2432 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2433 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2437 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2439 if (ev->status && conn->state == BT_CONNECTED) {
/* Encryption failed on a live link: treat as auth failure. */
2440 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2441 hci_conn_drop(conn);
2445 if (conn->state == BT_CONFIG) {
2447 conn->state = BT_CONNECTED;
2449 /* In Secure Connections Only mode, do not allow any
2450 * connections that are not encrypted with AES-CCM
2451 * using a P-256 authenticated combination key.
2453 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2454 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2455 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2456 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2457 hci_conn_drop(conn);
2461 hci_proto_connect_cfm(conn, ev->status);
2462 hci_conn_drop(conn);
2464 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2467 hci_dev_unlock(hdev);
/* Handle the Change Connection Link Key Complete event.
 *
 * On success marks the connection as using a fresh (secure) link key,
 * clears the pending-auth flag and notifies the protocols.
 */
2470 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2471 struct sk_buff *skb)
2473 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2474 struct hci_conn *conn;
2476 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2480 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2483 set_bit(HCI_CONN_SECURE, &conn->flags);
2485 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2487 hci_key_change_cfm(conn, ev->status);
2490 hci_dev_unlock(hdev);
/* Handle the Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the peer's features.  While still configuring: on
 * SSP-capable links continue with the extended-features read; otherwise
 * resolve the remote name (or report connected to mgmt) and finish
 * setup if no further authentication is needed.
 */
2493 static void hci_remote_features_evt(struct hci_dev *hdev,
2494 struct sk_buff *skb)
2496 struct hci_ev_remote_features *ev = (void *) skb->data;
2497 struct hci_conn *conn;
2499 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2503 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 0 of the remote feature mask. */
2508 memcpy(conn->features[0], ev->features, 8);
2510 if (conn->state != BT_CONFIG)
2513 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2514 struct hci_cp_read_remote_ext_features cp;
2515 cp.handle = ev->handle;
2517 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2522 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2523 struct hci_cp_remote_name_req cp;
2524 memset(&cp, 0, sizeof(cp));
2525 bacpy(&cp.bdaddr, &conn->dst);
2526 cp.pscan_rep_mode = 0x02;
2527 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2528 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2529 mgmt_device_connected(hdev, &conn->dst, conn->type,
2530 conn->dst_type, 0, NULL, 0,
2533 if (!hci_outgoing_auth_needed(hdev, conn)) {
2534 conn->state = BT_CONNECTED;
2535 hci_proto_connect_cfm(conn, ev->status);
2536 hci_conn_drop(conn);
2540 hci_dev_unlock(hdev);
/* Handle the Command Complete event.
 *
 * Dispatches on the completed opcode to the matching hci_cc_* handler
 * (each consumes the command-specific return parameters from @skb),
 * completes any matching HCI request, cancels the command timeout, and
 * restarts the command queue when the controller advertises credits
 * (ev->ncmd) and no reset is in progress.
 */
2543 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2545 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* First byte after the event header is the command status. */
2546 u8 status = skb->data[sizeof(*ev)];
2549 skb_pull(skb, sizeof(*ev));
2551 opcode = __le16_to_cpu(ev->opcode);
2554 case HCI_OP_INQUIRY_CANCEL:
2555 hci_cc_inquiry_cancel(hdev, skb);
2558 case HCI_OP_PERIODIC_INQ:
2559 hci_cc_periodic_inq(hdev, skb);
2562 case HCI_OP_EXIT_PERIODIC_INQ:
2563 hci_cc_exit_periodic_inq(hdev, skb);
2566 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2567 hci_cc_remote_name_req_cancel(hdev, skb);
2570 case HCI_OP_ROLE_DISCOVERY:
2571 hci_cc_role_discovery(hdev, skb);
2574 case HCI_OP_READ_LINK_POLICY:
2575 hci_cc_read_link_policy(hdev, skb);
2578 case HCI_OP_WRITE_LINK_POLICY:
2579 hci_cc_write_link_policy(hdev, skb);
2582 case HCI_OP_READ_DEF_LINK_POLICY:
2583 hci_cc_read_def_link_policy(hdev, skb);
2586 case HCI_OP_WRITE_DEF_LINK_POLICY:
2587 hci_cc_write_def_link_policy(hdev, skb);
2591 hci_cc_reset(hdev, skb);
2594 case HCI_OP_WRITE_LOCAL_NAME:
2595 hci_cc_write_local_name(hdev, skb);
2598 case HCI_OP_READ_LOCAL_NAME:
2599 hci_cc_read_local_name(hdev, skb);
2602 case HCI_OP_WRITE_AUTH_ENABLE:
2603 hci_cc_write_auth_enable(hdev, skb);
2606 case HCI_OP_WRITE_ENCRYPT_MODE:
2607 hci_cc_write_encrypt_mode(hdev, skb);
2610 case HCI_OP_WRITE_SCAN_ENABLE:
2611 hci_cc_write_scan_enable(hdev, skb);
2614 case HCI_OP_READ_CLASS_OF_DEV:
2615 hci_cc_read_class_of_dev(hdev, skb);
2618 case HCI_OP_WRITE_CLASS_OF_DEV:
2619 hci_cc_write_class_of_dev(hdev, skb);
2622 case HCI_OP_READ_VOICE_SETTING:
2623 hci_cc_read_voice_setting(hdev, skb);
2626 case HCI_OP_WRITE_VOICE_SETTING:
2627 hci_cc_write_voice_setting(hdev, skb);
2630 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2631 hci_cc_read_num_supported_iac(hdev, skb);
2634 case HCI_OP_WRITE_SSP_MODE:
2635 hci_cc_write_ssp_mode(hdev, skb);
2638 case HCI_OP_WRITE_SC_SUPPORT:
2639 hci_cc_write_sc_support(hdev, skb);
2642 case HCI_OP_READ_LOCAL_VERSION:
2643 hci_cc_read_local_version(hdev, skb);
2646 case HCI_OP_READ_LOCAL_COMMANDS:
2647 hci_cc_read_local_commands(hdev, skb);
2650 case HCI_OP_READ_LOCAL_FEATURES:
2651 hci_cc_read_local_features(hdev, skb);
2654 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2655 hci_cc_read_local_ext_features(hdev, skb);
2658 case HCI_OP_READ_BUFFER_SIZE:
2659 hci_cc_read_buffer_size(hdev, skb);
2662 case HCI_OP_READ_BD_ADDR:
2663 hci_cc_read_bd_addr(hdev, skb);
2666 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2667 hci_cc_read_page_scan_activity(hdev, skb);
2670 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2671 hci_cc_write_page_scan_activity(hdev, skb);
2674 case HCI_OP_READ_PAGE_SCAN_TYPE:
2675 hci_cc_read_page_scan_type(hdev, skb);
2678 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2679 hci_cc_write_page_scan_type(hdev, skb);
2682 case HCI_OP_READ_DATA_BLOCK_SIZE:
2683 hci_cc_read_data_block_size(hdev, skb);
2686 case HCI_OP_READ_FLOW_CONTROL_MODE:
2687 hci_cc_read_flow_control_mode(hdev, skb);
2690 case HCI_OP_READ_LOCAL_AMP_INFO:
2691 hci_cc_read_local_amp_info(hdev, skb);
2694 case HCI_OP_READ_CLOCK:
2695 hci_cc_read_clock(hdev, skb);
2698 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2699 hci_cc_read_local_amp_assoc(hdev, skb);
2702 case HCI_OP_READ_INQ_RSP_TX_POWER:
2703 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2706 case HCI_OP_PIN_CODE_REPLY:
2707 hci_cc_pin_code_reply(hdev, skb);
2710 case HCI_OP_PIN_CODE_NEG_REPLY:
2711 hci_cc_pin_code_neg_reply(hdev, skb);
2714 case HCI_OP_READ_LOCAL_OOB_DATA:
2715 hci_cc_read_local_oob_data(hdev, skb);
2718 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2719 hci_cc_read_local_oob_ext_data(hdev, skb);
2722 case HCI_OP_LE_READ_BUFFER_SIZE:
2723 hci_cc_le_read_buffer_size(hdev, skb);
2726 case HCI_OP_LE_READ_LOCAL_FEATURES:
2727 hci_cc_le_read_local_features(hdev, skb);
2730 case HCI_OP_LE_READ_ADV_TX_POWER:
2731 hci_cc_le_read_adv_tx_power(hdev, skb);
2734 case HCI_OP_USER_CONFIRM_REPLY:
2735 hci_cc_user_confirm_reply(hdev, skb);
2738 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2739 hci_cc_user_confirm_neg_reply(hdev, skb);
2742 case HCI_OP_USER_PASSKEY_REPLY:
2743 hci_cc_user_passkey_reply(hdev, skb);
2746 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2747 hci_cc_user_passkey_neg_reply(hdev, skb);
2750 case HCI_OP_LE_SET_RANDOM_ADDR:
2751 hci_cc_le_set_random_addr(hdev, skb);
2754 case HCI_OP_LE_SET_ADV_ENABLE:
2755 hci_cc_le_set_adv_enable(hdev, skb);
2758 case HCI_OP_LE_SET_SCAN_PARAM:
2759 hci_cc_le_set_scan_param(hdev, skb);
2762 case HCI_OP_LE_SET_SCAN_ENABLE:
2763 hci_cc_le_set_scan_enable(hdev, skb);
2766 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2767 hci_cc_le_read_white_list_size(hdev, skb);
2770 case HCI_OP_LE_CLEAR_WHITE_LIST:
2771 hci_cc_le_clear_white_list(hdev, skb);
2774 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2775 hci_cc_le_add_to_white_list(hdev, skb);
2778 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2779 hci_cc_le_del_from_white_list(hdev, skb);
2782 case HCI_OP_LE_READ_SUPPORTED_STATES:
2783 hci_cc_le_read_supported_states(hdev, skb);
2786 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2787 hci_cc_write_le_host_supported(hdev, skb);
2790 case HCI_OP_LE_SET_ADV_PARAM:
2791 hci_cc_set_adv_param(hdev, skb);
2794 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2795 hci_cc_write_remote_amp_assoc(hdev, skb);
2798 case HCI_OP_READ_RSSI:
2799 hci_cc_read_rssi(hdev, skb);
2802 case HCI_OP_READ_TX_POWER:
2803 hci_cc_read_tx_power(hdev, skb);
/* Unhandled opcodes are only logged. */
2807 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2811 if (opcode != HCI_OP_NOP)
2812 cancel_delayed_work(&hdev->cmd_timer);
2814 hci_req_cmd_complete(hdev, opcode, status);
/* Controller granted command credits: resume the command queue. */
2816 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2817 atomic_set(&hdev->cmd_cnt, 1);
2818 if (!skb_queue_empty(&hdev->cmd_q))
2819 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the Command Status event.
 *
 * Dispatches to the hci_cs_* handler for the pending command, cancels
 * the command timeout, completes the HCI request when the command has
 * no dedicated completion event, and restarts the command queue when
 * credits are available.
 */
2823 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2825 struct hci_ev_cmd_status *ev = (void *) skb->data;
2828 skb_pull(skb, sizeof(*ev));
2830 opcode = __le16_to_cpu(ev->opcode);
2833 case HCI_OP_INQUIRY:
2834 hci_cs_inquiry(hdev, ev->status);
2837 case HCI_OP_CREATE_CONN:
2838 hci_cs_create_conn(hdev, ev->status);
2841 case HCI_OP_ADD_SCO:
2842 hci_cs_add_sco(hdev, ev->status);
2845 case HCI_OP_AUTH_REQUESTED:
2846 hci_cs_auth_requested(hdev, ev->status);
2849 case HCI_OP_SET_CONN_ENCRYPT:
2850 hci_cs_set_conn_encrypt(hdev, ev->status);
2853 case HCI_OP_REMOTE_NAME_REQ:
2854 hci_cs_remote_name_req(hdev, ev->status);
2857 case HCI_OP_READ_REMOTE_FEATURES:
2858 hci_cs_read_remote_features(hdev, ev->status);
2861 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2862 hci_cs_read_remote_ext_features(hdev, ev->status);
2865 case HCI_OP_SETUP_SYNC_CONN:
2866 hci_cs_setup_sync_conn(hdev, ev->status);
2869 case HCI_OP_SNIFF_MODE:
2870 hci_cs_sniff_mode(hdev, ev->status);
2873 case HCI_OP_EXIT_SNIFF_MODE:
2874 hci_cs_exit_sniff_mode(hdev, ev->status);
2877 case HCI_OP_DISCONNECT:
2878 hci_cs_disconnect(hdev, ev->status);
2881 case HCI_OP_CREATE_PHY_LINK:
2882 hci_cs_create_phylink(hdev, ev->status);
2885 case HCI_OP_ACCEPT_PHY_LINK:
2886 hci_cs_accept_phylink(hdev, ev->status);
2889 case HCI_OP_LE_CREATE_CONN:
2890 hci_cs_le_create_conn(hdev, ev->status);
2893 case HCI_OP_LE_START_ENC:
2894 hci_cs_le_start_enc(hdev, ev->status);
2898 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2902 if (opcode != HCI_OP_NOP)
2903 cancel_delayed_work(&hdev->cmd_timer);
/* Complete the request only when no later event is expected for it. */
2906 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2907 hci_req_cmd_complete(hdev, opcode, ev->status);
2909 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2910 atomic_set(&hdev->cmd_cnt, 1);
2911 if (!skb_queue_empty(&hdev->cmd_q))
2912 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the Role Change event.
 *
 * Updates the connection's master/slave flag from ev->role, clears the
 * pending role-switch flag and notifies the protocols of the result.
 */
2916 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2918 struct hci_ev_role_change *ev = (void *) skb->data;
2919 struct hci_conn *conn;
2921 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2925 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2929 clear_bit(HCI_CONN_MASTER, &conn->flags);
2931 set_bit(HCI_CONN_MASTER, &conn->flags);
2934 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2936 hci_role_switch_cfm(conn, ev->status, ev->role);
2939 hci_dev_unlock(hdev);
/* Handle the Number of Completed Packets event (packet-based flow
 * control).
 *
 * Validates the event length against num_hndl, then for each handle
 * credits the completed count back to the matching connection and to
 * the per-type (ACL/LE/SCO) device counters, clamped to the
 * controller-reported maximums, and finally kicks the TX work.
 */
2942 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2944 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2947 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2948 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Guard against a truncated event before touching ev->handles[]. */
2952 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2953 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2954 BT_DBG("%s bad parameters", hdev->name);
2958 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2960 for (i = 0; i < ev->num_hndl; i++) {
2961 struct hci_comp_pkts_info *info = &ev->handles[i];
2962 struct hci_conn *conn;
2963 __u16 handle, count;
2965 handle = __le16_to_cpu(info->handle);
2966 count = __le16_to_cpu(info->count);
2968 conn = hci_conn_hash_lookup_handle(hdev, handle);
2972 conn->sent -= count;
2974 switch (conn->type) {
2976 hdev->acl_cnt += count;
2977 if (hdev->acl_cnt > hdev->acl_pkts)
2978 hdev->acl_cnt = hdev->acl_pkts;
/* LE shares the ACL pool when no dedicated LE buffers exist. */
2982 if (hdev->le_pkts) {
2983 hdev->le_cnt += count;
2984 if (hdev->le_cnt > hdev->le_pkts)
2985 hdev->le_cnt = hdev->le_pkts;
2987 hdev->acl_cnt += count;
2988 if (hdev->acl_cnt > hdev->acl_pkts)
2989 hdev->acl_cnt = hdev->acl_pkts;
2994 hdev->sco_cnt += count;
2995 if (hdev->sco_cnt > hdev->sco_pkts)
2996 hdev->sco_cnt = hdev->sco_pkts;
3000 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3005 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve @handle to a hci_conn according to the device type: on BR/EDR
 * controllers the handle names a connection directly; on AMP
 * controllers it names a logical channel whose conn is returned.
 */
3008 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3011 struct hci_chan *chan;
3013 switch (hdev->dev_type) {
3015 return hci_conn_hash_lookup_handle(hdev, handle);
3017 chan = hci_chan_lookup_handle(hdev, handle);
3022 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Handle the Number of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers).
 *
 * Same shape as hci_num_comp_pkts_evt() but credits data blocks into
 * the shared block pool instead of per-type packet counters.
 */
3029 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3031 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3034 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3035 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3039 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3040 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3041 BT_DBG("%s bad parameters", hdev->name);
3045 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3048 for (i = 0; i < ev->num_hndl; i++) {
3049 struct hci_comp_blocks_info *info = &ev->handles[i];
3050 struct hci_conn *conn = NULL;
3051 __u16 handle, block_count;
3053 handle = __le16_to_cpu(info->handle);
3054 block_count = __le16_to_cpu(info->blocks);
/* Handle may refer to a connection or an AMP logical channel. */
3056 conn = __hci_conn_lookup_handle(hdev, handle);
3060 conn->sent -= block_count;
3062 switch (conn->type) {
3065 hdev->block_cnt += block_count;
3066 if (hdev->block_cnt > hdev->num_blocks)
3067 hdev->block_cnt = hdev->num_blocks;
3071 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3076 queue_work(hdev->workqueue, &hdev->tx_work);
/* Mode Change event: a BR/EDR link moved between active and a power
 * save mode (sniff/hold).  Track the new mode on the connection and
 * complete any SCO setup that was deferred until the mode settled.
 */
3079 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3081 struct hci_ev_mode_change *ev = (void *) skb->data;
3082 struct hci_conn *conn;
3084 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3088 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3090 conn->mode = ev->mode;
/* Only mirror the mode into the POWER_SAVE flag when the change was
 * remote-initiated, i.e. no local mode-change command is pending. */
3092 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3094 if (conn->mode == HCI_CM_ACTIVE)
3095 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3097 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* SCO setup waits for the ACL link to exit sniff; resume it now. */
3100 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3101 hci_sco_setup(conn, ev->status);
3104 hci_dev_unlock(hdev);
/* PIN Code Request event: the remote device wants legacy (pre-SSP)
 * pairing.  Reject outright when we are not pairable; otherwise
 * forward the request to user space via mgmt, flagging whether a
 * 16-digit secure PIN is required.
 */
3107 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3109 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3110 struct hci_conn *conn;
3112 BT_DBG("%s", hdev->name);
3116 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Pairing on an established link: extend the disconnect timeout so
 * the link survives the user interaction (hold+drop re-arms timer). */
3120 if (conn->state == BT_CONNECTED) {
3121 hci_conn_hold(conn);
3122 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3123 hci_conn_drop(conn);
/* Not pairable: negative reply immediately, no user interaction. */
3126 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3127 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3128 sizeof(ev->bdaddr), &ev->bdaddr);
3129 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
/* High security requires a full-length (secure) PIN. */
3132 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3137 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3141 hci_dev_unlock(hdev);
/* Link Key Request event: the controller asks whether we have a stored
 * link key for the peer.  Reply with the key if one exists and its
 * strength matches the pending security requirements; otherwise send a
 * negative reply so pairing can proceed.
 */
3144 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3146 struct hci_ev_link_key_req *ev = (void *) skb->data;
3147 struct hci_cp_link_key_reply cp;
3148 struct hci_conn *conn;
3149 struct link_key *key;
3151 BT_DBG("%s", hdev->name);
/* Key storage is managed by user space; without mgmt we have none. */
3153 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3158 key = hci_find_link_key(hdev, &ev->bdaddr);
3160 BT_DBG("%s link key not found for %pMR", hdev->name,
3165 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3168 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* An unauthenticated key must not satisfy a request that asked for
 * MITM protection (auth_type bit 0). 0xff means auth_type unknown. */
3170 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3171 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3172 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3173 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Legacy combination keys from short PINs (<16 digits) are too weak
 * for high/FIPS security levels. */
3177 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3178 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3179 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3180 BT_DBG("%s ignoring key unauthenticated for high security",
3185 conn->key_type = key->type;
3186 conn->pin_length = key->pin_len;
3189 bacpy(&cp.bdaddr, &ev->bdaddr);
3190 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3192 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3194 hci_dev_unlock(hdev);
/* Negative-reply path: no usable key for this peer (6 = bdaddr len). */
3199 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3200 hci_dev_unlock(hdev);
/* Link Key Notification event: pairing produced a new link key.
 * Record the key type on the connection, store the key, notify user
 * space (with a persistence hint), and drop debug keys from the kernel
 * list unless debug-key retention is enabled.
 */
3203 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3205 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3206 struct hci_conn *conn;
3207 struct link_key *key;
3211 BT_DBG("%s", hdev->name);
3215 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3217 hci_conn_hold(conn);
3218 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3219 pin_len = conn->pin_length;
/* A "changed combination key" keeps its previous recorded type. */
3221 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3222 conn->key_type = ev->key_type;
3224 hci_conn_drop(conn);
/* Without mgmt, user space does not manage keys — nothing to store. */
3227 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3230 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3231 ev->key_type, pin_len, &persistent);
3235 mgmt_new_link_key(hdev, key, persistent);
3237 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3238 * is set. If it's not set simply remove the key from the kernel
3239 * list (we've still notified user space about it but with
3240 * store_hint being 0).
3242 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3243 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3244 list_del(&key->list);
/* Record on the connection whether the key should be flushed when
 * the link goes down (non-persistent keys are flushed). */
3248 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3250 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3254 hci_dev_unlock(hdev);
/* Read Clock Offset Complete event: cache the peer's clock offset in
 * the inquiry cache so future connection attempts can page faster.
 */
3257 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3259 struct hci_ev_clock_offset *ev = (void *) skb->data;
3260 struct hci_conn *conn;
3262 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3266 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3267 if (conn && !ev->status) {
3268 struct inquiry_entry *ie;
3270 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3272 ie->data.clock_offset = ev->clock_offset;
3273 ie->timestamp = jiffies;
3277 hci_dev_unlock(hdev);
/* Connection Packet Type Changed event: record the negotiated packet
 * types on the connection (only on success).
 */
3280 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3282 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3283 struct hci_conn *conn;
3285 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3289 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3290 if (conn && !ev->status)
3291 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3293 hci_dev_unlock(hdev);
/* Page Scan Repetition Mode Change event: update the cached page-scan
 * repetition mode for this peer in the inquiry cache.
 */
3296 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3298 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3299 struct inquiry_entry *ie;
3301 BT_DBG("%s", hdev->name);
3305 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3307 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3308 ie->timestamp = jiffies;
3311 hci_dev_unlock(hdev);
/* Inquiry Result with RSSI event.
 *
 * Two wire formats exist: a variant that additionally carries the
 * pscan_mode field and the plain RSSI variant.  Disambiguate by
 * dividing the payload length by num_rsp, then feed every response
 * into the inquiry cache and report it to user space via mgmt.
 */
3314 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3315 struct sk_buff *skb)
3317 struct inquiry_data data;
3318 int num_rsp = *((__u8 *) skb->data);
3320 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from periodic inquiry are not forwarded. */
3325 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
/* Per-entry size distinguishes the two event layouts. */
3330 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3331 struct inquiry_info_with_rssi_and_pscan_mode *info;
3332 info = (void *) (skb->data + 1);
3334 for (; num_rsp; num_rsp--, info++) {
3337 bacpy(&data.bdaddr, &info->bdaddr);
3338 data.pscan_rep_mode = info->pscan_rep_mode;
3339 data.pscan_period_mode = info->pscan_period_mode;
3340 data.pscan_mode = info->pscan_mode;
3341 memcpy(data.dev_class, info->dev_class, 3);
3342 data.clock_offset = info->clock_offset;
3343 data.rssi = info->rssi;
/* Plain inquiry results never carry SSP/EIR data. */
3344 data.ssp_mode = 0x00;
3346 flags = hci_inquiry_cache_update(hdev, &data, false);
3348 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3349 info->dev_class, info->rssi,
3350 flags, NULL, 0, NULL, 0);
3353 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3355 for (; num_rsp; num_rsp--, info++) {
3358 bacpy(&data.bdaddr, &info->bdaddr);
3359 data.pscan_rep_mode = info->pscan_rep_mode;
3360 data.pscan_period_mode = info->pscan_period_mode;
/* This layout has no pscan_mode field; default it. */
3361 data.pscan_mode = 0x00;
3362 memcpy(data.dev_class, info->dev_class, 3);
3363 data.clock_offset = info->clock_offset;
3364 data.rssi = info->rssi;
3365 data.ssp_mode = 0x00;
3367 flags = hci_inquiry_cache_update(hdev, &data, false);
3369 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3370 info->dev_class, info->rssi,
3371 flags, NULL, 0, NULL, 0);
3375 hci_dev_unlock(hdev);
/* Read Remote Extended Features Complete event.
 *
 * Store the reported feature page on the connection; for page 1,
 * derive SSP/Secure Connections support of the remote *host*.  Then
 * continue BR/EDR connection setup: request the remote name (for mgmt
 * pairing) or confirm the connection if no authentication is needed.
 */
3378 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3379 struct sk_buff *skb)
3381 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3382 struct hci_conn *conn;
3384 BT_DBG("%s", hdev->name);
3388 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3392 if (ev->page < HCI_MAX_PAGES)
3393 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries host-supported features (SSP, SC). */
3395 if (!ev->status && ev->page == 0x01) {
3396 struct inquiry_entry *ie;
3398 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3400 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3402 if (ev->features[0] & LMP_HOST_SSP) {
3403 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3405 /* It is mandatory by the Bluetooth specification that
3406 * Extended Inquiry Results are only used when Secure
3407 * Simple Pairing is enabled, but some devices violate
3410 * To make these devices work, the internal SSP
3411 * enabled flag needs to be cleared if the remote host
3412 * features do not indicate SSP support */
3413 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3416 if (ev->features[0] & LMP_HOST_SC)
3417 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Remaining steps only apply while the link is still being set up. */
3420 if (conn->state != BT_CONFIG)
3423 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3424 struct hci_cp_remote_name_req cp;
3425 memset(&cp, 0, sizeof(cp));
3426 bacpy(&cp.bdaddr, &conn->dst);
3427 cp.pscan_rep_mode = 0x02;
3428 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3429 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3430 mgmt_device_connected(hdev, &conn->dst, conn->type,
3431 conn->dst_type, 0, NULL, 0,
/* No outgoing authentication required: setup is complete. */
3434 if (!hci_outgoing_auth_needed(hdev, conn)) {
3435 conn->state = BT_CONNECTED;
3436 hci_proto_connect_cfm(conn, ev->status);
3437 hci_conn_drop(conn);
3441 hci_dev_unlock(hdev);
/* Synchronous (SCO/eSCO) Connection Complete event.
 *
 * On success, record the handle and mark the link connected.  For a
 * set of known negotiation failures, retry the setup with a reduced
 * (non-EDR) packet-type mask, since many controllers/headsets reject
 * the first eSCO parameter set.  Other failures close the connection.
 */
3444 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3445 struct sk_buff *skb)
3447 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3448 struct hci_conn *conn;
3450 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3454 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report SCO where eSCO was requested (or vice
 * versa); retry the lookup under the other link type and fix up
 * conn->type to match what was actually established. */
3456 if (ev->link_type == ESCO_LINK)
3459 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3463 conn->type = SCO_LINK;
3466 switch (ev->status) {
3468 conn->handle = __le16_to_cpu(ev->handle);
3469 conn->state = BT_CONNECTED;
3471 hci_conn_add_sysfs(conn);
/* Negotiation errors that are worth a retry with safer parameters: */
3474 case 0x10: /* Connection Accept Timeout */
3475 case 0x0d: /* Connection Rejected due to Limited Resources */
3476 case 0x11: /* Unsupported Feature or Parameter Value */
3477 case 0x1c: /* SCO interval rejected */
3478 case 0x1a: /* Unsupported Remote Feature */
3479 case 0x1f: /* Unspecified error */
3480 case 0x20: /* Unsupported LMP Parameter value */
/* Fall back to non-EDR eSCO packet types and retry over the
 * parent ACL link. */
3482 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3483 (hdev->esco_type & EDR_ESCO_MASK);
3484 if (hci_setup_sync(conn, conn->link->handle))
3490 conn->state = BT_CLOSED;
3494 hci_proto_connect_cfm(conn, ev->status);
3499 hci_dev_unlock(hdev);
/* Walk EIR (Extended Inquiry Response) TLV-style fields and return the
 * number of meaningful bytes, i.e. the offset of the end of the last
 * field (fields are "length byte + length bytes of data"; the
 * zero-length terminator/overflow checks are elided in this excerpt).
 */
3502 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3506 while (parsed < eir_len) {
3507 u8 field_len = eir[0];
3512 parsed += field_len + 1;
3513 eir += field_len + 1;
/* Extended Inquiry Result event: like the RSSI variant but each
 * response additionally carries EIR data (which may include the device
 * name).  Update the inquiry cache and forward name + EIR to mgmt.
 */
3519 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3520 struct sk_buff *skb)
3522 struct inquiry_data data;
3523 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3524 int num_rsp = *((__u8 *) skb->data);
3527 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from periodic inquiry are not forwarded. */
3532 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3537 for (; num_rsp; num_rsp--, info++) {
3541 bacpy(&data.bdaddr, &info->bdaddr);
3542 data.pscan_rep_mode = info->pscan_rep_mode;
3543 data.pscan_period_mode = info->pscan_period_mode;
3544 data.pscan_mode = 0x00;
3545 memcpy(data.dev_class, info->dev_class, 3);
3546 data.clock_offset = info->clock_offset;
3547 data.rssi = info->rssi;
/* EIR implies Secure Simple Pairing support on the remote. */
3548 data.ssp_mode = 0x01;
/* If the EIR already contains the name, no name request is needed. */
3550 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3551 name_known = eir_has_data_type(info->data,
3557 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3559 eir_len = eir_get_length(info->data, sizeof(info->data));
3561 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3562 info->dev_class, info->rssi,
3563 flags, info->data, eir_len, NULL, 0);
3566 hci_dev_unlock(hdev);
/* Encryption Key Refresh Complete event.
 *
 * Only handled for LE links (BR/EDR goes through auth_complete).  On
 * failure during an established link, disconnect; otherwise promote
 * the pending security level and finish connection setup or notify
 * authentication completion.
 */
3569 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3570 struct sk_buff *skb)
3572 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3573 struct hci_conn *conn;
3575 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3576 __le16_to_cpu(ev->handle));
3580 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3584 /* For BR/EDR the necessary steps are taken through the
3585 * auth_complete event.
3587 if (conn->type != LE_LINK)
3591 conn->sec_level = conn->pending_sec_level;
3593 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Key refresh failed on a live link: authentication failure. */
3595 if (ev->status && conn->state == BT_CONNECTED) {
3596 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3597 hci_conn_drop(conn);
3601 if (conn->state == BT_CONFIG) {
3603 conn->state = BT_CONNECTED;
3605 hci_proto_connect_cfm(conn, ev->status);
3606 hci_conn_drop(conn);
3608 hci_auth_cfm(conn, ev->status);
/* hold+drop re-arms the disconnect timer with the new timeout. */
3610 hci_conn_hold(conn);
3611 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3612 hci_conn_drop(conn);
3616 hci_dev_unlock(hdev);
/* Compute the authentication requirements to send in an IO Capability
 * Reply, combining our own auth_type with what the remote requested.
 * Bit 0 of the auth value is the MITM-protection flag.
 */
3619 static u8 hci_get_auth_req(struct hci_conn *conn)
3621 /* If remote requests no-bonding follow that lead */
3622 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3623 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3624 return conn->remote_auth | (conn->auth_type & 0x01);
3626 /* If both remote and local have enough IO capabilities, require
3629 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3630 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3631 return conn->remote_auth | 0x01;
3633 /* No MITM protection possible so ignore remote requirement */
3634 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* IO Capability Request event (Secure Simple Pairing).
 *
 * If we are pairable (or the remote only wants no-bonding), reply with
 * our IO capability, OOB-data presence and computed authentication
 * requirements; otherwise send a negative reply rejecting pairing.
 */
3637 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3639 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3640 struct hci_conn *conn;
3642 BT_DBG("%s", hdev->name);
3646 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive for the duration of pairing. */
3650 hci_conn_hold(conn);
3652 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Accept pairing if we are pairable, or if the remote is only after
 * a no-bonding (possibly MITM) association. */
3655 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3656 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3657 struct hci_cp_io_capability_reply cp;
3659 bacpy(&cp.bdaddr, &ev->bdaddr);
3660 /* Change the IO capability from KeyboardDisplay
3661 * to DisplayYesNo as it is not supported by BT spec. */
3662 cp.capability = (conn->io_capability == 0x04) ?
3663 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3665 /* If we are initiators, there is no remote information yet */
3666 if (conn->remote_auth == 0xff) {
3667 /* Request MITM protection if our IO caps allow it
3668 * except for the no-bonding case.
3670 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3671 cp.authentication != HCI_AT_NO_BONDING)
3672 conn->auth_type |= 0x01;
3674 cp.authentication = conn->auth_type;
/* Responder path: combine our and the remote's requirements. */
3676 conn->auth_type = hci_get_auth_req(conn);
3677 cp.authentication = conn->auth_type;
/* Advertise OOB data presence when we have it stored and either we
 * initiated or the remote signalled OOB. */
3680 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3681 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3686 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3689 struct hci_cp_io_capability_neg_reply cp;
3691 bacpy(&cp.bdaddr, &ev->bdaddr);
3692 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3694 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3699 hci_dev_unlock(hdev);
/* IO Capability Response event: remember the peer's IO capability and
 * authentication requirements for the rest of the pairing procedure,
 * and note whether it claims to have OOB data.
 */
3702 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3704 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3705 struct hci_conn *conn;
3707 BT_DBG("%s", hdev->name);
3711 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3715 conn->remote_cap = ev->capability;
3716 conn->remote_auth = ev->authentication;
3718 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3721 hci_dev_unlock(hdev);
/* User Confirmation Request event (numeric comparison during SSP).
 *
 * Reject when MITM is required but unattainable; auto-accept (possibly
 * after a configured delay) when neither side needs MITM; otherwise
 * hand the passkey to user space via mgmt for confirmation.
 */
3724 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3725 struct sk_buff *skb)
3727 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3728 int loc_mitm, rem_mitm, confirm_hint = 0;
3729 struct hci_conn *conn;
3731 BT_DBG("%s", hdev->name);
3735 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3738 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag. */
3742 loc_mitm = (conn->auth_type & 0x01);
3743 rem_mitm = (conn->remote_auth & 0x01);
3745 /* If we require MITM but the remote device can't provide that
3746 * (it has NoInputNoOutput) then reject the confirmation
3747 * request. We check the security level here since it doesn't
3748 * necessarily match conn->auth_type.
3750 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
3751 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3752 BT_DBG("Rejecting request: remote device can't provide MITM");
3753 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3754 sizeof(ev->bdaddr), &ev->bdaddr);
3758 /* If no side requires MITM protection; auto-accept */
3759 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3760 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3762 /* If we're not the initiators request authorization to
3763 * proceed from user space (mgmt_user_confirm with
3764 * confirm_hint set to 1). The exception is if neither
3765 * side had MITM in which case we do auto-accept.
3767 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3768 (loc_mitm || rem_mitm)) {
3769 BT_DBG("Confirming auto-accept as acceptor");
3774 BT_DBG("Auto-accept of user confirmation with %ums delay",
3775 hdev->auto_accept_delay);
/* Optional delayed auto-accept (helps some buggy peers). */
3777 if (hdev->auto_accept_delay > 0) {
3778 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3779 queue_delayed_work(conn->hdev->workqueue,
3780 &conn->auto_accept_work, delay);
3784 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3785 sizeof(ev->bdaddr), &ev->bdaddr);
/* Fall through to user space for an explicit yes/no. */
3790 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3791 le32_to_cpu(ev->passkey), confirm_hint);
3794 hci_dev_unlock(hdev);
/* User Passkey Request event: forward straight to user space via mgmt
 * (only when mgmt is active); the kernel cannot supply a passkey.
 */
3797 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3798 struct sk_buff *skb)
3800 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3802 BT_DBG("%s", hdev->name);
3804 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3805 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* User Passkey Notification event: record the passkey to display and
 * reset the entered-digit counter, then notify user space.
 */
3808 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3809 struct sk_buff *skb)
3811 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3812 struct hci_conn *conn;
3814 BT_DBG("%s", hdev->name);
3816 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3820 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3821 conn->passkey_entered = 0;
3823 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3824 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3825 conn->dst_type, conn->passkey_notify,
3826 conn->passkey_entered);
/* Keypress Notification event: track how many passkey digits the
 * remote user has entered/erased so user space can show progress.
 */
3829 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3831 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3832 struct hci_conn *conn;
3834 BT_DBG("%s", hdev->name);
3836 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3841 case HCI_KEYPRESS_STARTED:
3842 conn->passkey_entered = 0;
3845 case HCI_KEYPRESS_ENTERED:
3846 conn->passkey_entered++;
3849 case HCI_KEYPRESS_ERASED:
3850 conn->passkey_entered--;
3853 case HCI_KEYPRESS_CLEARED:
3854 conn->passkey_entered = 0;
3857 case HCI_KEYPRESS_COMPLETED:
/* Relay the updated count (with the previously stored passkey). */
3861 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3862 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3863 conn->dst_type, conn->passkey_notify,
3864 conn->passkey_entered);
/* Simple Pairing Complete event: report pairing failures to user space
 * (avoiding duplicates with auth_complete) and release the reference
 * taken when pairing started.
 */
3867 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3868 struct sk_buff *skb)
3870 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3871 struct hci_conn *conn;
3873 BT_DBG("%s", hdev->name);
3877 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3881 /* To avoid duplicate auth_failed events to user space we check
3882 * the HCI_CONN_AUTH_PEND flag which will be set if we
3883 * initiated the authentication. A traditional auth_complete
3884 * event gets always produced as initiator and is also mapped to
3885 * the mgmt_auth_failed event */
3886 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3887 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
/* Balances the hci_conn_hold() done at pairing start. */
3890 hci_conn_drop(conn);
3893 hci_dev_unlock(hdev);
/* Remote Host Supported Features Notification event: cache the remote
 * host feature page (features[1]) on any existing connection and
 * refresh the inquiry cache's SSP flag for that peer.
 */
3896 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3897 struct sk_buff *skb)
3899 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3900 struct inquiry_entry *ie;
3901 struct hci_conn *conn;
3903 BT_DBG("%s", hdev->name);
3907 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3909 memcpy(conn->features[1], ev->features, 8);
3911 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3913 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3915 hci_dev_unlock(hdev);
/* Remote OOB Data Request event.
 *
 * The controller asks for the out-of-band pairing data we hold for
 * this peer.  If Secure Connections is enabled, reply with both P-192
 * and P-256 hash/randomizer pairs; otherwise with P-192 only.  With no
 * stored data, send a negative reply.
 */
3918 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3919 struct sk_buff *skb)
3921 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3922 struct oob_data *data;
3924 BT_DBG("%s", hdev->name);
/* OOB data is provisioned via mgmt; without it there is none. */
3928 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3931 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3933 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3934 struct hci_cp_remote_oob_ext_data_reply cp;
3936 bacpy(&cp.bdaddr, &ev->bdaddr);
3937 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3938 memcpy(cp.randomizer192, data->randomizer192,
3939 sizeof(cp.randomizer192));
3940 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3941 memcpy(cp.randomizer256, data->randomizer256,
3942 sizeof(cp.randomizer256));
3944 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3947 struct hci_cp_remote_oob_data_reply cp;
3949 bacpy(&cp.bdaddr, &ev->bdaddr);
3950 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3951 memcpy(cp.randomizer, data->randomizer192,
3952 sizeof(cp.randomizer));
3954 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
/* No stored OOB data for this address: negative reply. */
3958 struct hci_cp_remote_oob_data_neg_reply cp;
3960 bacpy(&cp.bdaddr, &ev->bdaddr);
3961 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3966 hci_dev_unlock(hdev);
/* Physical Link Complete event (AMP).
 *
 * A physical link to an AMP controller finished establishing.  Mark it
 * connected, copy the destination address from the underlying BR/EDR
 * connection managed by the AMP manager, and notify the AMP layer.
 */
3969 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3970 struct sk_buff *skb)
3972 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3973 struct hci_conn *hcon, *bredr_hcon;
3975 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3980 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3982 hci_dev_unlock(hdev);
3988 hci_dev_unlock(hdev);
/* The BR/EDR link carrying the signalling for this AMP link. */
3992 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3994 hcon->state = BT_CONNECTED;
3995 bacpy(&hcon->dst, &bredr_hcon->dst);
/* hold+drop arms the disconnect timer with the standard timeout. */
3997 hci_conn_hold(hcon);
3998 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3999 hci_conn_drop(hcon);
4001 hci_conn_add_sysfs(hcon);
4003 amp_physical_cfm(bredr_hcon, hcon);
4005 hci_dev_unlock(hdev);
/* Logical Link Complete event (AMP).
 *
 * Create an hci_chan for the new logical link on top of the physical
 * link and, if the AMP manager has a BR/EDR L2CAP channel waiting for
 * the move, confirm the logical link to L2CAP with the AMP block MTU.
 */
4008 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4010 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4011 struct hci_conn *hcon;
4012 struct hci_chan *hchan;
4013 struct amp_mgr *mgr;
4015 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4016 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4019 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4023 /* Create AMP hchan */
4024 hchan = hci_chan_create(hcon);
4028 hchan->handle = le16_to_cpu(ev->handle);
4030 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4032 mgr = hcon->amp_mgr;
4033 if (mgr && mgr->bredr_chan) {
4034 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4036 l2cap_chan_lock(bredr_chan);
/* AMP data path uses the controller's block MTU. */
4038 bredr_chan->conn->mtu = hdev->block_mtu;
4039 l2cap_logical_cfm(bredr_chan, hchan, 0);
4040 hci_conn_hold(hcon);
4042 l2cap_chan_unlock(bredr_chan);
/* Disconnection Logical Link Complete event (AMP): look up the channel
 * by logical-link handle and tear it down via the AMP layer.
 */
4046 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4047 struct sk_buff *skb)
4049 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4050 struct hci_chan *hchan;
4052 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4053 le16_to_cpu(ev->handle), ev->status);
4060 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4064 amp_destroy_logical_link(hchan, ev->reason);
4067 hci_dev_unlock(hdev);
/* Disconnection Physical Link Complete event (AMP): mark the physical
 * link closed.
 */
4070 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4071 struct sk_buff *skb)
4073 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4074 struct hci_conn *hcon;
4076 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4083 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4085 hcon->state = BT_CLOSED;
4089 hci_dev_unlock(hdev);
/* LE Connection Complete event.
 *
 * Match (or create) the hci_conn for the new LE link, reconstruct the
 * initiator/responder addresses depending on our role and privacy
 * settings, resolve RPAs back to identity addresses via the IRK store,
 * enforce the blacklist, record connection parameters and notify the
 * upper layers.  Finally re-evaluate background scanning.
 *
 * NOTE(review): several structural lines (braces, gotos, the error
 * branch placement) are elided in this excerpt.
 */
4092 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4094 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4095 struct hci_conn_params *params;
4096 struct hci_conn *conn;
4097 struct smp_irk *irk;
4100 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4104 /* All controllers implicitly stop advertising in the event of a
4105 * connection, so ensure that the state bit is cleared.
4107 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
/* Prefer an existing connection object in BT_CONNECT state (our own
 * pending connect attempt); otherwise create one for incoming. */
4109 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4111 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
4113 BT_ERR("No memory for new connection");
4117 conn->dst_type = ev->bdaddr_type;
4119 if (ev->role == HCI_ROLE_MASTER) {
4121 set_bit(HCI_CONN_MASTER, &conn->flags);
4124 /* If we didn't have a hci_conn object previously
4125 * but we're in master role this must be something
4126 * initiated using a white list. Since white list based
4127 * connections are not "first class citizens" we don't
4128 * have full tracking of them. Therefore, we go ahead
4129 * with a "best effort" approach of determining the
4130 * initiator address based on the HCI_PRIVACY flag.
4133 conn->resp_addr_type = ev->bdaddr_type;
4134 bacpy(&conn->resp_addr, &ev->bdaddr);
4135 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
4136 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4137 bacpy(&conn->init_addr, &hdev->rpa);
4139 hci_copy_identity_address(hdev,
4141 &conn->init_addr_type);
/* Connection established: the LE connect timeout no longer applies. */
4145 cancel_delayed_work(&conn->le_conn_timeout);
4149 /* Set the responder (our side) address type based on
4150 * the advertising address type.
4152 conn->resp_addr_type = hdev->adv_addr_type;
4153 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4154 bacpy(&conn->resp_addr, &hdev->random_addr);
4156 bacpy(&conn->resp_addr, &hdev->bdaddr);
4158 conn->init_addr_type = ev->bdaddr_type;
4159 bacpy(&conn->init_addr, &ev->bdaddr);
4161 /* For incoming connections, set the default minimum
4162 * and maximum connection interval. They will be used
4163 * to check if the parameters are in range and if not
4164 * trigger the connection update procedure.
4166 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4167 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4170 /* Lookup the identity address from the stored connection
4171 * address and address type.
4173 * When establishing connections to an identity address, the
4174 * connection procedure will store the resolvable random
4175 * address first. Now if it can be converted back into the
4176 * identity address, start using the identity address from
4179 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4181 bacpy(&conn->dst, &irk->bdaddr);
4182 conn->dst_type = irk->addr_type;
4185 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4186 addr_type = BDADDR_LE_PUBLIC;
4188 addr_type = BDADDR_LE_RANDOM;
/* Failed connection attempt: clean up via the common error path. */
4191 hci_le_conn_failed(conn, ev->status);
4195 /* Drop the connection if the device is blocked */
4196 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4197 hci_conn_drop(conn);
4201 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4202 mgmt_device_connected(hdev, &conn->dst, conn->type,
4203 conn->dst_type, 0, NULL, 0, NULL);
4205 conn->sec_level = BT_SECURITY_LOW;
4206 conn->handle = __le16_to_cpu(ev->handle);
4207 conn->state = BT_CONNECTED;
/* Store the negotiated connection parameters from the event. */
4209 conn->le_conn_interval = le16_to_cpu(ev->interval);
4210 conn->le_conn_latency = le16_to_cpu(ev->latency);
4211 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4213 hci_conn_add_sysfs(conn);
4215 hci_proto_connect_cfm(conn, ev->status);
/* An auto-connect entry for this device is now satisfied. */
4217 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
4219 list_del_init(&params->action);
/* Pending-connection lists changed; recompute passive scanning. */
4222 hci_update_background_scan(hdev);
4223 hci_dev_unlock(hdev);
/* LE Connection Update Complete event: store the newly negotiated
 * interval, latency and supervision timeout on the connection.
 */
4226 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4227 struct sk_buff *skb)
4229 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4230 struct hci_conn *conn;
4232 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4239 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4241 conn->le_conn_interval = le16_to_cpu(ev->interval);
4242 conn->le_conn_latency = le16_to_cpu(ev->latency);
4243 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4246 hci_dev_unlock(hdev);
4249 /* This function requires the caller holds hdev->lock */
/* This function requires the caller holds hdev->lock */
/* Decide, based on an advertising report, whether to initiate an LE
 * connection to the advertiser: only for connectable advertising, not
 * for blocked devices, and only when the device is either directly
 * addressing a connectable adapter or is on the pend_le_conns list.
 */
4250 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4251 u8 addr_type, u8 adv_type)
4253 struct hci_conn *conn;
4255 /* If the event is not connectable don't proceed further */
4256 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4259 /* Ignore if the device is blocked */
4260 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4263 /* If we're connectable, always connect any ADV_DIRECT_IND event */
4264 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
4265 adv_type == LE_ADV_DIRECT_IND)
4268 /* If we're not connectable only connect devices that we have in
4269 * our pend_le_conns list.
4271 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
4275 /* Request connection in master = true role */
4276 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4277 HCI_LE_AUTOCONN_TIMEOUT, true);
4281 switch (PTR_ERR(conn)) {
4283 /* If hci_connect() returns -EBUSY it means there is already
4284 * an LE connection attempt going on. Since controllers don't
4285 * support more than one connection attempt at the time, we
4286 * don't consider this an error case.
4290 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process one LE advertising report.
 *
 * Resolves RPAs to identity addresses, possibly auto-connects (see
 * check_pending_le_conn), and during active scanning merges each
 * ADV_IND/ADV_SCAN_IND with its matching SCAN_RSP before emitting a
 * single mgmt device-found event.  Passive scanning reports only
 * devices explicitly marked for reporting.
 *
 * NOTE(review): some early-return/goto lines are elided in this
 * excerpt; the described flow follows the visible branches.
 */
4294 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4295 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4297 struct discovery_state *d = &hdev->discovery;
4298 struct smp_irk *irk;
4302 /* Check if we need to convert to identity address */
4303 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4305 bdaddr = &irk->bdaddr;
4306 bdaddr_type = irk->addr_type;
4309 /* Check if we have been requested to connect to this device */
4310 check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4312 /* Passive scanning shouldn't trigger any device found events,
4313 * except for devices marked as CONN_REPORT for which we do send
4314 * device found events.
4316 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4317 if (type == LE_ADV_DIRECT_IND)
4320 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4321 bdaddr, bdaddr_type))
4324 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4325 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4328 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4329 rssi, flags, data, len, NULL, 0);
4333 /* When receiving non-connectable or scannable undirected
4334 * advertising reports, this means that the remote device is
4335 * not connectable and then clearly indicate this in the
4336 * device found event.
4338 * When receiving a scan response, then there is no way to
4339 * know if the remote device is connectable or not. However
4340 * since scan responses are merged with a previously seen
4341 * advertising report, the flags field from that report
4344 * In the really unlikely case that a controller get confused
4345 * and just sends a scan response event, then it is marked as
4346 * not connectable as well.
4348 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4349 type == LE_ADV_SCAN_RSP)
4350 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4354 /* If there's nothing pending either store the data from this
4355 * event or send an immediate device found event if the data
4356 * should not be stored for later.
4358 if (!has_pending_adv_report(hdev)) {
4359 /* If the report will trigger a SCAN_REQ store it for
4362 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4363 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4364 rssi, flags, data, len);
4368 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4369 rssi, flags, data, len, NULL, 0);
4373 /* Check if the pending report is for the same device as the new one */
4374 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4375 bdaddr_type == d->last_adv_addr_type);
4377 /* If the pending data doesn't match this report or this isn't a
4378 * scan response (e.g. we got a duplicate ADV_IND) then force
4379 * sending of the pending data.
4381 if (type != LE_ADV_SCAN_RSP || !match) {
4382 /* Send out whatever is in the cache, but skip duplicates */
4384 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4385 d->last_adv_addr_type, NULL,
4386 d->last_adv_rssi, d->last_adv_flags,
4388 d->last_adv_data_len, NULL, 0);
4390 /* If the new report will trigger a SCAN_REQ store it for
4393 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4394 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4395 rssi, flags, data, len);
4399 /* The advertising reports cannot be merged, so clear
4400 * the pending report and send out a device found event.
4402 clear_pending_adv_report(hdev);
4403 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4404 rssi, flags, data, len, NULL, 0);
4408 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4409 * the new event is a SCAN_RSP. We can therefore proceed with
4410 * sending a merged device found event.
4412 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4413 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4414 d->last_adv_data, d->last_adv_data_len, data, len);
4415 clear_pending_adv_report(hdev);
4418 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4420 u8 num_reports = skb->data[0];
4421 void *ptr = &skb->data[1];
4425 while (num_reports--) {
4426 struct hci_ev_le_advertising_info *ev = ptr;
4429 rssi = ev->data[ev->length];
4430 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4431 ev->bdaddr_type, rssi, ev->data, ev->length);
4433 ptr += sizeof(*ev) + ev->length + 1;
4436 hci_dev_unlock(hdev);
4439 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4441 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4442 struct hci_cp_le_ltk_reply cp;
4443 struct hci_cp_le_ltk_neg_reply neg;
4444 struct hci_conn *conn;
4445 struct smp_ltk *ltk;
4447 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4451 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4455 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
4459 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4460 cp.handle = cpu_to_le16(conn->handle);
4462 if (ltk->authenticated)
4463 conn->pending_sec_level = BT_SECURITY_HIGH;
4465 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4467 conn->enc_key_size = ltk->enc_size;
4469 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4471 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4472 * temporary key used to encrypt a connection following
4473 * pairing. It is used during the Encrypted Session Setup to
4474 * distribute the keys. Later, security can be re-established
4475 * using a distributed LTK.
4477 if (ltk->type == SMP_STK) {
4478 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4479 list_del(<k->list);
4482 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4485 hci_dev_unlock(hdev);
4490 neg.handle = ev->handle;
4491 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4492 hci_dev_unlock(hdev);
4495 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4498 struct hci_cp_le_conn_param_req_neg_reply cp;
4500 cp.handle = cpu_to_le16(handle);
4503 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4507 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4508 struct sk_buff *skb)
4510 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4511 struct hci_cp_le_conn_param_req_reply cp;
4512 struct hci_conn *hcon;
4513 u16 handle, min, max, latency, timeout;
4515 handle = le16_to_cpu(ev->handle);
4516 min = le16_to_cpu(ev->interval_min);
4517 max = le16_to_cpu(ev->interval_max);
4518 latency = le16_to_cpu(ev->latency);
4519 timeout = le16_to_cpu(ev->timeout);
4521 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4522 if (!hcon || hcon->state != BT_CONNECTED)
4523 return send_conn_param_neg_reply(hdev, handle,
4524 HCI_ERROR_UNKNOWN_CONN_ID);
4526 if (hci_check_conn_params(min, max, latency, timeout))
4527 return send_conn_param_neg_reply(hdev, handle,
4528 HCI_ERROR_INVALID_LL_PARAMS);
4530 if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
4531 struct hci_conn_params *params;
4536 params = hci_conn_params_lookup(hdev, &hcon->dst,
4539 params->conn_min_interval = min;
4540 params->conn_max_interval = max;
4541 params->conn_latency = latency;
4542 params->supervision_timeout = timeout;
4548 hci_dev_unlock(hdev);
4550 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4551 store_hint, min, max, latency, timeout);
4554 cp.handle = ev->handle;
4555 cp.interval_min = ev->interval_min;
4556 cp.interval_max = ev->interval_max;
4557 cp.latency = ev->latency;
4558 cp.timeout = ev->timeout;
4562 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4565 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4567 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4569 skb_pull(skb, sizeof(*le_ev));
4571 switch (le_ev->subevent) {
4572 case HCI_EV_LE_CONN_COMPLETE:
4573 hci_le_conn_complete_evt(hdev, skb);
4576 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4577 hci_le_conn_update_complete_evt(hdev, skb);
4580 case HCI_EV_LE_ADVERTISING_REPORT:
4581 hci_le_adv_report_evt(hdev, skb);
4584 case HCI_EV_LE_LTK_REQ:
4585 hci_le_ltk_request_evt(hdev, skb);
4588 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4589 hci_le_remote_conn_param_req_evt(hdev, skb);
4597 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4599 struct hci_ev_channel_selected *ev = (void *) skb->data;
4600 struct hci_conn *hcon;
4602 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4604 skb_pull(skb, sizeof(*ev));
4606 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4610 amp_read_loc_assoc_final_data(hdev, hcon);
4613 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4615 struct hci_event_hdr *hdr = (void *) skb->data;
4616 __u8 event = hdr->evt;
4620 /* Received events are (currently) only needed when a request is
4621 * ongoing so avoid unnecessary memory allocation.
4623 if (hci_req_pending(hdev)) {
4624 kfree_skb(hdev->recv_evt);
4625 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4628 hci_dev_unlock(hdev);
4630 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4632 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4633 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4634 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4636 hci_req_cmd_complete(hdev, opcode, 0);
4640 case HCI_EV_INQUIRY_COMPLETE:
4641 hci_inquiry_complete_evt(hdev, skb);
4644 case HCI_EV_INQUIRY_RESULT:
4645 hci_inquiry_result_evt(hdev, skb);
4648 case HCI_EV_CONN_COMPLETE:
4649 hci_conn_complete_evt(hdev, skb);
4652 case HCI_EV_CONN_REQUEST:
4653 hci_conn_request_evt(hdev, skb);
4656 case HCI_EV_DISCONN_COMPLETE:
4657 hci_disconn_complete_evt(hdev, skb);
4660 case HCI_EV_AUTH_COMPLETE:
4661 hci_auth_complete_evt(hdev, skb);
4664 case HCI_EV_REMOTE_NAME:
4665 hci_remote_name_evt(hdev, skb);
4668 case HCI_EV_ENCRYPT_CHANGE:
4669 hci_encrypt_change_evt(hdev, skb);
4672 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4673 hci_change_link_key_complete_evt(hdev, skb);
4676 case HCI_EV_REMOTE_FEATURES:
4677 hci_remote_features_evt(hdev, skb);
4680 case HCI_EV_CMD_COMPLETE:
4681 hci_cmd_complete_evt(hdev, skb);
4684 case HCI_EV_CMD_STATUS:
4685 hci_cmd_status_evt(hdev, skb);
4688 case HCI_EV_ROLE_CHANGE:
4689 hci_role_change_evt(hdev, skb);
4692 case HCI_EV_NUM_COMP_PKTS:
4693 hci_num_comp_pkts_evt(hdev, skb);
4696 case HCI_EV_MODE_CHANGE:
4697 hci_mode_change_evt(hdev, skb);
4700 case HCI_EV_PIN_CODE_REQ:
4701 hci_pin_code_request_evt(hdev, skb);
4704 case HCI_EV_LINK_KEY_REQ:
4705 hci_link_key_request_evt(hdev, skb);
4708 case HCI_EV_LINK_KEY_NOTIFY:
4709 hci_link_key_notify_evt(hdev, skb);
4712 case HCI_EV_CLOCK_OFFSET:
4713 hci_clock_offset_evt(hdev, skb);
4716 case HCI_EV_PKT_TYPE_CHANGE:
4717 hci_pkt_type_change_evt(hdev, skb);
4720 case HCI_EV_PSCAN_REP_MODE:
4721 hci_pscan_rep_mode_evt(hdev, skb);
4724 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4725 hci_inquiry_result_with_rssi_evt(hdev, skb);
4728 case HCI_EV_REMOTE_EXT_FEATURES:
4729 hci_remote_ext_features_evt(hdev, skb);
4732 case HCI_EV_SYNC_CONN_COMPLETE:
4733 hci_sync_conn_complete_evt(hdev, skb);
4736 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4737 hci_extended_inquiry_result_evt(hdev, skb);
4740 case HCI_EV_KEY_REFRESH_COMPLETE:
4741 hci_key_refresh_complete_evt(hdev, skb);
4744 case HCI_EV_IO_CAPA_REQUEST:
4745 hci_io_capa_request_evt(hdev, skb);
4748 case HCI_EV_IO_CAPA_REPLY:
4749 hci_io_capa_reply_evt(hdev, skb);
4752 case HCI_EV_USER_CONFIRM_REQUEST:
4753 hci_user_confirm_request_evt(hdev, skb);
4756 case HCI_EV_USER_PASSKEY_REQUEST:
4757 hci_user_passkey_request_evt(hdev, skb);
4760 case HCI_EV_USER_PASSKEY_NOTIFY:
4761 hci_user_passkey_notify_evt(hdev, skb);
4764 case HCI_EV_KEYPRESS_NOTIFY:
4765 hci_keypress_notify_evt(hdev, skb);
4768 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4769 hci_simple_pair_complete_evt(hdev, skb);
4772 case HCI_EV_REMOTE_HOST_FEATURES:
4773 hci_remote_host_features_evt(hdev, skb);
4776 case HCI_EV_LE_META:
4777 hci_le_meta_evt(hdev, skb);
4780 case HCI_EV_CHANNEL_SELECTED:
4781 hci_chan_selected_evt(hdev, skb);
4784 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4785 hci_remote_oob_data_request_evt(hdev, skb);
4788 case HCI_EV_PHY_LINK_COMPLETE:
4789 hci_phy_link_complete_evt(hdev, skb);
4792 case HCI_EV_LOGICAL_LINK_COMPLETE:
4793 hci_loglink_complete_evt(hdev, skb);
4796 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4797 hci_disconn_loglink_complete_evt(hdev, skb);
4800 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4801 hci_disconn_phylink_complete_evt(hdev, skb);
4804 case HCI_EV_NUM_COMP_BLOCKS:
4805 hci_num_comp_blocks_evt(hdev, skb);
4809 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4814 hdev->stat.evt_rx++;