2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
61 hci_conn_check_pending(hdev);
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
66 __u8 status = *((__u8 *) skb->data);
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
78 __u8 status = *((__u8 *) skb->data);
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
85 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
87 hci_conn_check_pending(hdev);
90 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
93 BT_DBG("%s", hdev->name);
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
110 conn->role = rp->role;
112 hci_dev_unlock(hdev);
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 conn->link_policy = __le16_to_cpu(rp->policy);
131 hci_dev_unlock(hdev);
134 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 struct hci_rp_write_link_policy *rp = (void *) skb->data;
137 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
145 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 conn->link_policy = get_unaligned_le16(sent + 2);
155 hci_dev_unlock(hdev);
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 hdev->link_policy = __le16_to_cpu(rp->policy);
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 __u8 status = *((__u8 *) skb->data);
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
186 hdev->link_policy = get_unaligned_le16(sent);
189 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191 __u8 status = *((__u8 *) skb->data);
193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
195 clear_bit(HCI_RESET, &hdev->flags);
200 /* Reset all non-persistent flags */
201 hci_dev_clear_volatile_flags(hdev);
203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
211 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
212 hdev->scan_rsp_data_len = 0;
214 hdev->le_scan_type = LE_SCAN_PASSIVE;
216 hdev->ssp_debug_mode = 0;
218 hci_bdaddr_list_clear(&hdev->le_white_list);
221 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
224 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 struct hci_cp_read_stored_link_key *sent;
227 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
229 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
233 if (!rp->status && sent->read_all == 0x01) {
234 hdev->stored_max_keys = rp->max_keys;
235 hdev->stored_num_keys = rp->num_keys;
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
252 hdev->stored_num_keys = 0;
255 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
257 __u8 status = *((__u8 *) skb->data);
260 BT_DBG("%s status 0x%2.2x", hdev->name, status);
262 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
268 if (hci_dev_test_flag(hdev, HCI_MGMT))
269 mgmt_set_local_name_complete(hdev, sent, status);
271 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
273 hci_dev_unlock(hdev);
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
285 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 hci_dev_test_flag(hdev, HCI_CONFIG))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
290 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
292 __u8 status = *((__u8 *) skb->data);
295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 __u8 param = *((__u8 *) sent);
306 if (param == AUTH_ENABLED)
307 set_bit(HCI_AUTH, &hdev->flags);
309 clear_bit(HCI_AUTH, &hdev->flags);
312 if (hci_dev_test_flag(hdev, HCI_MGMT))
313 mgmt_auth_enable_complete(hdev, status);
315 hci_dev_unlock(hdev);
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
320 __u8 status = *((__u8 *) skb->data);
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
333 param = *((__u8 *) sent);
336 set_bit(HCI_ENCRYPT, &hdev->flags);
338 clear_bit(HCI_ENCRYPT, &hdev->flags);
341 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
343 __u8 status = *((__u8 *) skb->data);
347 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
353 param = *((__u8 *) sent);
358 hdev->discov_timeout = 0;
362 if (param & SCAN_INQUIRY)
363 set_bit(HCI_ISCAN, &hdev->flags);
365 clear_bit(HCI_ISCAN, &hdev->flags);
367 if (param & SCAN_PAGE)
368 set_bit(HCI_PSCAN, &hdev->flags);
370 clear_bit(HCI_PSCAN, &hdev->flags);
373 hci_dev_unlock(hdev);
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
385 memcpy(hdev->dev_class, rp->dev_class, 3);
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
391 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
393 __u8 status = *((__u8 *) skb->data);
396 BT_DBG("%s status 0x%2.2x", hdev->name, status);
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 memcpy(hdev->dev_class, sent, 3);
407 if (hci_dev_test_flag(hdev, HCI_MGMT))
408 mgmt_set_class_of_dev_complete(hdev, sent, status);
410 hci_dev_unlock(hdev);
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
415 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
418 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
423 setting = __le16_to_cpu(rp->voice_setting);
425 if (hdev->voice_setting == setting)
428 hdev->voice_setting = setting;
430 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
433 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
439 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
452 setting = get_unaligned_le16(sent);
454 if (hdev->voice_setting == setting)
457 hdev->voice_setting = setting;
459 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
462 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
475 hdev->num_iac = rp->num_iac;
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
480 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
482 __u8 status = *((__u8 *) skb->data);
483 struct hci_cp_write_ssp_mode *sent;
485 BT_DBG("%s status 0x%2.2x", hdev->name, status);
487 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
495 hdev->features[1][0] |= LMP_HOST_SSP;
497 hdev->features[1][0] &= ~LMP_HOST_SSP;
500 if (hci_dev_test_flag(hdev, HCI_MGMT))
501 mgmt_ssp_enable_complete(hdev, sent->mode, status);
504 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
506 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
509 hci_dev_unlock(hdev);
512 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
514 u8 status = *((u8 *) skb->data);
515 struct hci_cp_write_sc_support *sent;
517 BT_DBG("%s status 0x%2.2x", hdev->name, status);
519 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
527 hdev->features[1][0] |= LMP_HOST_SC;
529 hdev->features[1][0] &= ~LMP_HOST_SC;
532 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
534 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
536 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
539 hci_dev_unlock(hdev);
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
544 struct hci_rp_read_local_version *rp = (void *) skb->data;
546 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
551 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
552 hci_dev_test_flag(hdev, HCI_CONFIG)) {
553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver;
556 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
576 static void hci_cc_read_local_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 memcpy(hdev->features, rp->features, 8);
588 /* Adjust default settings according to features
589 * supported by device. */
591 if (hdev->features[0][0] & LMP_3SLOT)
592 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
594 if (hdev->features[0][0] & LMP_5SLOT)
595 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
597 if (hdev->features[0][1] & LMP_HV2) {
598 hdev->pkt_type |= (HCI_HV2);
599 hdev->esco_type |= (ESCO_HV2);
602 if (hdev->features[0][1] & LMP_HV3) {
603 hdev->pkt_type |= (HCI_HV3);
604 hdev->esco_type |= (ESCO_HV3);
607 if (lmp_esco_capable(hdev))
608 hdev->esco_type |= (ESCO_EV3);
610 if (hdev->features[0][4] & LMP_EV4)
611 hdev->esco_type |= (ESCO_EV4);
613 if (hdev->features[0][4] & LMP_EV5)
614 hdev->esco_type |= (ESCO_EV5);
616 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
617 hdev->esco_type |= (ESCO_2EV3);
619 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
620 hdev->esco_type |= (ESCO_3EV3);
622 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
623 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
629 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 if (hdev->max_page < rp->max_page)
637 hdev->max_page = rp->max_page;
639 if (rp->page < HCI_MAX_PAGES)
640 memcpy(hdev->features[rp->page], rp->features, 8);
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
653 hdev->flow_ctl_mode = rp->mode;
656 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
658 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
665 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
666 hdev->sco_mtu = rp->sco_mtu;
667 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
668 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
670 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
675 hdev->acl_cnt = hdev->acl_pkts;
676 hdev->sco_cnt = hdev->sco_pkts;
678 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
679 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr);
694 if (hci_dev_test_flag(hdev, HCI_SETUP))
695 bacpy(&hdev->setup_addr, &rp->bdaddr);
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
701 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
708 if (test_bit(HCI_INIT, &hdev->flags)) {
709 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 hdev->page_scan_window = __le16_to_cpu(rp->window);
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
717 u8 status = *((u8 *) skb->data);
718 struct hci_cp_write_page_scan_activity *sent;
720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
725 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
729 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 hdev->page_scan_window = __le16_to_cpu(sent->window);
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
750 u8 status = *((u8 *) skb->data);
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
760 hdev->page_scan_type = *type;
763 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
766 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
773 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
774 hdev->block_len = __le16_to_cpu(rp->block_len);
775 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
777 hdev->block_cnt = hdev->num_blocks;
779 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
780 hdev->block_cnt, hdev->block_len);
783 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
785 struct hci_rp_read_clock *rp = (void *) skb->data;
786 struct hci_cp_read_clock *cp;
787 struct hci_conn *conn;
789 BT_DBG("%s", hdev->name);
791 if (skb->len < sizeof(*rp))
799 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
803 if (cp->which == 0x00) {
804 hdev->clock = le32_to_cpu(rp->clock);
808 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
810 conn->clock = le32_to_cpu(rp->clock);
811 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
815 hci_dev_unlock(hdev);
818 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
821 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
823 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 hdev->amp_status = rp->amp_status;
829 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
830 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
831 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
832 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
833 hdev->amp_type = rp->amp_type;
834 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
835 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
836 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
837 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
840 a2mp_send_getinfo_rsp(hdev);
843 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
846 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
847 struct amp_assoc *assoc = &hdev->loc_assoc;
848 size_t rem_len, frag_len;
850 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
855 frag_len = skb->len - sizeof(*rp);
856 rem_len = __le16_to_cpu(rp->rem_len);
858 if (rem_len > frag_len) {
859 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
861 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
862 assoc->offset += frag_len;
864 /* Read other fragments */
865 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
870 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
871 assoc->len = assoc->offset + rem_len;
875 /* Send A2MP Rsp when all fragments are received */
876 a2mp_send_getampassoc_rsp(hdev, rp->status);
877 a2mp_send_create_phy_link_req(hdev, rp->status);
880 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
883 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
890 hdev->inq_tx_power = rp->tx_power;
893 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
895 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
896 struct hci_cp_pin_code_reply *cp;
897 struct hci_conn *conn;
899 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
903 if (hci_dev_test_flag(hdev, HCI_MGMT))
904 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
909 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
913 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
915 conn->pin_length = cp->pin_len;
918 hci_dev_unlock(hdev);
921 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
923 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
929 if (hci_dev_test_flag(hdev, HCI_MGMT))
930 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
933 hci_dev_unlock(hdev);
936 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
939 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
941 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
946 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
947 hdev->le_pkts = rp->le_max_pkt;
949 hdev->le_cnt = hdev->le_pkts;
951 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
954 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
957 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
959 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964 memcpy(hdev->le_features, rp->features, 8);
967 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
970 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
972 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
977 hdev->adv_tx_power = rp->tx_power;
980 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
982 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
984 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
988 if (hci_dev_test_flag(hdev, HCI_MGMT))
989 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
992 hci_dev_unlock(hdev);
995 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
998 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1000 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1004 if (hci_dev_test_flag(hdev, HCI_MGMT))
1005 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1006 ACL_LINK, 0, rp->status);
1008 hci_dev_unlock(hdev);
1011 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1013 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1015 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1019 if (hci_dev_test_flag(hdev, HCI_MGMT))
1020 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1023 hci_dev_unlock(hdev);
1026 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1027 struct sk_buff *skb)
1029 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1031 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1035 if (hci_dev_test_flag(hdev, HCI_MGMT))
1036 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1037 ACL_LINK, 0, rp->status);
1039 hci_dev_unlock(hdev);
1042 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1043 struct sk_buff *skb)
1045 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1050 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1052 hci_dev_unlock(hdev);
1055 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1056 struct sk_buff *skb)
1058 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1060 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1063 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1064 rp->hash256, rp->rand256,
1066 hci_dev_unlock(hdev);
1069 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1071 __u8 status = *((__u8 *) skb->data);
1074 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1079 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1085 bacpy(&hdev->random_addr, sent);
1087 hci_dev_unlock(hdev);
1090 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1092 __u8 *sent, status = *((__u8 *) skb->data);
1094 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1099 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1105 /* If we're doing connection initiation as peripheral. Set a
1106 * timeout in case something goes wrong.
1109 struct hci_conn *conn;
1111 hci_dev_set_flag(hdev, HCI_LE_ADV);
1113 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1115 queue_delayed_work(hdev->workqueue,
1116 &conn->le_conn_timeout,
1117 conn->conn_timeout);
1119 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1122 hci_dev_unlock(hdev);
1125 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1127 struct hci_cp_le_set_scan_param *cp;
1128 __u8 status = *((__u8 *) skb->data);
1130 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1135 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1141 hdev->le_scan_type = cp->type;
1143 hci_dev_unlock(hdev);
1146 static bool has_pending_adv_report(struct hci_dev *hdev)
1148 struct discovery_state *d = &hdev->discovery;
1150 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1153 static void clear_pending_adv_report(struct hci_dev *hdev)
1155 struct discovery_state *d = &hdev->discovery;
1157 bacpy(&d->last_adv_addr, BDADDR_ANY);
1158 d->last_adv_data_len = 0;
1161 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1162 u8 bdaddr_type, s8 rssi, u32 flags,
1165 struct discovery_state *d = &hdev->discovery;
1167 bacpy(&d->last_adv_addr, bdaddr);
1168 d->last_adv_addr_type = bdaddr_type;
1169 d->last_adv_rssi = rssi;
1170 d->last_adv_flags = flags;
1171 memcpy(d->last_adv_data, data, len);
1172 d->last_adv_data_len = len;
1175 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1176 struct sk_buff *skb)
1178 struct hci_cp_le_set_scan_enable *cp;
1179 __u8 status = *((__u8 *) skb->data);
1181 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1186 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1192 switch (cp->enable) {
1193 case LE_SCAN_ENABLE:
1194 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1195 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1196 clear_pending_adv_report(hdev);
1199 case LE_SCAN_DISABLE:
1200 /* We do this here instead of when setting DISCOVERY_STOPPED
1201 * since the latter would potentially require waiting for
1202 * inquiry to stop too.
1204 if (has_pending_adv_report(hdev)) {
1205 struct discovery_state *d = &hdev->discovery;
1207 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1208 d->last_adv_addr_type, NULL,
1209 d->last_adv_rssi, d->last_adv_flags,
1211 d->last_adv_data_len, NULL, 0);
1214 /* Cancel this timer so that we don't try to disable scanning
1215 * when it's already disabled.
1217 cancel_delayed_work(&hdev->le_scan_disable);
1219 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1221 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1222 * interrupted scanning due to a connect request. Mark
1223 * therefore discovery as stopped. If this was not
1224 * because of a connect request advertising might have
1225 * been disabled because of active scanning, so
1226 * re-enable it again if necessary.
1228 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1229 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1230 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1231 hdev->discovery.state == DISCOVERY_FINDING)
1232 mgmt_reenable_advertising(hdev);
1237 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1241 hci_dev_unlock(hdev);
1244 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1245 struct sk_buff *skb)
1247 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1249 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1254 hdev->le_white_list_size = rp->size;
1257 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1258 struct sk_buff *skb)
1260 __u8 status = *((__u8 *) skb->data);
1262 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1267 hci_bdaddr_list_clear(&hdev->le_white_list);
1270 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1271 struct sk_buff *skb)
1273 struct hci_cp_le_add_to_white_list *sent;
1274 __u8 status = *((__u8 *) skb->data);
1276 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1281 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1285 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1289 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1290 struct sk_buff *skb)
1292 struct hci_cp_le_del_from_white_list *sent;
1293 __u8 status = *((__u8 *) skb->data);
1295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1300 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1304 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1308 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1309 struct sk_buff *skb)
1311 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1313 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1318 memcpy(hdev->le_states, rp->le_states, 8);
1321 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1322 struct sk_buff *skb)
1324 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1326 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1331 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1332 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1335 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1336 struct sk_buff *skb)
1338 struct hci_cp_le_write_def_data_len *sent;
1339 __u8 status = *((__u8 *) skb->data);
1341 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1346 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1350 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1351 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1354 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1355 struct sk_buff *skb)
1357 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1359 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1364 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1365 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1366 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1367 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1370 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1371 struct sk_buff *skb)
1373 struct hci_cp_write_le_host_supported *sent;
1374 __u8 status = *((__u8 *) skb->data);
1376 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1381 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1388 hdev->features[1][0] |= LMP_HOST_LE;
1389 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1391 hdev->features[1][0] &= ~LMP_HOST_LE;
1392 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1393 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1397 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1399 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1401 hci_dev_unlock(hdev);
1404 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1406 struct hci_cp_le_set_adv_param *cp;
1407 u8 status = *((u8 *) skb->data);
1409 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1414 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1419 hdev->adv_addr_type = cp->own_address_type;
1420 hci_dev_unlock(hdev);
1423 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1424 struct sk_buff *skb)
1426 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1428 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1429 hdev->name, rp->status, rp->phy_handle);
1434 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1437 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1439 struct hci_rp_read_rssi *rp = (void *) skb->data;
1440 struct hci_conn *conn;
1442 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1449 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1451 conn->rssi = rp->rssi;
1453 hci_dev_unlock(hdev);
1456 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1458 struct hci_cp_read_tx_power *sent;
1459 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1460 struct hci_conn *conn;
1462 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1467 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1473 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1477 switch (sent->type) {
1479 conn->tx_power = rp->tx_power;
1482 conn->max_tx_power = rp->tx_power;
1487 hci_dev_unlock(hdev);
1490 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1492 u8 status = *((u8 *) skb->data);
1495 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1500 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1502 hdev->ssp_debug_mode = *mode;
1505 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1507 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1510 hci_conn_check_pending(hdev);
1514 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for HCI_OP_CREATE_CONN.
 * Looks up the ACL connection object for the target bdaddr from the sent
 * command.  On error: a BT_CONNECT conn is closed and callers notified via
 * hci_connect_cfm() — unless the failure is 0x0c (Command Disallowed) with
 * fewer than three attempts, in which case the conn is parked in
 * BT_CONNECT2 for a retry.  On success with no existing conn, a new ACL
 * hci_conn is added.
 */
1517 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1519 struct hci_cp_create_conn *cp;
1520 struct hci_conn *conn;
1522 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1524 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1530 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1532 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1535 if (conn && conn->state == BT_CONNECT) {
1536 if (status != 0x0c || conn->attempt > 2) {
1537 conn->state = BT_CLOSED;
1538 hci_connect_cfm(conn, status);
1541 conn->state = BT_CONNECT2;
1545 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1548 BT_ERR("No memory for new connection");
1552 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ADD_SCO.
 * The handle in the sent command identifies the parent ACL link; on
 * failure the associated SCO connection is closed and its waiters are
 * notified through hci_connect_cfm().  The lookup of 'sco' from 'acl'
 * (acl->link in the full source) is elided in this extract.
 */
1555 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1557 struct hci_cp_add_sco *cp;
1558 struct hci_conn *acl, *sco;
1561 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1566 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1570 handle = __le16_to_cpu(cp->handle);
1572 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1576 acl = hci_conn_hash_lookup_handle(hdev, handle);
1580 sco->state = BT_CLOSED;
1582 hci_connect_cfm(sco, status);
1587 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_AUTH_REQUESTED.
 * On failure, a connection still in BT_CONFIG is reported failed to its
 * waiters and its temporary reference is dropped.
 */
1590 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1592 struct hci_cp_auth_requested *cp;
1593 struct hci_conn *conn;
1595 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1600 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1606 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1608 if (conn->state == BT_CONFIG) {
1609 hci_connect_cfm(conn, status);
1610 hci_conn_drop(conn);
1614 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.
 * Same failure handling as hci_cs_auth_requested(): a BT_CONFIG
 * connection is failed via hci_connect_cfm() and its reference dropped.
 */
1617 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1619 struct hci_cp_set_conn_encrypt *cp;
1620 struct hci_conn *conn;
1622 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1627 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1633 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1635 if (conn->state == BT_CONFIG) {
1636 hci_connect_cfm(conn, status);
1637 hci_conn_drop(conn);
1641 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication.
 * Only outgoing links in BT_CONFIG qualify; SDP-level security never
 * requires it.  For legacy (non-SSP) links without the MITM bit
 * (auth_type & 0x01) authentication is requested only for MEDIUM/HIGH/
 * FIPS pending security levels.  Returns nonzero when auth is needed
 * (the return statements themselves are elided in this extract).
 */
1644 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1645 struct hci_conn *conn)
1647 if (conn->state != BT_CONFIG || !conn->out)
1650 if (conn->pending_sec_level == BT_SECURITY_SDP)
1653 /* Only request authentication for SSP connections or non-SSP
1654 * devices with sec_level MEDIUM or HIGH or if MITM protection
1657 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1658 conn->pending_sec_level != BT_SECURITY_FIPS &&
1659 conn->pending_sec_level != BT_SECURITY_HIGH &&
1660 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI Remote Name Request for an inquiry-cache entry.
 * Copies the entry's bdaddr, page-scan parameters and clock offset into
 * the command and sends it; returns the hci_send_cmd() result (0 on
 * successful queuing).
 */
1666 static int hci_resolve_name(struct hci_dev *hdev,
1667 struct inquiry_entry *e)
1669 struct hci_cp_remote_name_req cp;
1671 memset(&cp, 0, sizeof(cp));
1673 bacpy(&cp.bdaddr, &e->data.bdaddr);
1674 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1675 cp.pscan_mode = e->data.pscan_mode;
1676 cp.clock_offset = e->data.clock_offset;
1678 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device that still
 * needs its name (NAME_NEEDED).  On a successfully sent request the
 * entry transitions to NAME_PENDING.  Returns whether a resolution was
 * started; the empty-list early return and the final returns are elided
 * in this extract.
 */
1681 static bool hci_resolve_next_name(struct hci_dev *hdev)
1683 struct discovery_state *discov = &hdev->discovery;
1684 struct inquiry_entry *e;
1686 if (list_empty(&discov->resolve))
1689 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1693 if (hci_resolve_name(hdev, e) == 0) {
1694 e->name_state = NAME_PENDING;
/* Process a resolved (or failed) remote name during discovery.
 * First reports the device as connected to mgmt if the conn is genuinely
 * up (BT_CONFIG/BT_CONNECTED) and not yet reported.  Then, depending on
 * the discovery state, either stops, completes, or continues resolving:
 * a NAME_PENDING cache entry is marked known/not-known (emitting
 * mgmt_remote_name on success) and the next pending name is resolved;
 * when none remain, discovery is set to DISCOVERY_STOPPED.
 */
1701 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1702 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1704 struct discovery_state *discov = &hdev->discovery;
1705 struct inquiry_entry *e;
1707 /* Update the mgmt connected state if necessary. Be careful with
1708 * conn objects that exist but are not (yet) connected however.
1709 * Only those in BT_CONFIG or BT_CONNECTED states can be
1710 * considered connected.
1713 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1714 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1715 mgmt_device_connected(hdev, conn, 0, name, name_len);
1717 if (discov->state == DISCOVERY_STOPPED)
1720 if (discov->state == DISCOVERY_STOPPING)
1721 goto discov_complete;
1723 if (discov->state != DISCOVERY_RESOLVING)
1726 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1727 /* If the device was not found in a list of found devices names of which
1728 * are pending. there is no need to continue resolving a next name as it
1729 * will be done upon receiving another Remote Name Request Complete
1736 e->name_state = NAME_KNOWN;
1737 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1738 e->data.rssi, name, name_len);
1740 e->name_state = NAME_NOT_KNOWN;
1743 if (hci_resolve_next_name(hdev))
1747 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 * On success the real work is deferred to the Remote Name Request
 * Complete event.  On failure, mgmt discovery state is updated via
 * hci_check_pending_name(), and if the connection still needs
 * authentication an HCI_OP_AUTH_REQUESTED is issued (guarded so only one
 * auth attempt is pending at a time).
 */
1750 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1752 struct hci_cp_remote_name_req *cp;
1753 struct hci_conn *conn;
1755 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1757 /* If successful wait for the name req complete event before
1758 * checking for the need to do authentication */
1762 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1768 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1770 if (hci_dev_test_flag(hdev, HCI_MGMT))
1771 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1776 if (!hci_outgoing_auth_needed(hdev, conn))
1779 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1780 struct hci_cp_auth_requested auth_cp;
1782 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1784 auth_cp.handle = __cpu_to_le16(conn->handle);
1785 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1786 sizeof(auth_cp), &auth_cp);
1790 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.
 * On failure, a BT_CONFIG connection is failed to its waiters and the
 * temporary reference is dropped.
 */
1793 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1795 struct hci_cp_read_remote_features *cp;
1796 struct hci_conn *conn;
1798 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1803 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1809 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1811 if (conn->state == BT_CONFIG) {
1812 hci_connect_cfm(conn, status);
1813 hci_conn_drop(conn);
1817 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 * Identical failure handling to hci_cs_read_remote_features().
 */
1820 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1822 struct hci_cp_read_remote_ext_features *cp;
1823 struct hci_conn *conn;
1825 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1830 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1836 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1838 if (conn->state == BT_CONFIG) {
1839 hci_connect_cfm(conn, status);
1840 hci_conn_drop(conn);
1844 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 * Mirrors hci_cs_add_sco(): the handle identifies the ACL parent; on
 * failure the dependent SCO/eSCO conn is closed and its waiters
 * notified.  The acl->link lookup is elided in this extract.
 */
1847 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1849 struct hci_cp_setup_sync_conn *cp;
1850 struct hci_conn *acl, *sco;
1853 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1858 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1862 handle = __le16_to_cpu(cp->handle);
1864 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1868 acl = hci_conn_hash_lookup_handle(hdev, handle);
1872 sco->state = BT_CLOSED;
1874 hci_connect_cfm(sco, status);
1879 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SNIFF_MODE.
 * On failure, clears the pending mode-change flag and, if a SCO setup
 * was waiting on the mode change, runs it now with the error status.
 */
1882 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1884 struct hci_cp_sniff_mode *cp;
1885 struct hci_conn *conn;
1887 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1892 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1898 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1900 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1902 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1903 hci_sco_setup(conn, status);
1906 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 * Same failure handling as hci_cs_sniff_mode().
 */
1909 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1911 struct hci_cp_exit_sniff_mode *cp;
1912 struct hci_conn *conn;
1914 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1919 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1925 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1927 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1929 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1930 hci_sco_setup(conn, status);
1933 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_DISCONNECT.
 * On failure, reports the failed disconnect to mgmt so userspace learns
 * the link was not torn down.
 */
1936 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1938 struct hci_cp_disconnect *cp;
1939 struct hci_conn *conn;
1944 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1950 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1952 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1953 conn->dst_type, status);
1955 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP).
 * On failure the hcon for the phy handle is looked up (its cleanup is
 * elided in this extract); on success the AMP remote-association write
 * for the phy handle proceeds.
 */
1958 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1960 struct hci_cp_create_phy_link *cp;
1962 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1964 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1971 struct hci_conn *hcon;
1973 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1977 amp_write_remote_assoc(hdev, cp->phy_handle);
1980 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK (AMP).
 * On success, continues by writing the remote AMP association for the
 * accepted phy handle.
 */
1983 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1985 struct hci_cp_accept_phy_link *cp;
1987 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1992 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1996 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 * Failures are handled elsewhere (hci_le_conn_failed via the request
 * completion callback); here, on success, the initiator/responder
 * addresses are recorded on the conn for later SMP use, and — because LE
 * has no page-timeout — a connection-timeout work item is armed for any
 * attempt that targets a specific peer rather than the white list.
 */
1999 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2001 struct hci_cp_le_create_conn *cp;
2002 struct hci_conn *conn;
2004 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2006 /* All connection failure handling is taken care of by the
2007 * hci_le_conn_failed function which is triggered by the HCI
2008 * request completion callbacks used for connecting.
2013 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2019 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
2023 /* Store the initiator and responder address information which
2024 * is needed for SMP. These values will not change during the
2025 * lifetime of the connection.
2027 conn->init_addr_type = cp->own_address_type;
2028 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
2029 bacpy(&conn->init_addr, &hdev->random_addr);
2031 bacpy(&conn->init_addr, &hdev->bdaddr);
2033 conn->resp_addr_type = cp->peer_addr_type;
2034 bacpy(&conn->resp_addr, &cp->peer_addr);
2036 /* We don't want the connection attempt to stick around
2037 * indefinitely since LE doesn't have a page timeout concept
2038 * like BR/EDR. Set a timer for any connection that doesn't use
2039 * the white list for connecting.
2041 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
2042 queue_delayed_work(conn->hdev->workqueue,
2043 &conn->le_conn_timeout,
2044 conn->conn_timeout);
2047 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_START_ENC.
 * On failure, a still-connected link is torn down with
 * HCI_ERROR_AUTH_FAILURE since encryption could not be started.
 */
2050 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2052 struct hci_cp_le_start_enc *cp;
2053 struct hci_conn *conn;
2055 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2062 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2066 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2070 if (conn->state != BT_CONNECTED)
2073 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2074 hci_conn_drop(conn);
2077 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SWITCH_ROLE.
 * On failure, clears the pending role-switch flag on the ACL conn so
 * future role switches are not blocked.
 */
2080 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2082 struct hci_cp_switch_role *cp;
2083 struct hci_conn *conn;
2085 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2090 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2096 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2098 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2100 hci_dev_unlock(hdev);
/* Inquiry Complete event handler.
 * Clears HCI_INQUIRY (waking any waiters, paired with the barrier before
 * wake_up_bit) and, when mgmt is in control and discovery was FINDING,
 * either transitions to name resolution (RESOLVING) for the first
 * NAME_NEEDED entry or marks discovery STOPPED — taking care not to stop
 * early while a simultaneous LE scan is still running.
 */
2103 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2105 __u8 status = *((__u8 *) skb->data);
2106 struct discovery_state *discov = &hdev->discovery;
2107 struct inquiry_entry *e;
2109 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2111 hci_conn_check_pending(hdev);
2113 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2116 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2117 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2119 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2124 if (discov->state != DISCOVERY_FINDING)
2127 if (list_empty(&discov->resolve)) {
2128 /* When BR/EDR inquiry is active and no LE scanning is in
2129 * progress, then change discovery state to indicate completion.
2131 * When running LE scanning and BR/EDR inquiry simultaneously
2132 * and the LE scan already finished, then change the discovery
2133 * state to indicate completion.
2135 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2136 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2141 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2142 if (e && hci_resolve_name(hdev, e) == 0) {
2143 e->name_state = NAME_PENDING;
2144 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2146 /* When BR/EDR inquiry is active and no LE scanning is in
2147 * progress, then change discovery state to indicate completion.
2149 * When running LE scanning and BR/EDR inquiry simultaneously
2150 * and the LE scan already finished, then change the discovery
2151 * state to indicate completion.
2153 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2154 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2155 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2159 hci_dev_unlock(hdev);
/* Inquiry Result event handler (responses without RSSI).
 * Iterates the num_rsp inquiry_info records, copies each into an
 * inquiry_data (RSSI marked invalid, SSP off), updates the inquiry
 * cache, and forwards the device to mgmt as found.  Skipped entirely
 * while periodic inquiry is active.
 */
2162 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2164 struct inquiry_data data;
2165 struct inquiry_info *info = (void *) (skb->data + 1);
2166 int num_rsp = *((__u8 *) skb->data);
2168 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2173 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2178 for (; num_rsp; num_rsp--, info++) {
2181 bacpy(&data.bdaddr, &info->bdaddr);
2182 data.pscan_rep_mode = info->pscan_rep_mode;
2183 data.pscan_period_mode = info->pscan_period_mode;
2184 data.pscan_mode = info->pscan_mode;
2185 memcpy(data.dev_class, info->dev_class, 3);
2186 data.clock_offset = info->clock_offset;
2187 data.rssi = HCI_RSSI_INVALID;
2188 data.ssp_mode = 0x00;
2190 flags = hci_inquiry_cache_update(hdev, &data, false);
2192 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2193 info->dev_class, HCI_RSSI_INVALID,
2194 flags, NULL, 0, NULL, 0);
2197 hci_dev_unlock(hdev);
/* Connection Complete event handler (BR/EDR ACL and SCO).
 * Looks up the conn by link type and bdaddr (falling back to an eSCO
 * lookup for SCO links and coercing the type).  On success: records the
 * handle, moves ACL links to BT_CONFIG (holding a ref and choosing a
 * pairing vs. normal disconnect timeout based on whether a link key
 * exists), others to BT_CONNECTED; creates debugfs/sysfs entries,
 * mirrors the adapter AUTH/ENCRYPT flags onto the conn, kicks off a
 * remote-features read for ACL, and fixes the packet type for incoming
 * pre-2.0 connections.  On failure: closes the conn and reports the
 * failed connect to mgmt for ACL.  Finally notifies waiters and
 * re-checks pending connections.
 */
2200 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2202 struct hci_ev_conn_complete *ev = (void *) skb->data;
2203 struct hci_conn *conn;
2205 BT_DBG("%s", hdev->name);
2209 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2211 if (ev->link_type != SCO_LINK)
2214 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2218 conn->type = SCO_LINK;
2222 conn->handle = __le16_to_cpu(ev->handle);
2224 if (conn->type == ACL_LINK) {
2225 conn->state = BT_CONFIG;
2226 hci_conn_hold(conn);
2228 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2229 !hci_find_link_key(hdev, &ev->bdaddr))
2230 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2232 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2234 conn->state = BT_CONNECTED;
2236 hci_debugfs_create_conn(conn);
2237 hci_conn_add_sysfs(conn);
2239 if (test_bit(HCI_AUTH, &hdev->flags))
2240 set_bit(HCI_CONN_AUTH, &conn->flags);
2242 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2243 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2245 /* Get remote features */
2246 if (conn->type == ACL_LINK) {
2247 struct hci_cp_read_remote_features cp;
2248 cp.handle = ev->handle;
2249 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2252 hci_update_page_scan(hdev);
2255 /* Set packet type for incoming connection */
2256 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2257 struct hci_cp_change_conn_ptype cp;
2258 cp.handle = ev->handle;
2259 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2260 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2264 conn->state = BT_CLOSED;
2265 if (conn->type == ACL_LINK)
2266 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2267 conn->dst_type, ev->status);
2270 if (conn->type == ACL_LINK)
2271 hci_sco_setup(conn, ev->status);
2274 hci_connect_cfm(conn, ev->status);
2276 } else if (ev->link_type != ACL_LINK)
2277 hci_connect_cfm(conn, ev->status);
2280 hci_dev_unlock(hdev);
2282 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from @bdaddr with
 * HCI_ERROR_REJ_BAD_ADDR via HCI_OP_REJECT_CONN_REQ.
 */
2285 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2287 struct hci_cp_reject_conn_req cp;
2289 bacpy(&cp.bdaddr, bdaddr);
2290 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2291 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler (incoming BR/EDR connection).
 * The request is rejected when: no L2CAP listener accepts the link type
 * (mask lacks HCI_LM_ACCEPT), the peer is blacklisted, or — with mgmt in
 * control — the adapter is not connectable and the peer is not
 * whitelisted.  Otherwise the inquiry cache's device class is refreshed,
 * a conn object is found or created, and the connection is accepted:
 * ACL (or SCO when not deferred and eSCO is unsupported) via
 * HCI_OP_ACCEPT_CONN_REQ choosing master/slave role; eSCO via
 * HCI_OP_ACCEPT_SYNC_CONN_REQ with fixed 8 kB/s bandwidth and the
 * adapter's voice setting; deferred requests park in BT_CONNECT2 and
 * notify via hci_connect_cfm(conn, 0).
 */
2294 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2296 struct hci_ev_conn_request *ev = (void *) skb->data;
2297 int mask = hdev->link_mode;
2298 struct inquiry_entry *ie;
2299 struct hci_conn *conn;
2302 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2305 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2308 if (!(mask & HCI_LM_ACCEPT)) {
2309 hci_reject_conn(hdev, &ev->bdaddr);
2313 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2315 hci_reject_conn(hdev, &ev->bdaddr);
2319 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2320 * connection. These features are only touched through mgmt so
2321 * only do the checks if HCI_MGMT is set.
2323 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2324 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2325 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2327 hci_reject_conn(hdev, &ev->bdaddr);
2331 /* Connection accepted */
2335 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2337 memcpy(ie->data.dev_class, ev->dev_class, 3);
2339 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2342 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2345 BT_ERR("No memory for new connection");
2346 hci_dev_unlock(hdev);
2351 memcpy(conn->dev_class, ev->dev_class, 3);
2353 hci_dev_unlock(hdev);
2355 if (ev->link_type == ACL_LINK ||
2356 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2357 struct hci_cp_accept_conn_req cp;
2358 conn->state = BT_CONNECT;
2360 bacpy(&cp.bdaddr, &ev->bdaddr);
2362 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2363 cp.role = 0x00; /* Become master */
2365 cp.role = 0x01; /* Remain slave */
2367 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2368 } else if (!(flags & HCI_PROTO_DEFER)) {
2369 struct hci_cp_accept_sync_conn_req cp;
2370 conn->state = BT_CONNECT;
2372 bacpy(&cp.bdaddr, &ev->bdaddr);
2373 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2375 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2376 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2377 cp.max_latency = cpu_to_le16(0xffff);
2378 cp.content_format = cpu_to_le16(hdev->voice_setting);
2379 cp.retrans_effort = 0xff;
2381 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2384 conn->state = BT_CONNECT2;
2385 hci_connect_cfm(conn, 0);
/* Map an HCI disconnect error code onto the coarser mgmt disconnect
 * reason reported to userspace; anything unrecognized becomes
 * MGMT_DEV_DISCONN_UNKNOWN.
 */
2389 static u8 hci_to_mgmt_reason(u8 err)
2392 case HCI_ERROR_CONNECTION_TIMEOUT:
2393 return MGMT_DEV_DISCONN_TIMEOUT;
2394 case HCI_ERROR_REMOTE_USER_TERM:
2395 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2396 case HCI_ERROR_REMOTE_POWER_OFF:
2397 return MGMT_DEV_DISCONN_REMOTE;
2398 case HCI_ERROR_LOCAL_HOST_TERM:
2399 return MGMT_DEV_DISCONN_LOCAL_HOST;
2401 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler.
 * A failed disconnect is reported to mgmt and no state changes.  On
 * success the conn is closed, mgmt is told whether the device was ever
 * reported connected, and for ACL links a flush-flagged link key is
 * removed and page scan refreshed.  LE auto-connect params are then
 * consulted: LINK_LOSS reconnects only on a connection-timeout reason,
 * while DIRECT/ALWAYS requeue the peer for background connecting.
 * Finally upper layers are notified and, for LE links, advertising is
 * re-enabled if the connection had implicitly disabled it (per the
 * LE_Set_Advertise_Enable description in Core v4.0).
 */
2405 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2407 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2408 u8 reason = hci_to_mgmt_reason(ev->reason);
2409 struct hci_conn_params *params;
2410 struct hci_conn *conn;
2411 bool mgmt_connected;
2414 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2418 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2423 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2424 conn->dst_type, ev->status);
2428 conn->state = BT_CLOSED;
2430 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2431 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2432 reason, mgmt_connected);
2434 if (conn->type == ACL_LINK) {
2435 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2436 hci_remove_link_key(hdev, &conn->dst);
2438 hci_update_page_scan(hdev);
2441 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2443 switch (params->auto_connect) {
2444 case HCI_AUTO_CONN_LINK_LOSS:
2445 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2449 case HCI_AUTO_CONN_DIRECT:
2450 case HCI_AUTO_CONN_ALWAYS:
2451 list_del_init(&params->action);
2452 list_add(&params->action, &hdev->pend_le_conns);
2453 hci_update_background_scan(hdev);
2463 hci_disconn_cfm(conn, ev->reason);
2466 /* Re-enable advertising if necessary, since it might
2467 * have been disabled by the connection. From the
2468 * HCI_LE_Set_Advertise_Enable command description in
2469 * the core specification (v4.0):
2470 * "The Controller shall continue advertising until the Host
2471 * issues an LE_Set_Advertise_Enable command with
2472 * Advertising_Enable set to 0x00 (Advertising is disabled)
2473 * or until a connection is created or until the Advertising
2474 * is timed out due to Directed Advertising."
2476 if (type == LE_LINK)
2477 mgmt_reenable_advertising(hdev);
2480 hci_dev_unlock(hdev);
/* Authentication Complete event handler.
 * Legacy (non-SSP) devices cannot be re-authenticated, so a REAUTH_PEND
 * completion is treated as success regardless of status; otherwise a
 * failure is reported to mgmt.  The AUTH/REAUTH pending flags are
 * cleared.  For BT_CONFIG conns: SSP success continues into
 * HCI_OP_SET_CONN_ENCRYPT, otherwise the conn goes BT_CONNECTED and
 * waiters are notified.  Outside BT_CONFIG, hci_auth_cfm() is used and a
 * temporary hold/timeout adjustment keeps the link alive briefly.  If
 * encryption was pending, it is either started now (on success) or the
 * pend flag is cleared and hci_encrypt_cfm() reports the failure.
 */
2483 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2485 struct hci_ev_auth_complete *ev = (void *) skb->data;
2486 struct hci_conn *conn;
2488 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2492 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2497 if (!hci_conn_ssp_enabled(conn) &&
2498 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2499 BT_INFO("re-auth of legacy device is not possible.");
2501 set_bit(HCI_CONN_AUTH, &conn->flags);
2502 conn->sec_level = conn->pending_sec_level;
2505 mgmt_auth_failed(conn, ev->status);
2508 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2509 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2511 if (conn->state == BT_CONFIG) {
2512 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2513 struct hci_cp_set_conn_encrypt cp;
2514 cp.handle = ev->handle;
2516 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2519 conn->state = BT_CONNECTED;
2520 hci_connect_cfm(conn, ev->status);
2521 hci_conn_drop(conn);
2524 hci_auth_cfm(conn, ev->status);
2526 hci_conn_hold(conn);
2527 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2528 hci_conn_drop(conn);
2531 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2533 struct hci_cp_set_conn_encrypt cp;
2534 cp.handle = ev->handle;
2536 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2539 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2540 hci_encrypt_cfm(conn, ev->status, 0x00);
2545 hci_dev_unlock(hdev);
/* Remote Name Request Complete event handler.
 * With mgmt enabled, feeds the (possibly NULL on error) name into
 * hci_check_pending_name() to advance discovery name resolution.  Then,
 * if the connection still requires authentication, issues
 * HCI_OP_AUTH_REQUESTED — guarded by AUTH_PEND so only one request is
 * outstanding.
 */
2548 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2550 struct hci_ev_remote_name *ev = (void *) skb->data;
2551 struct hci_conn *conn;
2553 BT_DBG("%s", hdev->name);
2555 hci_conn_check_pending(hdev);
2559 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2561 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2564 if (ev->status == 0)
2565 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2566 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2568 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2574 if (!hci_outgoing_auth_needed(hdev, conn))
2577 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2578 struct hci_cp_auth_requested cp;
2580 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2582 cp.handle = __cpu_to_le16(conn->handle);
2583 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2587 hci_dev_unlock(hdev);
/* Encryption Change event handler.
 * On success: sets AUTH (encryption implies authentication) and ENCRYPT,
 * promotes the security level, flags FIPS for P-256 authenticated keys,
 * and marks AES-CCM for encrypt mode 0x02 on ACL or any LE link; on
 * failure those flags are cleared.  An LE failure additionally expires
 * the current RPA.  A failure on an established link tears it down with
 * HCI_ERROR_AUTH_FAILURE.  A BT_CONFIG conn becomes BT_CONNECTED —
 * unless Secure Connections Only mode rejects a link that is not
 * AES-CCM + P-256-authenticated.  Otherwise hci_encrypt_cfm() relays the
 * result.
 */
2590 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2592 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2593 struct hci_conn *conn;
2595 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2599 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2605 /* Encryption implies authentication */
2606 set_bit(HCI_CONN_AUTH, &conn->flags);
2607 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2608 conn->sec_level = conn->pending_sec_level;
2610 /* P-256 authentication key implies FIPS */
2611 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2612 set_bit(HCI_CONN_FIPS, &conn->flags);
2614 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2615 conn->type == LE_LINK)
2616 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2618 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2619 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2623 /* We should disregard the current RPA and generate a new one
2624 * whenever the encryption procedure fails.
2626 if (ev->status && conn->type == LE_LINK)
2627 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2629 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2631 if (ev->status && conn->state == BT_CONNECTED) {
2632 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2633 hci_conn_drop(conn);
2637 if (conn->state == BT_CONFIG) {
2639 conn->state = BT_CONNECTED;
2641 /* In Secure Connections Only mode, do not allow any
2642 * connections that are not encrypted with AES-CCM
2643 * using a P-256 authenticated combination key.
2645 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2646 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2647 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2648 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2649 hci_conn_drop(conn);
2653 hci_connect_cfm(conn, ev->status);
2654 hci_conn_drop(conn);
2656 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2659 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event handler.
 * On success marks the conn SECURE, clears the pending auth flag, and
 * relays the result to upper layers via hci_key_change_cfm().
 */
2662 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2663 struct sk_buff *skb)
2665 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2666 struct hci_conn *conn;
2668 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2672 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2675 set_bit(HCI_CONN_SECURE, &conn->flags);
2677 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2679 hci_key_change_cfm(conn, ev->status);
2682 hci_dev_unlock(hdev);
/* Read Remote Supported Features Complete event handler.
 * Caches page 0 of the remote feature mask, then — while in BT_CONFIG —
 * continues setup: reads extended features if both sides support them;
 * otherwise resolves the remote name (or reports the device connected to
 * mgmt if the name flow is skipped), and finishes the connection when no
 * further authentication is needed.
 */
2685 static void hci_remote_features_evt(struct hci_dev *hdev,
2686 struct sk_buff *skb)
2688 struct hci_ev_remote_features *ev = (void *) skb->data;
2689 struct hci_conn *conn;
2691 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2695 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2700 memcpy(conn->features[0], ev->features, 8);
2702 if (conn->state != BT_CONFIG)
2705 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2706 lmp_ext_feat_capable(conn)) {
2707 struct hci_cp_read_remote_ext_features cp;
2708 cp.handle = ev->handle;
2710 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2715 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2716 struct hci_cp_remote_name_req cp;
2717 memset(&cp, 0, sizeof(cp));
2718 bacpy(&cp.bdaddr, &conn->dst);
2719 cp.pscan_rep_mode = 0x02;
2720 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2721 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2722 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2724 if (!hci_outgoing_auth_needed(hdev, conn)) {
2725 conn->state = BT_CONNECTED;
2726 hci_connect_cfm(conn, ev->status);
2727 hci_conn_drop(conn);
2731 hci_dev_unlock(hdev);
/* Command Complete event dispatcher.
 * Pulls the event header, decodes the opcode, and routes the remaining
 * skb (the command's return parameters, whose first byte is the status)
 * to the matching hci_cc_* handler.  Afterwards: cancels the command
 * timeout for any real command (not HCI_OP_NOP), re-opens the command
 * window when the controller advertises free command slots (ev->ncmd)
 * and no reset is in flight, completes any pending hci_request for this
 * opcode, and kicks cmd_work if more commands are queued.
 */
2734 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2736 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2737 u8 status = skb->data[sizeof(*ev)];
2740 skb_pull(skb, sizeof(*ev));
2742 opcode = __le16_to_cpu(ev->opcode);
2745 case HCI_OP_INQUIRY_CANCEL:
2746 hci_cc_inquiry_cancel(hdev, skb);
2749 case HCI_OP_PERIODIC_INQ:
2750 hci_cc_periodic_inq(hdev, skb);
2753 case HCI_OP_EXIT_PERIODIC_INQ:
2754 hci_cc_exit_periodic_inq(hdev, skb);
2757 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2758 hci_cc_remote_name_req_cancel(hdev, skb);
2761 case HCI_OP_ROLE_DISCOVERY:
2762 hci_cc_role_discovery(hdev, skb);
2765 case HCI_OP_READ_LINK_POLICY:
2766 hci_cc_read_link_policy(hdev, skb);
2769 case HCI_OP_WRITE_LINK_POLICY:
2770 hci_cc_write_link_policy(hdev, skb);
2773 case HCI_OP_READ_DEF_LINK_POLICY:
2774 hci_cc_read_def_link_policy(hdev, skb);
2777 case HCI_OP_WRITE_DEF_LINK_POLICY:
2778 hci_cc_write_def_link_policy(hdev, skb);
2782 hci_cc_reset(hdev, skb);
2785 case HCI_OP_READ_STORED_LINK_KEY:
2786 hci_cc_read_stored_link_key(hdev, skb);
2789 case HCI_OP_DELETE_STORED_LINK_KEY:
2790 hci_cc_delete_stored_link_key(hdev, skb);
2793 case HCI_OP_WRITE_LOCAL_NAME:
2794 hci_cc_write_local_name(hdev, skb);
2797 case HCI_OP_READ_LOCAL_NAME:
2798 hci_cc_read_local_name(hdev, skb);
2801 case HCI_OP_WRITE_AUTH_ENABLE:
2802 hci_cc_write_auth_enable(hdev, skb);
2805 case HCI_OP_WRITE_ENCRYPT_MODE:
2806 hci_cc_write_encrypt_mode(hdev, skb);
2809 case HCI_OP_WRITE_SCAN_ENABLE:
2810 hci_cc_write_scan_enable(hdev, skb);
2813 case HCI_OP_READ_CLASS_OF_DEV:
2814 hci_cc_read_class_of_dev(hdev, skb);
2817 case HCI_OP_WRITE_CLASS_OF_DEV:
2818 hci_cc_write_class_of_dev(hdev, skb);
2821 case HCI_OP_READ_VOICE_SETTING:
2822 hci_cc_read_voice_setting(hdev, skb);
2825 case HCI_OP_WRITE_VOICE_SETTING:
2826 hci_cc_write_voice_setting(hdev, skb);
2829 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2830 hci_cc_read_num_supported_iac(hdev, skb);
2833 case HCI_OP_WRITE_SSP_MODE:
2834 hci_cc_write_ssp_mode(hdev, skb);
2837 case HCI_OP_WRITE_SC_SUPPORT:
2838 hci_cc_write_sc_support(hdev, skb);
2841 case HCI_OP_READ_LOCAL_VERSION:
2842 hci_cc_read_local_version(hdev, skb);
2845 case HCI_OP_READ_LOCAL_COMMANDS:
2846 hci_cc_read_local_commands(hdev, skb);
2849 case HCI_OP_READ_LOCAL_FEATURES:
2850 hci_cc_read_local_features(hdev, skb);
2853 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2854 hci_cc_read_local_ext_features(hdev, skb);
2857 case HCI_OP_READ_BUFFER_SIZE:
2858 hci_cc_read_buffer_size(hdev, skb);
2861 case HCI_OP_READ_BD_ADDR:
2862 hci_cc_read_bd_addr(hdev, skb);
2865 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2866 hci_cc_read_page_scan_activity(hdev, skb);
2869 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2870 hci_cc_write_page_scan_activity(hdev, skb);
2873 case HCI_OP_READ_PAGE_SCAN_TYPE:
2874 hci_cc_read_page_scan_type(hdev, skb);
2877 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2878 hci_cc_write_page_scan_type(hdev, skb);
2881 case HCI_OP_READ_DATA_BLOCK_SIZE:
2882 hci_cc_read_data_block_size(hdev, skb);
2885 case HCI_OP_READ_FLOW_CONTROL_MODE:
2886 hci_cc_read_flow_control_mode(hdev, skb);
2889 case HCI_OP_READ_LOCAL_AMP_INFO:
2890 hci_cc_read_local_amp_info(hdev, skb);
2893 case HCI_OP_READ_CLOCK:
2894 hci_cc_read_clock(hdev, skb);
2897 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2898 hci_cc_read_local_amp_assoc(hdev, skb);
2901 case HCI_OP_READ_INQ_RSP_TX_POWER:
2902 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2905 case HCI_OP_PIN_CODE_REPLY:
2906 hci_cc_pin_code_reply(hdev, skb);
2909 case HCI_OP_PIN_CODE_NEG_REPLY:
2910 hci_cc_pin_code_neg_reply(hdev, skb);
2913 case HCI_OP_READ_LOCAL_OOB_DATA:
2914 hci_cc_read_local_oob_data(hdev, skb);
2917 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2918 hci_cc_read_local_oob_ext_data(hdev, skb);
2921 case HCI_OP_LE_READ_BUFFER_SIZE:
2922 hci_cc_le_read_buffer_size(hdev, skb);
2925 case HCI_OP_LE_READ_LOCAL_FEATURES:
2926 hci_cc_le_read_local_features(hdev, skb);
2929 case HCI_OP_LE_READ_ADV_TX_POWER:
2930 hci_cc_le_read_adv_tx_power(hdev, skb);
2933 case HCI_OP_USER_CONFIRM_REPLY:
2934 hci_cc_user_confirm_reply(hdev, skb);
2937 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2938 hci_cc_user_confirm_neg_reply(hdev, skb);
2941 case HCI_OP_USER_PASSKEY_REPLY:
2942 hci_cc_user_passkey_reply(hdev, skb);
2945 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2946 hci_cc_user_passkey_neg_reply(hdev, skb);
2949 case HCI_OP_LE_SET_RANDOM_ADDR:
2950 hci_cc_le_set_random_addr(hdev, skb);
2953 case HCI_OP_LE_SET_ADV_ENABLE:
2954 hci_cc_le_set_adv_enable(hdev, skb);
2957 case HCI_OP_LE_SET_SCAN_PARAM:
2958 hci_cc_le_set_scan_param(hdev, skb);
2961 case HCI_OP_LE_SET_SCAN_ENABLE:
2962 hci_cc_le_set_scan_enable(hdev, skb);
2965 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2966 hci_cc_le_read_white_list_size(hdev, skb);
2969 case HCI_OP_LE_CLEAR_WHITE_LIST:
2970 hci_cc_le_clear_white_list(hdev, skb);
2973 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2974 hci_cc_le_add_to_white_list(hdev, skb);
2977 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2978 hci_cc_le_del_from_white_list(hdev, skb);
2981 case HCI_OP_LE_READ_SUPPORTED_STATES:
2982 hci_cc_le_read_supported_states(hdev, skb);
2985 case HCI_OP_LE_READ_DEF_DATA_LEN:
2986 hci_cc_le_read_def_data_len(hdev, skb);
2989 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
2990 hci_cc_le_write_def_data_len(hdev, skb);
2993 case HCI_OP_LE_READ_MAX_DATA_LEN:
2994 hci_cc_le_read_max_data_len(hdev, skb);
2997 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2998 hci_cc_write_le_host_supported(hdev, skb);
3001 case HCI_OP_LE_SET_ADV_PARAM:
3002 hci_cc_set_adv_param(hdev, skb);
3005 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
3006 hci_cc_write_remote_amp_assoc(hdev, skb);
3009 case HCI_OP_READ_RSSI:
3010 hci_cc_read_rssi(hdev, skb);
3013 case HCI_OP_READ_TX_POWER:
3014 hci_cc_read_tx_power(hdev, skb);
3017 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3018 hci_cc_write_ssp_debug_mode(hdev, skb);
3022 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3026 if (opcode != HCI_OP_NOP)
3027 cancel_delayed_work(&hdev->cmd_timer);
3029 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3030 atomic_set(&hdev->cmd_cnt, 1);
3032 hci_req_cmd_complete(hdev, opcode, status);
3034 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3035 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event: dispatch the opcode-specific
 * command-status handler, then refresh the command timer/credit state and
 * kick the command work queue.
 * NOTE(review): extraction appears to have dropped lines here (break
 * statements, braces, the switch header) — verify against upstream.
 */
3038 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
3040 struct hci_ev_cmd_status *ev = (void *) skb->data;
3043 skb_pull(skb, sizeof(*ev));
3045 opcode = __le16_to_cpu(ev->opcode);
/* Per-opcode dispatch of the command-status handlers. */
3048 case HCI_OP_INQUIRY:
3049 hci_cs_inquiry(hdev, ev->status);
3052 case HCI_OP_CREATE_CONN:
3053 hci_cs_create_conn(hdev, ev->status);
3056 case HCI_OP_DISCONNECT:
3057 hci_cs_disconnect(hdev, ev->status);
3060 case HCI_OP_ADD_SCO:
3061 hci_cs_add_sco(hdev, ev->status);
3064 case HCI_OP_AUTH_REQUESTED:
3065 hci_cs_auth_requested(hdev, ev->status);
3068 case HCI_OP_SET_CONN_ENCRYPT:
3069 hci_cs_set_conn_encrypt(hdev, ev->status);
3072 case HCI_OP_REMOTE_NAME_REQ:
3073 hci_cs_remote_name_req(hdev, ev->status);
3076 case HCI_OP_READ_REMOTE_FEATURES:
3077 hci_cs_read_remote_features(hdev, ev->status);
3080 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3081 hci_cs_read_remote_ext_features(hdev, ev->status);
3084 case HCI_OP_SETUP_SYNC_CONN:
3085 hci_cs_setup_sync_conn(hdev, ev->status);
3088 case HCI_OP_CREATE_PHY_LINK:
3089 hci_cs_create_phylink(hdev, ev->status);
3092 case HCI_OP_ACCEPT_PHY_LINK:
3093 hci_cs_accept_phylink(hdev, ev->status);
3096 case HCI_OP_SNIFF_MODE:
3097 hci_cs_sniff_mode(hdev, ev->status);
3100 case HCI_OP_EXIT_SNIFF_MODE:
3101 hci_cs_exit_sniff_mode(hdev, ev->status);
3104 case HCI_OP_SWITCH_ROLE:
3105 hci_cs_switch_role(hdev, ev->status);
3108 case HCI_OP_LE_CREATE_CONN:
3109 hci_cs_le_create_conn(hdev, ev->status);
3112 case HCI_OP_LE_START_ENC:
3113 hci_cs_le_start_enc(hdev, ev->status);
/* Unhandled opcode — log only. */
3117 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Any real command (i.e. not NOP) resets the command timeout. */
3121 if (opcode != HCI_OP_NOP)
3122 cancel_delayed_work(&hdev->cmd_timer);
/* Controller advertised free command slots; leave credits alone mid-reset. */
3124 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3125 atomic_set(&hdev->cmd_cnt, 1);
/* NOTE(review): the first half of this condition is missing from this view;
 * only the continuation line survives. */
3128 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
3129 hci_req_cmd_complete(hdev, opcode, ev->status);
/* If credits are available and commands are queued, resume sending. */
3131 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3132 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: record the controller's error code
 * and schedule the error-reset work on the request workqueue.
 */
3135 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3137 struct hci_ev_hardware_error *ev = (void *) skb->data;
3139 hdev->hw_error_code = ev->code;
3141 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection's role (on
 * success), clear any pending role-switch flag and notify upper layers via
 * hci_role_switch_cfm().
 */
3144 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3146 struct hci_ev_role_change *ev = (void *) skb->data;
3147 struct hci_conn *conn;
3149 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3153 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3156 conn->role = ev->role;
3158 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3160 hci_role_switch_cfm(conn, ev->status, ev->role);
3163 hci_dev_unlock(hdev);
/* Handle Number Of Completed Packets: return transmit credits to the
 * per-link-type counters and restart TX work. Only valid when the
 * controller uses packet-based flow control.
 */
3166 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3168 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3171 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3172 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Reject events shorter than the advertised handle array. */
3176 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3177 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3178 BT_DBG("%s bad parameters", hdev->name);
3182 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3184 for (i = 0; i < ev->num_hndl; i++) {
3185 struct hci_comp_pkts_info *info = &ev->handles[i];
3186 struct hci_conn *conn;
3187 __u16 handle, count;
3189 handle = __le16_to_cpu(info->handle);
3190 count = __le16_to_cpu(info->count);
3192 conn = hci_conn_hash_lookup_handle(hdev, handle);
3196 conn->sent -= count;
/* Credit back by link type, clamping to the controller's buffer totals. */
3198 switch (conn->type) {
3200 hdev->acl_cnt += count;
3201 if (hdev->acl_cnt > hdev->acl_pkts)
3202 hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the dedicated LE pool when present, else share ACL. */
3206 if (hdev->le_pkts) {
3207 hdev->le_cnt += count;
3208 if (hdev->le_cnt > hdev->le_pkts)
3209 hdev->le_cnt = hdev->le_pkts;
3211 hdev->acl_cnt += count;
3212 if (hdev->acl_cnt > hdev->acl_pkts)
3213 hdev->acl_cnt = hdev->acl_pkts;
3218 hdev->sco_cnt += count;
3219 if (hdev->sco_cnt > hdev->sco_pkts)
3220 hdev->sco_cnt = hdev->sco_pkts;
3224 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Fresh credits — resume queued transmissions. */
3229 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a hci_conn depending on controller type: BR/EDR
 * controllers look up the connection hash directly, AMP controllers go via
 * the logical channel (hci_chan) table.
 */
3232 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3235 struct hci_chan *chan;
3237 switch (hdev->dev_type) {
3239 return hci_conn_hash_lookup_handle(hdev, handle);
3241 chan = hci_chan_lookup_handle(hdev, handle);
3246 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Handle Number Of Completed Data Blocks (block-based flow control, AMP):
 * return block credits to hdev->block_cnt and restart TX work.
 */
3253 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3255 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3258 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3259 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Reject events shorter than the advertised handle array. */
3263 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3264 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3265 BT_DBG("%s bad parameters", hdev->name);
3269 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3272 for (i = 0; i < ev->num_hndl; i++) {
3273 struct hci_comp_blocks_info *info = &ev->handles[i];
3274 struct hci_conn *conn = NULL;
3275 __u16 handle, block_count;
3277 handle = __le16_to_cpu(info->handle);
3278 block_count = __le16_to_cpu(info->blocks);
3280 conn = __hci_conn_lookup_handle(hdev, handle);
3284 conn->sent -= block_count;
/* Credit back, clamped to the controller's total block count. */
3286 switch (conn->type) {
3289 hdev->block_cnt += block_count;
3290 if (hdev->block_cnt > hdev->num_blocks)
3291 hdev->block_cnt = hdev->num_blocks;
3295 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3300 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event (active/sniff): track the connection's
 * power-save state and finish any deferred SCO setup.
 */
3303 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3305 struct hci_ev_mode_change *ev = (void *) skb->data;
3306 struct hci_conn *conn;
3308 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3312 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3314 conn->mode = ev->mode;
/* If this change was not locally requested, mirror it in the
 * POWER_SAVE flag. */
3316 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3318 if (conn->mode == HCI_CM_ACTIVE)
3319 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3321 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* SCO setup was postponed until the mode change completed. */
3324 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3325 hci_sco_setup(conn, ev->status);
3328 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event: negatively reply if pairing is not
 * allowed (not bondable and not the initiator), otherwise forward the
 * request to user space via mgmt.
 */
3331 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3333 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3334 struct hci_conn *conn;
3336 BT_DBG("%s", hdev->name);
3340 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Extend the disconnect timeout while pairing is in progress. */
3344 if (conn->state == BT_CONNECTED) {
3345 hci_conn_hold(conn);
3346 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3347 hci_conn_drop(conn);
/* Reject: we are not bondable and did not start this authentication. */
3350 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3351 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3352 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3353 sizeof(ev->bdaddr), &ev->bdaddr);
3354 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High security requires a 16-digit (secure) PIN. */
3357 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3362 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3366 hci_dev_unlock(hdev);
/* Record the link key type and PIN length on the connection and derive the
 * pending security level from the key type (unauthenticated -> MEDIUM,
 * authenticated P-192 -> HIGH, authenticated P-256 -> FIPS).
 */
3369 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3371 if (key_type == HCI_LK_CHANGED_COMBINATION)
3374 conn->pin_length = pin_len;
3375 conn->key_type = key_type;
3378 case HCI_LK_LOCAL_UNIT:
3379 case HCI_LK_REMOTE_UNIT:
3380 case HCI_LK_DEBUG_COMBINATION:
/* Legacy combination keys: a 16-digit PIN qualifies for HIGH. */
3382 case HCI_LK_COMBINATION:
3384 conn->pending_sec_level = BT_SECURITY_HIGH;
3386 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3388 case HCI_LK_UNAUTH_COMBINATION_P192:
3389 case HCI_LK_UNAUTH_COMBINATION_P256:
3390 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3392 case HCI_LK_AUTH_COMBINATION_P192:
3393 conn->pending_sec_level = BT_SECURITY_HIGH;
3395 case HCI_LK_AUTH_COMBINATION_P256:
3396 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event: look up a stored key for the peer
 * and reply with it, unless it is too weak for the pending security level,
 * in which case send a negative reply so fresh pairing occurs.
 */
3401 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3403 struct hci_ev_link_key_req *ev = (void *) skb->data;
3404 struct hci_cp_link_key_reply cp;
3405 struct hci_conn *conn;
3406 struct link_key *key;
3408 BT_DBG("%s", hdev->name);
3410 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3415 key = hci_find_link_key(hdev, &ev->bdaddr);
3417 BT_DBG("%s link key not found for %pMR", hdev->name,
3422 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3425 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3427 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* An unauthenticated key cannot satisfy an auth_type requiring MITM. */
3429 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3430 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3431 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3432 BT_DBG("%s ignoring unauthenticated key", hdev->name)
3436 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3437 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3438 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3439 BT_DBG("%s ignoring key unauthenticated for high security",
3444 conn_set_key(conn, key->type, key->pin_len);
3447 bacpy(&cp.bdaddr, &ev->bdaddr);
3448 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3450 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3452 hci_dev_unlock(hdev);
/* No usable key: tell the controller so pairing can be initiated. */
3457 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3458 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event: store the new key, notify
 * mgmt, and decide whether the key should persist (debug keys are dropped
 * unless HCI_KEEP_DEBUG_KEYS is set).
 */
3461 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3463 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3464 struct hci_conn *conn;
3465 struct link_key *key;
3469 BT_DBG("%s", hdev->name);
3473 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold the connection briefly with the normal disconnect timeout. */
3477 hci_conn_hold(conn);
3478 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3479 hci_conn_drop(conn);
3481 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3482 conn_set_key(conn, ev->key_type, conn->pin_length);
3484 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3487 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3488 ev->key_type, pin_len, &persistent);
3492 /* Update connection information since adding the key will have
3493 * fixed up the type in the case of changed combination keys.
3495 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3496 conn_set_key(conn, key->type, key->pin_len);
3498 mgmt_new_link_key(hdev, key, persistent);
3500 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3501 * is set. If it's not set simply remove the key from the kernel
3502 * list (we've still notified user space about it but with
3503 * store_hint being 0).
3505 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3506 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3507 list_del_rcu(&key->list);
3508 kfree_rcu(key, rcu);
/* Mirror the persistence decision in the connection's FLUSH_KEY flag. */
3513 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3515 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3518 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: cache the peer's clock
 * offset in the inquiry cache entry for faster future connections.
 */
3521 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3523 struct hci_ev_clock_offset *ev = (void *) skb->data;
3524 struct hci_conn *conn;
3526 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3530 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3531 if (conn && !ev->status) {
3532 struct inquiry_entry *ie;
3534 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3536 ie->data.clock_offset = ev->clock_offset;
3537 ie->timestamp = jiffies;
3541 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: record the newly
 * negotiated packet types on the connection.
 */
3544 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3546 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3547 struct hci_conn *conn;
3549 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3553 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3554 if (conn && !ev->status)
3555 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3557 hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: refresh the
 * cached page-scan repetition mode in the inquiry cache entry.
 */
3560 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3562 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3563 struct inquiry_entry *ie;
3565 BT_DBG("%s", hdev->name);
3569 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3571 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3572 ie->timestamp = jiffies;
3575 hci_dev_unlock(hdev);
/* Handle Inquiry Result with RSSI. Some controllers also include a
 * pscan_mode field; the two layouts are distinguished by dividing the
 * payload length by the response count. Each result is entered into the
 * inquiry cache and reported to mgmt as a found device.
 */
3578 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3579 struct sk_buff *skb)
3581 struct inquiry_data data;
3582 int num_rsp = *((__u8 *) skb->data);
3584 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from periodic inquiry are not forwarded. */
3589 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Layout detection: per-entry size that isn't the plain RSSI struct
 * means the variant with the extra pscan_mode byte. */
3594 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3595 struct inquiry_info_with_rssi_and_pscan_mode *info;
3596 info = (void *) (skb->data + 1);
3598 for (; num_rsp; num_rsp--, info++) {
3601 bacpy(&data.bdaddr, &info->bdaddr);
3602 data.pscan_rep_mode = info->pscan_rep_mode;
3603 data.pscan_period_mode = info->pscan_period_mode;
3604 data.pscan_mode = info->pscan_mode;
3605 memcpy(data.dev_class, info->dev_class, 3);
3606 data.clock_offset = info->clock_offset;
3607 data.rssi = info->rssi;
3608 data.ssp_mode = 0x00;
3610 flags = hci_inquiry_cache_update(hdev, &data, false);
3612 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3613 info->dev_class, info->rssi,
3614 flags, NULL, 0, NULL, 0);
3617 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3619 for (; num_rsp; num_rsp--, info++) {
3622 bacpy(&data.bdaddr, &info->bdaddr);
3623 data.pscan_rep_mode = info->pscan_rep_mode;
3624 data.pscan_period_mode = info->pscan_period_mode;
3625 data.pscan_mode = 0x00;
3626 memcpy(data.dev_class, info->dev_class, 3);
3627 data.clock_offset = info->clock_offset;
3628 data.rssi = info->rssi;
3629 data.ssp_mode = 0x00;
3631 flags = hci_inquiry_cache_update(hdev, &data, false);
3633 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3634 info->dev_class, info->rssi,
3635 flags, NULL, 0, NULL, 0);
3639 hci_dev_unlock(hdev);
/* Handle Read Remote Extended Features Complete: store the feature page,
 * derive SSP/SC enablement from host features (page 1), then continue
 * connection setup (remote name request or mgmt connected notification).
 */
3642 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3643 struct sk_buff *skb)
3645 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3646 struct hci_conn *conn;
3648 BT_DBG("%s", hdev->name);
3652 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3656 if (ev->page < HCI_MAX_PAGES)
3657 memcpy(conn->features[ev->page], ev->features, 8);
3659 if (!ev->status && ev->page == 0x01) {
3660 struct inquiry_entry *ie;
3662 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3664 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3666 if (ev->features[0] & LMP_HOST_SSP) {
3667 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3669 /* It is mandatory by the Bluetooth specification that
3670 * Extended Inquiry Results are only used when Secure
3671 * Simple Pairing is enabled, but some devices violate
3674 * To make these devices work, the internal SSP
3675 * enabled flag needs to be cleared if the remote host
3676 * features do not indicate SSP support */
3677 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3680 if (ev->features[0] & LMP_HOST_SC)
3681 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Remaining steps only apply while still configuring the link. */
3684 if (conn->state != BT_CONFIG)
3687 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3688 struct hci_cp_remote_name_req cp;
3689 memset(&cp, 0, sizeof(cp));
3690 bacpy(&cp.bdaddr, &conn->dst);
3691 cp.pscan_rep_mode = 0x02;
3692 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3693 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3694 mgmt_device_connected(hdev, conn, 0, NULL, 0);
/* No outgoing authentication required — the link is fully up. */
3696 if (!hci_outgoing_auth_needed(hdev, conn)) {
3697 conn->state = BT_CONNECTED;
3698 hci_connect_cfm(conn, ev->status);
3699 hci_conn_drop(conn);
3703 hci_dev_unlock(hdev);
/* Handle Synchronous Connection Complete (SCO/eSCO): finalize the
 * connection on success, retry eSCO setup as plain SCO for a set of known
 * rejection codes, and confirm the result to upper layers.
 */
3706 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3707 struct sk_buff *skb)
3709 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3710 struct hci_conn *conn;
3712 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3716 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* The controller may report SCO where we attempted eSCO; fall back to
 * looking the connection up by the eSCO link type. */
3718 if (ev->link_type == ESCO_LINK)
3721 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3725 conn->type = SCO_LINK;
3728 switch (ev->status) {
3730 conn->handle = __le16_to_cpu(ev->handle);
3731 conn->state = BT_CONNECTED;
3733 hci_debugfs_create_conn(conn);
3734 hci_conn_add_sysfs(conn);
3737 case 0x10: /* Connection Accept Timeout */
3738 case 0x0d: /* Connection Rejected due to Limited Resources */
3739 case 0x11: /* Unsupported Feature or Parameter Value */
3740 case 0x1c: /* SCO interval rejected */
3741 case 0x1a: /* Unsupported Remote Feature */
3742 case 0x1f: /* Unspecified error */
3743 case 0x20: /* Unsupported LMP Parameter value */
/* eSCO failed — retry with SCO-compatible packet types. */
3745 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3746 (hdev->esco_type & EDR_ESCO_MASK);
3747 if (hci_setup_sync(conn, conn->link->handle))
3753 conn->state = BT_CLOSED;
3757 hci_connect_cfm(conn, ev->status);
3762 hci_dev_unlock(hdev);
/* Compute the used length of an EIR (Extended Inquiry Response) buffer by
 * walking its length-prefixed fields until the end of data.
 */
3765 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3769 while (parsed < eir_len) {
3770 u8 field_len = eir[0];
/* Each field occupies its length byte plus field_len data bytes. */
3775 parsed += field_len + 1;
3776 eir += field_len + 1;
/* Handle Extended Inquiry Result: cache each response (with its EIR data)
 * and report it to mgmt, including the device name if present in the EIR.
 */
3782 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3783 struct sk_buff *skb)
3785 struct inquiry_data data;
3786 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3787 int num_rsp = *((__u8 *) skb->data);
3790 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from periodic inquiry are not forwarded. */
3795 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3800 for (; num_rsp; num_rsp--, info++) {
3804 bacpy(&data.bdaddr, &info->bdaddr);
3805 data.pscan_rep_mode = info->pscan_rep_mode;
3806 data.pscan_period_mode = info->pscan_period_mode;
3807 data.pscan_mode = 0x00;
3808 memcpy(data.dev_class, info->dev_class, 3);
3809 data.clock_offset = info->clock_offset;
3810 data.rssi = info->rssi;
/* EIR results imply the remote supports Secure Simple Pairing. */
3811 data.ssp_mode = 0x01;
3813 if (hci_dev_test_flag(hdev, HCI_MGMT))
3814 name_known = eir_has_data_type(info->data,
3820 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3822 eir_len = eir_get_length(info->data, sizeof(info->data));
3824 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3825 info->dev_class, info->rssi,
3826 flags, info->data, eir_len, NULL, 0);
3829 hci_dev_unlock(hdev);
/* Handle Encryption Key Refresh Complete. Only LE links are processed
 * here; BR/EDR links are finalized via the auth_complete event instead.
 */
3832 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3833 struct sk_buff *skb)
3835 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3836 struct hci_conn *conn;
3838 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3839 __le16_to_cpu(ev->handle));
3843 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3847 /* For BR/EDR the necessary steps are taken through the
3848 * auth_complete event.
3850 if (conn->type != LE_LINK)
3854 conn->sec_level = conn->pending_sec_level;
3856 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Failed refresh on a live link means authentication failed — tear
 * the connection down. */
3858 if (ev->status && conn->state == BT_CONNECTED) {
3859 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3860 hci_conn_drop(conn);
3864 if (conn->state == BT_CONFIG) {
3866 conn->state = BT_CONNECTED;
3868 hci_connect_cfm(conn, ev->status);
3869 hci_conn_drop(conn);
3871 hci_auth_cfm(conn, ev->status);
3873 hci_conn_hold(conn);
3874 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3875 hci_conn_drop(conn);
3879 hci_dev_unlock(hdev);
/* Compute the authentication requirements to send in an IO Capability
 * Reply, combining the remote requirement with our local MITM preference
 * (bit 0 of auth_type encodes MITM protection).
 */
3882 static u8 hci_get_auth_req(struct hci_conn *conn)
3884 /* If remote requests no-bonding follow that lead */
3885 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3886 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3887 return conn->remote_auth | (conn->auth_type & 0x01);
3889 /* If both remote and local have enough IO capabilities, require
3892 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3893 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3894 return conn->remote_auth | 0x01;
3896 /* No MITM protection possible so ignore remote requirement */
3897 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Decide the oob_data value for an IO Capability Reply: report whether
 * usable out-of-band pairing data is stored for this peer, taking Secure
 * Connections and SC-Only mode into account.
 */
3900 static u8 bredr_oob_data_present(struct hci_conn *conn)
3902 struct hci_dev *hdev = conn->hdev;
3903 struct oob_data *data;
3905 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3909 if (bredr_sc_enabled(hdev)) {
3910 /* When Secure Connections is enabled, then just
3911 * return the present value stored with the OOB
3912 * data. The stored value contains the right present
3913 * information. However it can only be trusted when
3914 * not in Secure Connection Only mode.
3916 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3917 return data->present;
3919 /* When Secure Connections Only mode is enabled, then
3920 * the P-256 values are required. If they are not
3921 * available, then do not declare that OOB data is
3924 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3925 !memcmp(data->hash256, ZERO_KEY, 16))
3931 /* When Secure Connections is not enabled or actually
3932 * not supported by the hardware, then check that if
3933 * P-192 data values are present.
3935 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3936 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle the HCI IO Capability Request event: reply with our IO
 * capability, authentication requirements and OOB-data presence if pairing
 * is allowed, otherwise send a negative reply.
 */
3942 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3944 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3945 struct hci_conn *conn;
3947 BT_DBG("%s", hdev->name);
3951 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3955 hci_conn_hold(conn);
3957 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3960 /* Allow pairing if we're pairable, the initiators of the
3961 * pairing or if the remote is not requesting bonding.
3963 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
3964 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3965 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3966 struct hci_cp_io_capability_reply cp;
3968 bacpy(&cp.bdaddr, &ev->bdaddr);
3969 /* Change the IO capability from KeyboardDisplay
3970 * to DisplayYesNo as it is not supported by BT spec. */
3971 cp.capability = (conn->io_capability == 0x04) ?
3972 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3974 /* If we are initiators, there is no remote information yet */
3975 if (conn->remote_auth == 0xff) {
3976 /* Request MITM protection if our IO caps allow it
3977 * except for the no-bonding case.
3979 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3980 conn->auth_type != HCI_AT_NO_BONDING)
3981 conn->auth_type |= 0x01;
3983 conn->auth_type = hci_get_auth_req(conn);
3986 /* If we're not bondable, force one of the non-bondable
3987 * authentication requirement values.
3989 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
3990 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
3992 cp.authentication = conn->auth_type;
3993 cp.oob_data = bredr_oob_data_present(conn);
3995 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3998 struct hci_cp_io_capability_neg_reply cp;
4000 bacpy(&cp.bdaddr, &ev->bdaddr);
4001 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4003 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4008 hci_dev_unlock(hdev);
/* Handle the HCI IO Capability Response event: record the remote device's
 * IO capability and authentication requirements on the connection.
 */
4011 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4013 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4014 struct hci_conn *conn;
4016 BT_DBG("%s", hdev->name);
4020 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4024 conn->remote_cap = ev->capability;
4025 conn->remote_auth = ev->authentication;
4028 hci_dev_unlock(hdev);
/* Handle the HCI User Confirmation Request event: reject if required MITM
 * protection is unattainable, auto-accept when neither side needs MITM
 * (optionally after a configured delay), otherwise forward to user space.
 */
4031 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4032 struct sk_buff *skb)
4034 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4035 int loc_mitm, rem_mitm, confirm_hint = 0;
4036 struct hci_conn *conn;
4038 BT_DBG("%s", hdev->name);
4042 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4045 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement encodes the MITM flag. */
4049 loc_mitm = (conn->auth_type & 0x01);
4050 rem_mitm = (conn->remote_auth & 0x01);
4052 /* If we require MITM but the remote device can't provide that
4053 * (it has NoInputNoOutput) then reject the confirmation
4054 * request. We check the security level here since it doesn't
4055 * necessarily match conn->auth_type.
4057 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4058 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4059 BT_DBG("Rejecting request: remote device can't provide MITM");
4060 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4061 sizeof(ev->bdaddr), &ev->bdaddr);
4065 /* If no side requires MITM protection; auto-accept */
4066 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4067 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4069 /* If we're not the initiators request authorization to
4070 * proceed from user space (mgmt_user_confirm with
4071 * confirm_hint set to 1). The exception is if neither
4072 * side had MITM or if the local IO capability is
4073 * NoInputNoOutput, in which case we do auto-accept
4075 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4076 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4077 (loc_mitm || rem_mitm)) {
4078 BT_DBG("Confirming auto-accept as acceptor");
4083 BT_DBG("Auto-accept of user confirmation with %ums delay",
4084 hdev->auto_accept_delay);
/* An optional configurable delay defers the auto-accept. */
4086 if (hdev->auto_accept_delay > 0) {
4087 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4088 queue_delayed_work(conn->hdev->workqueue,
4089 &conn->auto_accept_work, delay);
4093 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4094 sizeof(ev->bdaddr), &ev->bdaddr);
/* Fall through to user space with the passkey and confirm hint. */
4099 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4100 le32_to_cpu(ev->passkey), confirm_hint);
4103 hci_dev_unlock(hdev);
/* Handle the HCI User Passkey Request event: forward it to user space via
 * mgmt when the management interface is active.
 */
4106 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4107 struct sk_buff *skb)
4109 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4111 BT_DBG("%s", hdev->name);
4113 if (hci_dev_test_flag(hdev, HCI_MGMT))
4114 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI User Passkey Notification event: record the passkey to
 * display, reset the entered-digit counter and notify user space.
 */
4117 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4118 struct sk_buff *skb)
4120 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4121 struct hci_conn *conn;
4123 BT_DBG("%s", hdev->name);
4125 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4129 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4130 conn->passkey_entered = 0;
4132 if (hci_dev_test_flag(hdev, HCI_MGMT))
4133 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4134 conn->dst_type, conn->passkey_notify,
4135 conn->passkey_entered);
/* Handle the HCI Keypress Notification event: track the remote's passkey
 * entry progress (digits entered/erased/cleared) and mirror it to user
 * space.
 */
4138 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4140 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4141 struct hci_conn *conn;
4143 BT_DBG("%s", hdev->name);
4145 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4150 case HCI_KEYPRESS_STARTED:
4151 conn->passkey_entered = 0;
4154 case HCI_KEYPRESS_ENTERED:
4155 conn->passkey_entered++;
4158 case HCI_KEYPRESS_ERASED:
4159 conn->passkey_entered--;
4162 case HCI_KEYPRESS_CLEARED:
4163 conn->passkey_entered = 0;
4166 case HCI_KEYPRESS_COMPLETED:
4170 if (hci_dev_test_flag(hdev, HCI_MGMT))
4171 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4172 conn->dst_type, conn->passkey_notify,
4173 conn->passkey_entered);
/* Handle the HCI Simple Pairing Complete event: reset the remote auth
 * requirement and report pairing failure to mgmt when we were not the
 * authentication initiator.
 */
4176 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4177 struct sk_buff *skb)
4179 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4180 struct hci_conn *conn;
4182 BT_DBG("%s", hdev->name);
4186 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4190 /* Reset the authentication requirement to unknown */
4191 conn->remote_auth = 0xff;
4193 /* To avoid duplicate auth_failed events to user space we check
4194 * the HCI_CONN_AUTH_PEND flag which will be set if we
4195 * initiated the authentication. A traditional auth_complete
4196 * event gets always produced as initiator and is also mapped to
4197 * the mgmt_auth_failed event */
4198 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4199 mgmt_auth_failed(conn, ev->status);
4201 hci_conn_drop(conn);
4204 hci_dev_unlock(hdev);
/* Handle the HCI Remote Host Supported Features event: store page-1
 * features on any active connection and refresh the cached SSP mode in the
 * inquiry cache entry.
 */
4207 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4208 struct sk_buff *skb)
4210 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4211 struct inquiry_entry *ie;
4212 struct hci_conn *conn;
4214 BT_DBG("%s", hdev->name);
4218 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4220 memcpy(conn->features[1], ev->features, 8);
4222 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4224 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4226 hci_dev_unlock(hdev);
/* Handle the HCI Remote OOB Data Request event: reply with stored OOB data
 * for the peer (extended P-192+P-256 form when Secure Connections is
 * enabled), or negatively if nothing is stored.
 */
4229 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4230 struct sk_buff *skb)
4232 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4233 struct oob_data *data;
4235 BT_DBG("%s", hdev->name);
4239 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4242 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored data: send a negative reply. */
4244 struct hci_cp_remote_oob_data_neg_reply cp;
4246 bacpy(&cp.bdaddr, &ev->bdaddr);
4247 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4252 if (bredr_sc_enabled(hdev)) {
4253 struct hci_cp_remote_oob_ext_data_reply cp;
4255 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the P-192 values are zeroed out; only P-256
 * values may be used. */
4256 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4257 memset(cp.hash192, 0, sizeof(cp.hash192));
4258 memset(cp.rand192, 0, sizeof(cp.rand192));
4260 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4261 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4263 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4264 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4266 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4269 struct hci_cp_remote_oob_data_reply cp;
4271 bacpy(&cp.bdaddr, &ev->bdaddr);
4272 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4273 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4275 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4280 hci_dev_unlock(hdev);
/* Handle the AMP Physical Link Complete event: mark the physical link
 * connected, mirror the BR/EDR peer address, register debugfs/sysfs and
 * confirm to the AMP manager.
 */
4283 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4284 struct sk_buff *skb)
4286 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4287 struct hci_conn *hcon, *bredr_hcon;
4289 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4294 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4296 hci_dev_unlock(hdev);
4302 hci_dev_unlock(hdev);
/* The underlying BR/EDR connection associated via the AMP manager. */
4306 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4308 hcon->state = BT_CONNECTED;
4309 bacpy(&hcon->dst, &bredr_hcon->dst);
4311 hci_conn_hold(hcon);
4312 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4313 hci_conn_drop(hcon);
4315 hci_debugfs_create_conn(hcon);
4316 hci_conn_add_sysfs(hcon);
4318 amp_physical_cfm(bredr_hcon, hcon);
4320 hci_dev_unlock(hdev);
/* Handle the AMP Logical Link Complete event: create the AMP hci_chan for
 * the new logical link and confirm it to the waiting L2CAP channel.
 */
4323 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4325 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4326 struct hci_conn *hcon;
4327 struct hci_chan *hchan;
4328 struct amp_mgr *mgr;
4330 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4331 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4334 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4338 /* Create AMP hchan */
4339 hchan = hci_chan_create(hcon);
4343 hchan->handle = le16_to_cpu(ev->handle);
4345 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4347 mgr = hcon->amp_mgr;
4348 if (mgr && mgr->bredr_chan) {
4349 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4351 l2cap_chan_lock(bredr_chan);
/* Switch the channel MTU to the AMP controller's block MTU and
 * signal logical-link confirmation. */
4353 bredr_chan->conn->mtu = hdev->block_mtu;
4354 l2cap_logical_cfm(bredr_chan, hchan, 0);
4355 hci_conn_hold(hcon);
4357 l2cap_chan_unlock(bredr_chan);
/* Handle the AMP Disconnection Logical Link Complete event: look up the
 * logical channel and tear it down via the AMP layer.
 */
4361 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4362 struct sk_buff *skb)
4364 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4365 struct hci_chan *hchan;
4367 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4368 le16_to_cpu(ev->handle), ev->status);
4375 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4379 amp_destroy_logical_link(hchan, ev->reason);
4382 hci_dev_unlock(hdev);
/* Handle the AMP Disconnection Physical Link Complete event: mark the
 * physical link's connection closed.
 */
4385 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4386 struct sk_buff *skb)
4388 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4389 struct hci_conn *hcon;
4391 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4398 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4400 hcon->state = BT_CLOSED;
4404 hci_dev_unlock(hdev);
/* Handle the HCI LE Connection Complete event.
 *
 * Finds (or allocates) the hci_conn for the new LE link, fills in the
 * initiator/responder address information for both master and slave
 * roles, resolves an RPA back to an identity address when an IRK is
 * known, drops the connection if the peer is blacklisted, notifies
 * mgmt, and finally releases any pending auto-connection parameters
 * that triggered this connection.
 *
 * NOTE(review): the enclosing hci_dev_lock(), the role checks
 * (HCI_ROLE_MASTER vs slave branch) and several guard/brace lines are
 * elided in this view; branch boundaries below are inferred from the
 * visible comments and should be confirmed against the full source.
 */
4407 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4409 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4410 	struct hci_conn_params *params;
4411 	struct hci_conn *conn;
4412 	struct smp_irk *irk;
4415 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4419 	/* All controllers implicitly stop advertising in the event of a
4420 	 * connection, so ensure that the state bit is cleared.
4422 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Prefer an existing connection object in BT_CONNECT state (an
 * outgoing attempt we initiated); otherwise allocate a fresh one.
 */
4424 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4426 	conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4428 	BT_ERR("No memory for new connection");
4432 	conn->dst_type = ev->bdaddr_type;
4434 	/* If we didn't have a hci_conn object previously
4435 	 * but we're in master role this must be something
4436 	 * initiated using a white list. Since white list based
4437 	 * connections are not "first class citizens" we don't
4438 	 * have full tracking of them. Therefore, we go ahead
4439 	 * with a "best effort" approach of determining the
4440 	 * initiator address based on the HCI_PRIVACY flag.
4443 	conn->resp_addr_type = ev->bdaddr_type;
4444 	bacpy(&conn->resp_addr, &ev->bdaddr);
4445 	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4446 		conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4447 		bacpy(&conn->init_addr, &hdev->rpa);
4449 	hci_copy_identity_address(hdev,
4451 				  &conn->init_addr_type);
/* Outgoing connection completed: the LE connection timeout no longer
 * applies.
 */
4455 	cancel_delayed_work(&conn->le_conn_timeout);
4459 	/* Set the responder (our side) address type based on
4460 	 * the advertising address type.
4462 	conn->resp_addr_type = hdev->adv_addr_type;
4463 	if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4464 		bacpy(&conn->resp_addr, &hdev->random_addr);
4466 	bacpy(&conn->resp_addr, &hdev->bdaddr);
4468 	conn->init_addr_type = ev->bdaddr_type;
4469 	bacpy(&conn->init_addr, &ev->bdaddr);
4471 	/* For incoming connections, set the default minimum
4472 	 * and maximum connection interval. They will be used
4473 	 * to check if the parameters are in range and if not
4474 	 * trigger the connection update procedure.
4476 	conn->le_conn_min_interval = hdev->le_conn_min_interval;
4477 	conn->le_conn_max_interval = hdev->le_conn_max_interval;
4480 	/* Lookup the identity address from the stored connection
4481 	 * address and address type.
4483 	 * When establishing connections to an identity address, the
4484 	 * connection procedure will store the resolvable random
4485 	 * address first. Now if it can be converted back into the
4486 	 * identity address, start using the identity address from
4489 	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4491 	bacpy(&conn->dst, &irk->bdaddr);
4492 	conn->dst_type = irk->addr_type;
/* Non-zero event status means the connection attempt failed. */
4496 	hci_le_conn_failed(conn, ev->status);
4500 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4501 		addr_type = BDADDR_LE_PUBLIC;
4503 		addr_type = BDADDR_LE_RANDOM;
4505 	/* Drop the connection if the device is blocked */
4506 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4507 		hci_conn_drop(conn);
4511 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4512 		mgmt_device_connected(hdev, conn, 0, NULL, 0);
4514 	conn->sec_level = BT_SECURITY_LOW;
4515 	conn->handle = __le16_to_cpu(ev->handle);
4516 	conn->state = BT_CONNECTED;
/* Cache the negotiated connection parameters from the event. */
4518 	conn->le_conn_interval = le16_to_cpu(ev->interval);
4519 	conn->le_conn_latency = le16_to_cpu(ev->latency);
4520 	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4522 	hci_debugfs_create_conn(conn);
4523 	hci_conn_add_sysfs(conn);
4525 	hci_connect_cfm(conn, ev->status);
/* If this connection was triggered by pending auto-connect params,
 * release the reference those params held on the connection.
 */
4527 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4530 	list_del_init(&params->action);
4532 	hci_conn_drop(params->conn);
4533 	hci_conn_put(params->conn);
4534 	params->conn = NULL;
4539 	hci_update_background_scan(hdev);
4540 	hci_dev_unlock(hdev);
/* Handle the HCI LE Connection Update Complete event.
 *
 * On success, refresh the cached connection interval, slave latency
 * and supervision timeout on the matching hci_conn. NOTE(review): the
 * ev->status check and NULL-conn guard are elided in this view.
 */
4543 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4544 					    struct sk_buff *skb)
4546 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4547 	struct hci_conn *conn;
4549 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4556 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4558 	conn->le_conn_interval = le16_to_cpu(ev->interval);
4559 	conn->le_conn_latency = le16_to_cpu(ev->latency);
4560 	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4563 	hci_dev_unlock(hdev);
4566 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt to the advertiser.
 *
 * Returns the hci_conn for a newly initiated connection, or NULL when
 * no connection should be (or could be) made: the advertising type is
 * not connectable, the device is blacklisted, we already have a slave
 * role connection, or there is no matching pend_le_conns entry with a
 * suitable auto_connect policy.
 */
4567 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4569 					      u8 addr_type, u8 adv_type)
4571 	struct hci_conn *conn;
4572 	struct hci_conn_params *params;
4574 	/* If the event is not connectable don't proceed further */
4575 	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4578 	/* Ignore if the device is blocked */
4579 	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4582 	/* Most controller will fail if we try to create new connections
4583 	 * while we have an existing one in slave role.
4585 	if (hdev->conn_hash.le_num_slave > 0)
4588 	/* If we're not connectable only connect devices that we have in
4589 	 * our pend_le_conns list.
4591 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4596 	switch (params->auto_connect) {
4597 	case HCI_AUTO_CONN_DIRECT:
4598 		/* Only devices advertising with ADV_DIRECT_IND are
4599 		 * triggering a connection attempt. This is allowing
4600 		 * incoming connections from slave devices.
4602 		if (adv_type != LE_ADV_DIRECT_IND)
4605 	case HCI_AUTO_CONN_ALWAYS:
4606 		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
4607 		 * are triggering a connection attempt. This means
4608 		 * that incoming connectioms from slave device are
4609 		 * accepted and also outgoing connections to slave
4610 		 * devices are established when found.
/* Policy allows it: initiate the connection as master with the
 * auto-connection timeout.
 */
4617 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4618 			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4619 	if (!IS_ERR(conn)) {
4620 		/* Store the pointer since we don't really have any
4621 		 * other owner of the object besides the params that
4622 		 * triggered it. This way we can abort the connection if
4623 		 * the parameters get removed and keep the reference
4624 		 * count consistent once the connection is established.
4626 		params->conn = hci_conn_get(conn);
4630 	switch (PTR_ERR(conn)) {
4632 		/* If hci_connect() returns -EBUSY it means there is already
4633 		 * an LE connection attempt going on. Since controllers don't
4634 		 * support more than one connection attempt at the time, we
4635 		 * don't consider this an error case.
4639 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single LE advertising (or direct advertising) report.
 *
 * Validates a direct address (RPA directed to us) when present,
 * resolves the advertiser's RPA to an identity address when an IRK is
 * known, triggers a pending auto-connection via
 * check_pending_le_conn(), and then delivers device-found events to
 * mgmt — either immediately, or by caching ADV_IND/ADV_SCAN_IND
 * reports so a following SCAN_RSP can be merged into one event.
 *
 * @direct_addr: non-NULL only for LE Direct Advertising Reports.
 * @data/@len:   advertising data payload (may be NULL/0).
 *
 * NOTE(review): several guard lines, `return` statements and brace
 * lines are elided in this view; the branch structure is inferred from
 * the surviving comments.
 */
4646 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4647 			       u8 bdaddr_type, bdaddr_t *direct_addr,
4648 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4650 	struct discovery_state *d = &hdev->discovery;
4651 	struct smp_irk *irk;
4652 	struct hci_conn *conn;
4656 	/* If the direct address is present, then this report is from
4657 	 * a LE Direct Advertising Report event. In that case it is
4658 	 * important to see if the address is matching the local
4659 	 * controller address.
4662 	/* Only resolvable random addresses are valid for these
4663 	 * kind of reports and others can be ignored.
4665 	if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4668 	/* If the controller is not using resolvable random
4669 	 * addresses, then this report can be ignored.
4671 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4674 	/* If the local IRK of the controller does not match
4675 	 * with the resolvable random address provided, then
4676 	 * this report can be ignored.
4678 	if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4682 	/* Check if we need to convert to identity address */
4683 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4685 	bdaddr = &irk->bdaddr;
4686 	bdaddr_type = irk->addr_type;
4689 	/* Check if we have been requested to connect to this device */
4690 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4691 	if (conn && type == LE_ADV_IND) {
4692 		/* Store report for later inclusion by
4693 		 * mgmt_device_connected
4695 		memcpy(conn->le_adv_data, data, len);
4696 		conn->le_adv_data_len = len;
4699 	/* Passive scanning shouldn't trigger any device found events,
4700 	 * except for devices marked as CONN_REPORT for which we do send
4701 	 * device found events.
4703 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4704 		if (type == LE_ADV_DIRECT_IND)
4707 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4708 					       bdaddr, bdaddr_type))
4711 		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4712 			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4715 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4716 				  rssi, flags, data, len, NULL, 0);
4720 	/* When receiving non-connectable or scannable undirected
4721 	 * advertising reports, this means that the remote device is
4722 	 * not connectable and then clearly indicate this in the
4723 	 * device found event.
4725 	 * When receiving a scan response, then there is no way to
4726 	 * know if the remote device is connectable or not. However
4727 	 * since scan responses are merged with a previously seen
4728 	 * advertising report, the flags field from that report
4731 	 * In the really unlikely case that a controller get confused
4732 	 * and just sends a scan response event, then it is marked as
4733 	 * not connectable as well.
4735 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4736 	    type == LE_ADV_SCAN_RSP)
4737 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4741 	/* If there's nothing pending either store the data from this
4742 	 * event or send an immediate device found event if the data
4743 	 * should not be stored for later.
4745 	if (!has_pending_adv_report(hdev)) {
4746 		/* If the report will trigger a SCAN_REQ store it for
4749 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4750 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4751 						 rssi, flags, data, len);
4755 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4756 				  rssi, flags, data, len, NULL, 0);
4760 	/* Check if the pending report is for the same device as the new one */
4761 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4762 		 bdaddr_type == d->last_adv_addr_type);
4764 	/* If the pending data doesn't match this report or this isn't a
4765 	 * scan response (e.g. we got a duplicate ADV_IND) then force
4766 	 * sending of the pending data.
4768 	if (type != LE_ADV_SCAN_RSP || !match) {
4769 		/* Send out whatever is in the cache, but skip duplicates */
4771 		mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4772 				  d->last_adv_addr_type, NULL,
4773 				  d->last_adv_rssi, d->last_adv_flags,
4775 				  d->last_adv_data_len, NULL, 0);
4777 		/* If the new report will trigger a SCAN_REQ store it for
4780 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4781 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4782 						 rssi, flags, data, len);
4786 		/* The advertising reports cannot be merged, so clear
4787 		 * the pending report and send out a device found event.
4789 		clear_pending_adv_report(hdev);
4790 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4791 				  rssi, flags, data, len, NULL, 0);
4795 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4796 	 * the new event is a SCAN_RSP. We can therefore proceed with
4797 	 * sending a merged device found event.
4799 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4800 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4801 			  d->last_adv_data, d->last_adv_data_len, data, len);
4802 	clear_pending_adv_report(hdev);
/* Handle the HCI LE Advertising Report event.
 *
 * The event can batch multiple reports; each is processed in turn via
 * process_adv_report(). The RSSI byte trails the variable-length
 * advertising data of each report.
 */
4805 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4807 	u8 num_reports = skb->data[0];
4808 	void *ptr = &skb->data[1];
4812 	while (num_reports--) {
4813 		struct hci_ev_le_advertising_info *ev = ptr;
/* RSSI is the single byte immediately after ev->data[ev->length - 1]. */
4816 		rssi = ev->data[ev->length];
4817 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4818 				   ev->bdaddr_type, NULL, 0, rssi,
4819 				   ev->data, ev->length);
/* Advance past this report: fixed header + data + trailing RSSI byte. */
4821 		ptr += sizeof(*ev) + ev->length + 1;
4824 	hci_dev_unlock(hdev);
/* Handle the HCI LE Long Term Key Request event.
 *
 * Looks up a stored LTK for the connection's peer and, if one matches
 * (SC keys require EDiv/Rand == 0; legacy keys must match the event's
 * EDiv/Rand), replies with HCI_OP_LE_LTK_REPLY. Otherwise a negative
 * reply is sent. An STK (short term key) is single-use and removed
 * from the store after being handed to the controller.
 *
 * NOTE(review): the NULL-conn/NULL-ltk guards and the not_found label
 * are elided in this view.
 */
4827 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4829 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4830 	struct hci_cp_le_ltk_reply cp;
4831 	struct hci_cp_le_ltk_neg_reply neg;
4832 	struct hci_conn *conn;
4833 	struct smp_ltk *ltk;
4835 	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4839 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4843 	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
4847 	if (smp_ltk_is_sc(ltk)) {
4848 		/* With SC both EDiv and Rand are set to zero */
4849 		if (ev->ediv || ev->rand)
4852 		/* For non-SC keys check that EDiv and Rand match */
4853 		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
4857 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4858 	cp.handle = cpu_to_le16(conn->handle);
4860 	conn->pending_sec_level = smp_ltk_sec_level(ltk);
4862 	conn->enc_key_size = ltk->enc_size;
4864 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4866 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4867 	 * temporary key used to encrypt a connection following
4868 	 * pairing. It is used during the Encrypted Session Setup to
4869 	 * distribute the keys. Later, security can be re-established
4870 	 * using a distributed LTK.
4872 	if (ltk->type == SMP_STK) {
4873 		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4874 		list_del_rcu(&ltk->list);
4875 		kfree_rcu(ltk, rcu);
4877 		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4880 	hci_dev_unlock(hdev);
/* No usable key: tell the controller to reject the encryption request. */
4885 	neg.handle = ev->handle;
4886 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4887 	hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for
 * @handle. NOTE(review): the line assigning the rejection reason into
 * cp is elided in this view.
 */
4890 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4893 	struct hci_cp_le_conn_param_req_neg_reply cp;
4895 	cp.handle = cpu_to_le16(handle);
4898 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the HCI LE Remote Connection Parameter Request event.
 *
 * Rejects the request when the connection is unknown/not connected or
 * the proposed parameters are out of range; otherwise, when we are
 * master, stores the new parameters in the matching hci_conn_params
 * entry and informs mgmt, then accepts the request with a positive
 * reply echoing the remote's proposed values.
 */
4902 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4903 					     struct sk_buff *skb)
4905 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4906 	struct hci_cp_le_conn_param_req_reply cp;
4907 	struct hci_conn *hcon;
4908 	u16 handle, min, max, latency, timeout;
4910 	handle = le16_to_cpu(ev->handle);
4911 	min = le16_to_cpu(ev->interval_min);
4912 	max = le16_to_cpu(ev->interval_max);
4913 	latency = le16_to_cpu(ev->latency);
4914 	timeout = le16_to_cpu(ev->timeout);
4916 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
4917 	if (!hcon || hcon->state != BT_CONNECTED)
4918 		return send_conn_param_neg_reply(hdev, handle,
4919 						 HCI_ERROR_UNKNOWN_CONN_ID);
4921 	if (hci_check_conn_params(min, max, latency, timeout))
4922 		return send_conn_param_neg_reply(hdev, handle,
4923 						 HCI_ERROR_INVALID_LL_PARAMS);
4925 	if (hcon->role == HCI_ROLE_MASTER) {
4926 		struct hci_conn_params *params;
/* Persist the remote's preferred parameters for future connections. */
4931 		params = hci_conn_params_lookup(hdev, &hcon->dst,
4934 		params->conn_min_interval = min;
4935 		params->conn_max_interval = max;
4936 		params->conn_latency = latency;
4937 		params->supervision_timeout = timeout;
4943 		hci_dev_unlock(hdev);
4945 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4946 				    store_hint, min, max, latency, timeout);
/* Accept: echo the remote's values back in little-endian form
 * (ev fields are already __le16, so no conversion is needed).
 */
4949 	cp.handle = ev->handle;
4950 	cp.interval_min = ev->interval_min;
4951 	cp.interval_max = ev->interval_max;
4952 	cp.latency = ev->latency;
4953 	cp.timeout = ev->timeout;
4957 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the HCI LE Direct Advertising Report event.
 *
 * Like hci_le_adv_report_evt() but each report carries a direct
 * (target) address and a fixed-size structure with no advertising
 * data, so process_adv_report() is called with NULL data.
 */
4960 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4961 					 struct sk_buff *skb)
4963 	u8 num_reports = skb->data[0];
4964 	void *ptr = &skb->data[1];
4968 	while (num_reports--) {
4969 		struct hci_ev_le_direct_adv_info *ev = ptr;
4971 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4972 				   ev->bdaddr_type, &ev->direct_addr,
4973 				   ev->direct_addr_type, ev->rssi, NULL, 0);
4978 	hci_dev_unlock(hdev);
/* Dispatch an HCI LE Meta event to the sub-event handler selected by
 * le_ev->subevent. The meta-event header is stripped from the skb
 * before dispatch, so each handler sees its own event structure at
 * skb->data.
 */
4981 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4983 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4985 	skb_pull(skb, sizeof(*le_ev));
4987 	switch (le_ev->subevent) {
4988 	case HCI_EV_LE_CONN_COMPLETE:
4989 		hci_le_conn_complete_evt(hdev, skb);
4992 	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4993 		hci_le_conn_update_complete_evt(hdev, skb);
4996 	case HCI_EV_LE_ADVERTISING_REPORT:
4997 		hci_le_adv_report_evt(hdev, skb);
5000 	case HCI_EV_LE_LTK_REQ:
5001 		hci_le_ltk_request_evt(hdev, skb);
5004 	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5005 		hci_le_remote_conn_param_req_evt(hdev, skb);
5008 	case HCI_EV_LE_DIRECT_ADV_REPORT:
5009 		hci_le_direct_adv_report_evt(hdev, skb);
/* Handle the HCI Channel Selected event (AMP).
 *
 * Looks up the physical-link hci_conn by phy_handle and kicks off the
 * read of the final local AMP assoc data. NOTE(review): the NULL-hcon
 * guard is elided in this view.
 */
5017 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5019 	struct hci_ev_channel_selected *ev = (void *) skb->data;
5020 	struct hci_conn *hcon;
5022 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5024 	skb_pull(skb, sizeof(*ev));
5026 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5030 	amp_read_loc_assoc_final_data(hdev, hcon);
5033 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5035 struct hci_event_hdr *hdr = (void *) skb->data;
5036 __u8 event = hdr->evt;
5040 /* Received events are (currently) only needed when a request is
5041 * ongoing so avoid unnecessary memory allocation.
5043 if (hci_req_pending(hdev)) {
5044 kfree_skb(hdev->recv_evt);
5045 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
5048 hci_dev_unlock(hdev);
5050 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5052 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
5053 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5054 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
5056 hci_req_cmd_complete(hdev, opcode, 0);
5060 case HCI_EV_INQUIRY_COMPLETE:
5061 hci_inquiry_complete_evt(hdev, skb);
5064 case HCI_EV_INQUIRY_RESULT:
5065 hci_inquiry_result_evt(hdev, skb);
5068 case HCI_EV_CONN_COMPLETE:
5069 hci_conn_complete_evt(hdev, skb);
5072 case HCI_EV_CONN_REQUEST:
5073 hci_conn_request_evt(hdev, skb);
5076 case HCI_EV_DISCONN_COMPLETE:
5077 hci_disconn_complete_evt(hdev, skb);
5080 case HCI_EV_AUTH_COMPLETE:
5081 hci_auth_complete_evt(hdev, skb);
5084 case HCI_EV_REMOTE_NAME:
5085 hci_remote_name_evt(hdev, skb);
5088 case HCI_EV_ENCRYPT_CHANGE:
5089 hci_encrypt_change_evt(hdev, skb);
5092 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5093 hci_change_link_key_complete_evt(hdev, skb);
5096 case HCI_EV_REMOTE_FEATURES:
5097 hci_remote_features_evt(hdev, skb);
5100 case HCI_EV_CMD_COMPLETE:
5101 hci_cmd_complete_evt(hdev, skb);
5104 case HCI_EV_CMD_STATUS:
5105 hci_cmd_status_evt(hdev, skb);
5108 case HCI_EV_HARDWARE_ERROR:
5109 hci_hardware_error_evt(hdev, skb);
5112 case HCI_EV_ROLE_CHANGE:
5113 hci_role_change_evt(hdev, skb);
5116 case HCI_EV_NUM_COMP_PKTS:
5117 hci_num_comp_pkts_evt(hdev, skb);
5120 case HCI_EV_MODE_CHANGE:
5121 hci_mode_change_evt(hdev, skb);
5124 case HCI_EV_PIN_CODE_REQ:
5125 hci_pin_code_request_evt(hdev, skb);
5128 case HCI_EV_LINK_KEY_REQ:
5129 hci_link_key_request_evt(hdev, skb);
5132 case HCI_EV_LINK_KEY_NOTIFY:
5133 hci_link_key_notify_evt(hdev, skb);
5136 case HCI_EV_CLOCK_OFFSET:
5137 hci_clock_offset_evt(hdev, skb);
5140 case HCI_EV_PKT_TYPE_CHANGE:
5141 hci_pkt_type_change_evt(hdev, skb);
5144 case HCI_EV_PSCAN_REP_MODE:
5145 hci_pscan_rep_mode_evt(hdev, skb);
5148 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5149 hci_inquiry_result_with_rssi_evt(hdev, skb);
5152 case HCI_EV_REMOTE_EXT_FEATURES:
5153 hci_remote_ext_features_evt(hdev, skb);
5156 case HCI_EV_SYNC_CONN_COMPLETE:
5157 hci_sync_conn_complete_evt(hdev, skb);
5160 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5161 hci_extended_inquiry_result_evt(hdev, skb);
5164 case HCI_EV_KEY_REFRESH_COMPLETE:
5165 hci_key_refresh_complete_evt(hdev, skb);
5168 case HCI_EV_IO_CAPA_REQUEST:
5169 hci_io_capa_request_evt(hdev, skb);
5172 case HCI_EV_IO_CAPA_REPLY:
5173 hci_io_capa_reply_evt(hdev, skb);
5176 case HCI_EV_USER_CONFIRM_REQUEST:
5177 hci_user_confirm_request_evt(hdev, skb);
5180 case HCI_EV_USER_PASSKEY_REQUEST:
5181 hci_user_passkey_request_evt(hdev, skb);
5184 case HCI_EV_USER_PASSKEY_NOTIFY:
5185 hci_user_passkey_notify_evt(hdev, skb);
5188 case HCI_EV_KEYPRESS_NOTIFY:
5189 hci_keypress_notify_evt(hdev, skb);
5192 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5193 hci_simple_pair_complete_evt(hdev, skb);
5196 case HCI_EV_REMOTE_HOST_FEATURES:
5197 hci_remote_host_features_evt(hdev, skb);
5200 case HCI_EV_LE_META:
5201 hci_le_meta_evt(hdev, skb);
5204 case HCI_EV_CHANNEL_SELECTED:
5205 hci_chan_selected_evt(hdev, skb);
5208 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5209 hci_remote_oob_data_request_evt(hdev, skb);
5212 case HCI_EV_PHY_LINK_COMPLETE:
5213 hci_phy_link_complete_evt(hdev, skb);
5216 case HCI_EV_LOGICAL_LINK_COMPLETE:
5217 hci_loglink_complete_evt(hdev, skb);
5220 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5221 hci_disconn_loglink_complete_evt(hdev, skb);
5224 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5225 hci_disconn_phylink_complete_evt(hdev, skb);
5228 case HCI_EV_NUM_COMP_BLOCKS:
5229 hci_num_comp_blocks_evt(hdev, skb);
5233 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5238 hdev->stat.evt_rx++;