2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
61 hci_conn_check_pending(hdev);
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
66 __u8 status = *((__u8 *) skb->data);
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
73 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
78 __u8 status = *((__u8 *) skb->data);
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
85 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
87 hci_conn_check_pending(hdev);
90 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
93 BT_DBG("%s", hdev->name);
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
110 conn->role = rp->role;
112 hci_dev_unlock(hdev);
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 conn->link_policy = __le16_to_cpu(rp->policy);
131 hci_dev_unlock(hdev);
134 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 struct hci_rp_write_link_policy *rp = (void *) skb->data;
137 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
145 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 conn->link_policy = get_unaligned_le16(sent + 2);
155 hci_dev_unlock(hdev);
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 hdev->link_policy = __le16_to_cpu(rp->policy);
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 __u8 status = *((__u8 *) skb->data);
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
186 hdev->link_policy = get_unaligned_le16(sent);
189 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
191 __u8 status = *((__u8 *) skb->data);
193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
195 clear_bit(HCI_RESET, &hdev->flags);
200 /* Reset all non-persistent flags */
201 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
211 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
212 hdev->scan_rsp_data_len = 0;
214 hdev->le_scan_type = LE_SCAN_PASSIVE;
216 hdev->ssp_debug_mode = 0;
218 hci_bdaddr_list_clear(&hdev->le_white_list);
221 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
224 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 struct hci_cp_read_stored_link_key *sent;
227 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
229 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
233 if (!rp->status && sent->read_all == 0x01) {
234 hdev->stored_max_keys = rp->max_keys;
235 hdev->stored_num_keys = rp->num_keys;
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
252 hdev->stored_num_keys = 0;
255 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
257 __u8 status = *((__u8 *) skb->data);
260 BT_DBG("%s status 0x%2.2x", hdev->name, status);
262 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
268 if (test_bit(HCI_MGMT, &hdev->dev_flags))
269 mgmt_set_local_name_complete(hdev, sent, status);
271 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
273 hci_dev_unlock(hdev);
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
285 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
286 test_bit(HCI_CONFIG, &hdev->dev_flags))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
290 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
292 __u8 status = *((__u8 *) skb->data);
295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 __u8 param = *((__u8 *) sent);
306 if (param == AUTH_ENABLED)
307 set_bit(HCI_AUTH, &hdev->flags);
309 clear_bit(HCI_AUTH, &hdev->flags);
312 if (test_bit(HCI_MGMT, &hdev->dev_flags))
313 mgmt_auth_enable_complete(hdev, status);
315 hci_dev_unlock(hdev);
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
320 __u8 status = *((__u8 *) skb->data);
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
333 param = *((__u8 *) sent);
336 set_bit(HCI_ENCRYPT, &hdev->flags);
338 clear_bit(HCI_ENCRYPT, &hdev->flags);
341 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
343 __u8 status = *((__u8 *) skb->data);
347 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
353 param = *((__u8 *) sent);
358 hdev->discov_timeout = 0;
362 if (param & SCAN_INQUIRY)
363 set_bit(HCI_ISCAN, &hdev->flags);
365 clear_bit(HCI_ISCAN, &hdev->flags);
367 if (param & SCAN_PAGE)
368 set_bit(HCI_PSCAN, &hdev->flags);
370 clear_bit(HCI_PSCAN, &hdev->flags);
373 hci_dev_unlock(hdev);
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
385 memcpy(hdev->dev_class, rp->dev_class, 3);
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
391 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
393 __u8 status = *((__u8 *) skb->data);
396 BT_DBG("%s status 0x%2.2x", hdev->name, status);
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 memcpy(hdev->dev_class, sent, 3);
407 if (test_bit(HCI_MGMT, &hdev->dev_flags))
408 mgmt_set_class_of_dev_complete(hdev, sent, status);
410 hci_dev_unlock(hdev);
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
415 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
418 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
423 setting = __le16_to_cpu(rp->voice_setting);
425 if (hdev->voice_setting == setting)
428 hdev->voice_setting = setting;
430 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
433 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
439 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
452 setting = get_unaligned_le16(sent);
454 if (hdev->voice_setting == setting)
457 hdev->voice_setting = setting;
459 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
462 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
475 hdev->num_iac = rp->num_iac;
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
480 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
482 __u8 status = *((__u8 *) skb->data);
483 struct hci_cp_write_ssp_mode *sent;
485 BT_DBG("%s status 0x%2.2x", hdev->name, status);
487 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
495 hdev->features[1][0] |= LMP_HOST_SSP;
497 hdev->features[1][0] &= ~LMP_HOST_SSP;
500 if (test_bit(HCI_MGMT, &hdev->dev_flags))
501 mgmt_ssp_enable_complete(hdev, sent->mode, status);
504 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
506 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
509 hci_dev_unlock(hdev);
512 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
514 u8 status = *((u8 *) skb->data);
515 struct hci_cp_write_sc_support *sent;
517 BT_DBG("%s status 0x%2.2x", hdev->name, status);
519 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
527 hdev->features[1][0] |= LMP_HOST_SC;
529 hdev->features[1][0] &= ~LMP_HOST_SC;
532 if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
534 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
536 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
539 hci_dev_unlock(hdev);
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
544 struct hci_rp_read_local_version *rp = (void *) skb->data;
546 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
551 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
552 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver;
556 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
572 test_bit(HCI_CONFIG, &hdev->dev_flags))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
576 static void hci_cc_read_local_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 memcpy(hdev->features, rp->features, 8);
588 /* Adjust default settings according to features
589 * supported by device. */
591 if (hdev->features[0][0] & LMP_3SLOT)
592 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
594 if (hdev->features[0][0] & LMP_5SLOT)
595 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
597 if (hdev->features[0][1] & LMP_HV2) {
598 hdev->pkt_type |= (HCI_HV2);
599 hdev->esco_type |= (ESCO_HV2);
602 if (hdev->features[0][1] & LMP_HV3) {
603 hdev->pkt_type |= (HCI_HV3);
604 hdev->esco_type |= (ESCO_HV3);
607 if (lmp_esco_capable(hdev))
608 hdev->esco_type |= (ESCO_EV3);
610 if (hdev->features[0][4] & LMP_EV4)
611 hdev->esco_type |= (ESCO_EV4);
613 if (hdev->features[0][4] & LMP_EV5)
614 hdev->esco_type |= (ESCO_EV5);
616 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
617 hdev->esco_type |= (ESCO_2EV3);
619 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
620 hdev->esco_type |= (ESCO_3EV3);
622 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
623 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
629 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 if (hdev->max_page < rp->max_page)
637 hdev->max_page = rp->max_page;
639 if (rp->page < HCI_MAX_PAGES)
640 memcpy(hdev->features[rp->page], rp->features, 8);
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
653 hdev->flow_ctl_mode = rp->mode;
656 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
658 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
665 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
666 hdev->sco_mtu = rp->sco_mtu;
667 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
668 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
670 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
675 hdev->acl_cnt = hdev->acl_pkts;
676 hdev->sco_cnt = hdev->sco_pkts;
678 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
679 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr);
694 if (test_bit(HCI_SETUP, &hdev->dev_flags))
695 bacpy(&hdev->setup_addr, &rp->bdaddr);
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
701 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
708 if (test_bit(HCI_INIT, &hdev->flags)) {
709 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 hdev->page_scan_window = __le16_to_cpu(rp->window);
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
717 u8 status = *((u8 *) skb->data);
718 struct hci_cp_write_page_scan_activity *sent;
720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
725 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
729 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 hdev->page_scan_window = __le16_to_cpu(sent->window);
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
750 u8 status = *((u8 *) skb->data);
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
760 hdev->page_scan_type = *type;
763 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
766 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
773 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
774 hdev->block_len = __le16_to_cpu(rp->block_len);
775 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
777 hdev->block_cnt = hdev->num_blocks;
779 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
780 hdev->block_cnt, hdev->block_len);
783 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
785 struct hci_rp_read_clock *rp = (void *) skb->data;
786 struct hci_cp_read_clock *cp;
787 struct hci_conn *conn;
789 BT_DBG("%s", hdev->name);
791 if (skb->len < sizeof(*rp))
799 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
803 if (cp->which == 0x00) {
804 hdev->clock = le32_to_cpu(rp->clock);
808 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
810 conn->clock = le32_to_cpu(rp->clock);
811 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
815 hci_dev_unlock(hdev);
818 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
821 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
823 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 hdev->amp_status = rp->amp_status;
829 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
830 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
831 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
832 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
833 hdev->amp_type = rp->amp_type;
834 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
835 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
836 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
837 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
840 a2mp_send_getinfo_rsp(hdev);
843 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
846 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
847 struct amp_assoc *assoc = &hdev->loc_assoc;
848 size_t rem_len, frag_len;
850 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
855 frag_len = skb->len - sizeof(*rp);
856 rem_len = __le16_to_cpu(rp->rem_len);
858 if (rem_len > frag_len) {
859 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
861 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
862 assoc->offset += frag_len;
864 /* Read other fragments */
865 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
870 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
871 assoc->len = assoc->offset + rem_len;
875 /* Send A2MP Rsp when all fragments are received */
876 a2mp_send_getampassoc_rsp(hdev, rp->status);
877 a2mp_send_create_phy_link_req(hdev, rp->status);
880 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
883 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
890 hdev->inq_tx_power = rp->tx_power;
893 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
895 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
896 struct hci_cp_pin_code_reply *cp;
897 struct hci_conn *conn;
899 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
903 if (test_bit(HCI_MGMT, &hdev->dev_flags))
904 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
909 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
913 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
915 conn->pin_length = cp->pin_len;
918 hci_dev_unlock(hdev);
921 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
923 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
929 if (test_bit(HCI_MGMT, &hdev->dev_flags))
930 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
933 hci_dev_unlock(hdev);
936 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
939 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
941 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
946 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
947 hdev->le_pkts = rp->le_max_pkt;
949 hdev->le_cnt = hdev->le_pkts;
951 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
954 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
957 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
959 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964 memcpy(hdev->le_features, rp->features, 8);
967 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
970 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
972 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
977 hdev->adv_tx_power = rp->tx_power;
980 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
982 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
984 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
988 if (test_bit(HCI_MGMT, &hdev->dev_flags))
989 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
992 hci_dev_unlock(hdev);
995 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
998 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1000 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1004 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1005 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1006 ACL_LINK, 0, rp->status);
1008 hci_dev_unlock(hdev);
1011 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1013 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1015 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1019 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1020 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1023 hci_dev_unlock(hdev);
1026 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1027 struct sk_buff *skb)
1029 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1031 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1035 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1036 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1037 ACL_LINK, 0, rp->status);
1039 hci_dev_unlock(hdev);
1042 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1043 struct sk_buff *skb)
1045 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1050 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1052 hci_dev_unlock(hdev);
1055 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1056 struct sk_buff *skb)
1058 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1060 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1063 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1064 rp->hash256, rp->rand256,
1066 hci_dev_unlock(hdev);
1070 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1072 __u8 status = *((__u8 *) skb->data);
1075 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1080 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1086 bacpy(&hdev->random_addr, sent);
1088 hci_dev_unlock(hdev);
1091 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1093 __u8 *sent, status = *((__u8 *) skb->data);
1095 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1100 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1106 /* If we're doing connection initiation as peripheral. Set a
1107 * timeout in case something goes wrong.
1110 struct hci_conn *conn;
1112 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1114 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1116 queue_delayed_work(hdev->workqueue,
1117 &conn->le_conn_timeout,
1118 conn->conn_timeout);
1120 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1123 hci_dev_unlock(hdev);
1126 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1128 struct hci_cp_le_set_scan_param *cp;
1129 __u8 status = *((__u8 *) skb->data);
1131 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1136 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1142 hdev->le_scan_type = cp->type;
1144 hci_dev_unlock(hdev);
1147 static bool has_pending_adv_report(struct hci_dev *hdev)
1149 struct discovery_state *d = &hdev->discovery;
1151 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1154 static void clear_pending_adv_report(struct hci_dev *hdev)
1156 struct discovery_state *d = &hdev->discovery;
1158 bacpy(&d->last_adv_addr, BDADDR_ANY);
1159 d->last_adv_data_len = 0;
1162 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1163 u8 bdaddr_type, s8 rssi, u32 flags,
1166 struct discovery_state *d = &hdev->discovery;
1168 bacpy(&d->last_adv_addr, bdaddr);
1169 d->last_adv_addr_type = bdaddr_type;
1170 d->last_adv_rssi = rssi;
1171 d->last_adv_flags = flags;
1172 memcpy(d->last_adv_data, data, len);
1173 d->last_adv_data_len = len;
1176 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1177 struct sk_buff *skb)
1179 struct hci_cp_le_set_scan_enable *cp;
1180 __u8 status = *((__u8 *) skb->data);
1182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1187 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1193 switch (cp->enable) {
1194 case LE_SCAN_ENABLE:
1195 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1196 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1197 clear_pending_adv_report(hdev);
1200 case LE_SCAN_DISABLE:
1201 /* We do this here instead of when setting DISCOVERY_STOPPED
1202 * since the latter would potentially require waiting for
1203 * inquiry to stop too.
1205 if (has_pending_adv_report(hdev)) {
1206 struct discovery_state *d = &hdev->discovery;
1208 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1209 d->last_adv_addr_type, NULL,
1210 d->last_adv_rssi, d->last_adv_flags,
1212 d->last_adv_data_len, NULL, 0);
1215 /* Cancel this timer so that we don't try to disable scanning
1216 * when it's already disabled.
1218 cancel_delayed_work(&hdev->le_scan_disable);
1220 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1222 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1223 * interrupted scanning due to a connect request. Mark
1224 * therefore discovery as stopped. If this was not
1225 * because of a connect request advertising might have
1226 * been disabled because of active scanning, so
1227 * re-enable it again if necessary.
1229 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1231 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1232 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1233 hdev->discovery.state == DISCOVERY_FINDING)
1234 mgmt_reenable_advertising(hdev);
1239 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1243 hci_dev_unlock(hdev);
1246 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1247 struct sk_buff *skb)
1249 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1251 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1256 hdev->le_white_list_size = rp->size;
1259 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1260 struct sk_buff *skb)
1262 __u8 status = *((__u8 *) skb->data);
1264 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1269 hci_bdaddr_list_clear(&hdev->le_white_list);
1272 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1273 struct sk_buff *skb)
1275 struct hci_cp_le_add_to_white_list *sent;
1276 __u8 status = *((__u8 *) skb->data);
1278 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1283 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1287 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1291 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1292 struct sk_buff *skb)
1294 struct hci_cp_le_del_from_white_list *sent;
1295 __u8 status = *((__u8 *) skb->data);
1297 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1302 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1306 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1310 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1311 struct sk_buff *skb)
1313 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1315 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1320 memcpy(hdev->le_states, rp->le_states, 8);
1323 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1324 struct sk_buff *skb)
1326 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1328 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1333 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1334 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1337 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1338 struct sk_buff *skb)
1340 struct hci_cp_le_write_def_data_len *sent;
1341 __u8 status = *((__u8 *) skb->data);
1343 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1348 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1352 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1353 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1356 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1357 struct sk_buff *skb)
1359 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1361 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1366 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1367 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1368 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1369 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1372 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1373 struct sk_buff *skb)
1375 struct hci_cp_write_le_host_supported *sent;
1376 __u8 status = *((__u8 *) skb->data);
1378 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1383 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1390 hdev->features[1][0] |= LMP_HOST_LE;
1391 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1393 hdev->features[1][0] &= ~LMP_HOST_LE;
1394 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1395 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1399 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1401 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1403 hci_dev_unlock(hdev);
1406 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1408 struct hci_cp_le_set_adv_param *cp;
1409 u8 status = *((u8 *) skb->data);
1411 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1416 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1421 hdev->adv_addr_type = cp->own_address_type;
1422 hci_dev_unlock(hdev);
1425 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1426 struct sk_buff *skb)
1428 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1430 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1431 hdev->name, rp->status, rp->phy_handle);
1436 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1439 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1441 struct hci_rp_read_rssi *rp = (void *) skb->data;
1442 struct hci_conn *conn;
1444 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1451 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1453 conn->rssi = rp->rssi;
1455 hci_dev_unlock(hdev);
1458 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1460 struct hci_cp_read_tx_power *sent;
1461 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1462 struct hci_conn *conn;
1464 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1469 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1475 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1479 switch (sent->type) {
1481 conn->tx_power = rp->tx_power;
1484 conn->max_tx_power = rp->tx_power;
1489 hci_dev_unlock(hdev);
1492 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1494 u8 status = *((u8 *) skb->data);
1497 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1502 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1504 hdev->ssp_debug_mode = *mode;
1507 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1509 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1512 hci_conn_check_pending(hdev);
1516 set_bit(HCI_INQUIRY, &hdev->flags);
1519 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1521 struct hci_cp_create_conn *cp;
1522 struct hci_conn *conn;
1524 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1526 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1532 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1534 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1537 if (conn && conn->state == BT_CONNECT) {
1538 if (status != 0x0c || conn->attempt > 2) {
1539 conn->state = BT_CLOSED;
1540 hci_proto_connect_cfm(conn, status);
1543 conn->state = BT_CONNECT2;
1547 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1550 BT_ERR("No memory for new connection");
1554 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ADD_SCO.
 * On failure, finds the ACL link the SCO was being added to and closes
 * the attached SCO connection (acl->link lookup lines elided here).
 */
1557 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1559 struct hci_cp_add_sco *cp;
1560 struct hci_conn *acl, *sco;
1563 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1568 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1572 handle = __le16_to_cpu(cp->handle);
1574 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1578 acl = hci_conn_hash_lookup_handle(hdev, handle);
1582 sco->state = BT_CLOSED;
1584 hci_proto_connect_cfm(sco, status);
1589 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_AUTH_REQUESTED.
 * On failure during connection setup (BT_CONFIG), reports the failure to
 * the upper protocol and drops the reference taken for authentication.
 */
1592 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1594 struct hci_cp_auth_requested *cp;
1595 struct hci_conn *conn;
1597 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1602 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1608 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1610 if (conn->state == BT_CONFIG) {
1611 hci_proto_connect_cfm(conn, status);
1612 hci_conn_drop(conn);
1616 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.
 * Mirrors hci_cs_auth_requested(): on failure while the connection is
 * still being configured, confirm the failure upstream and drop the ref.
 */
1619 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1621 struct hci_cp_set_conn_encrypt *cp;
1622 struct hci_conn *conn;
1624 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1629 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1635 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1637 if (conn->state == BT_CONFIG) {
1638 hci_proto_connect_cfm(conn, status);
1639 hci_conn_drop(conn);
1643 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication
 * before being reported as connected. Returns non-zero (elided return
 * statements) when auth should be requested.
 */
1646 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1647 struct hci_conn *conn)
/* Only outgoing connections still in the config phase qualify. */
1649 if (conn->state != BT_CONFIG || !conn->out)
/* SDP-level security never triggers authentication. */
1652 if (conn->pending_sec_level == BT_SECURITY_SDP)
1655 /* Only request authentication for SSP connections or non-SSP
1656 * devices with sec_level MEDIUM or HIGH or if MITM protection
1659 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1660 conn->pending_sec_level != BT_SECURITY_FIPS &&
1661 conn->pending_sec_level != BT_SECURITY_HIGH &&
1662 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry.
 * Copies the page-scan parameters gathered during inquiry into the
 * command so the controller can page the device efficiently.
 * Returns the result of hci_send_cmd() (0 on success).
 */
1668 static int hci_resolve_name(struct hci_dev *hdev,
1669 struct inquiry_entry *e)
1671 struct hci_cp_remote_name_req cp;
1673 memset(&cp, 0, sizeof(cp));
1675 bacpy(&cp.bdaddr, &e->data.bdaddr);
1676 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1677 cp.pscan_mode = e->data.pscan_mode;
1678 cp.clock_offset = e->data.clock_offset;
1680 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Start resolving the name of the next discovered device whose name is
 * still needed. Returns true when a resolution was started (return
 * statements elided in this view).
 */
1683 static bool hci_resolve_next_name(struct hci_dev *hdev)
1685 struct discovery_state *discov = &hdev->discovery;
1686 struct inquiry_entry *e;
/* Nothing queued for name resolution — nothing to do. */
1688 if (list_empty(&discov->resolve))
1691 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1695 if (hci_resolve_name(hdev, e) == 0) {
1696 e->name_state = NAME_PENDING;
/* Process a resolved (or failed) remote name during discovery:
 * report the device as connected to mgmt if appropriate, update the
 * inquiry-cache entry's name state, notify mgmt of the name, and either
 * kick off the next resolution or finish discovery.
 */
1703 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1704 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1706 struct discovery_state *discov = &hdev->discovery;
1707 struct inquiry_entry *e;
1709 /* Update the mgmt connected state if necessary. Be careful with
1710 * conn objects that exist but are not (yet) connected however.
1711 * Only those in BT_CONFIG or BT_CONNECTED states can be
1712 * considered connected.
1715 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1716 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1717 mgmt_device_connected(hdev, conn, 0, name, name_len);
1719 if (discov->state == DISCOVERY_STOPPED)
1722 if (discov->state == DISCOVERY_STOPPING)
1723 goto discov_complete;
1725 if (discov->state != DISCOVERY_RESOLVING)
1728 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1729 /* If the device was not found in a list of found devices names of which
1730 * are pending. there is no need to continue resolving a next name as it
1731 * will be done upon receiving another Remote Name Request Complete
/* A non-NULL name means resolution succeeded; record and report it.
 * (The if/else around these lines is elided in this view.)
 */
1738 e->name_state = NAME_KNOWN;
1739 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1740 e->data.rssi, name, name_len);
1742 e->name_state = NAME_NOT_KNOWN;
/* More names pending? Keep resolving; otherwise discovery is done. */
1745 if (hci_resolve_next_name(hdev))
1749 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 * On failure, clears any pending-name bookkeeping via
 * hci_check_pending_name(), then requests authentication if the
 * outgoing connection still needs it.
 */
1752 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1754 struct hci_cp_remote_name_req *cp;
1755 struct hci_conn *conn;
1757 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1759 /* If successful wait for the name req complete event before
1760 * checking for the need to do authentication */
1764 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1770 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
/* Only mgmt-managed adapters track pending name resolution. */
1772 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1773 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1778 if (!hci_outgoing_auth_needed(hdev, conn))
/* Avoid sending a second auth request if one is already in flight. */
1781 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1782 struct hci_cp_auth_requested auth_cp;
1784 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1786 auth_cp.handle = __cpu_to_le16(conn->handle);
1787 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1788 sizeof(auth_cp), &auth_cp);
1792 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.
 * On failure during connection setup, confirm the failure to the upper
 * protocol and release the configuration reference.
 */
1795 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1797 struct hci_cp_read_remote_features *cp;
1798 struct hci_conn *conn;
1800 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1805 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1811 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1813 if (conn->state == BT_CONFIG) {
1814 hci_proto_connect_cfm(conn, status);
1815 hci_conn_drop(conn);
1819 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 * Same failure handling as hci_cs_read_remote_features(), but for the
 * extended-features page request.
 */
1822 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1824 struct hci_cp_read_remote_ext_features *cp;
1825 struct hci_conn *conn;
1827 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1832 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1838 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1840 if (conn->state == BT_CONFIG) {
1841 hci_proto_connect_cfm(conn, status);
1842 hci_conn_drop(conn);
1846 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 * On failure, locates the ACL link the synchronous (SCO/eSCO) connection
 * was attached to and closes that SCO connection (link lookup elided).
 */
1849 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1851 struct hci_cp_setup_sync_conn *cp;
1852 struct hci_conn *acl, *sco;
1855 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1860 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1864 handle = __le16_to_cpu(cp->handle);
1866 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1870 acl = hci_conn_hash_lookup_handle(hdev, handle);
1874 sco->state = BT_CLOSED;
1876 hci_proto_connect_cfm(sco, status);
1881 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SNIFF_MODE.
 * On failure, clears the pending mode-change flag and completes any SCO
 * setup that was waiting for the mode change.
 */
1884 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1886 struct hci_cp_sniff_mode *cp;
1887 struct hci_conn *conn;
1889 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1894 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1902 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1904 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1905 hci_sco_setup(conn, status);
1908 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 * Mirror image of hci_cs_sniff_mode(): same cleanup on failure.
 */
1911 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1913 struct hci_cp_exit_sniff_mode *cp;
1914 struct hci_conn *conn;
1916 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1921 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1927 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1929 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1931 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1932 hci_sco_setup(conn, status);
1935 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_DISCONNECT.
 * On failure, informs mgmt that the disconnect attempt failed so user
 * space does not wait for a Disconnection Complete event.
 */
1938 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1940 struct hci_cp_disconnect *cp;
1941 struct hci_conn *conn;
1946 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1952 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1954 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1955 conn->dst_type, status);
1957 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP).
 * On failure, the matching hcon is looked up for cleanup (cleanup lines
 * elided here); on success, continues by writing the remote association.
 */
1960 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1962 struct hci_cp_create_phy_link *cp;
1964 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1966 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1973 struct hci_conn *hcon;
/* phy_handle is a single byte, so no endian conversion is needed. */
1975 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1979 amp_write_remote_assoc(hdev, cp->phy_handle);
1982 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK (AMP).
 * On success (status guard elided), writes the remote AMP association
 * for the accepted physical link.
 */
1985 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1987 struct hci_cp_accept_phy_link *cp;
1989 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1994 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1998 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 * Records the initiator/responder address information required by SMP
 * on the pending LE connection and arms a connection timeout for
 * direct (non-whitelist) connection attempts.
 */
2001 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2003 struct hci_cp_le_create_conn *cp;
2004 struct hci_conn *conn;
2006 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2008 /* All connection failure handling is taken care of by the
2009 * hci_le_conn_failed function which is triggered by the HCI
2010 * request completion callbacks used for connecting.
2015 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2021 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
2025 /* Store the initiator and responder address information which
2026 * is needed for SMP. These values will not change during the
2027 * lifetime of the connection.
/* Our own address: random if we initiated with a random address type,
 * otherwise the public controller address.
 */
2029 conn->init_addr_type = cp->own_address_type;
2030 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
2031 bacpy(&conn->init_addr, &hdev->random_addr);
2033 bacpy(&conn->init_addr, &hdev->bdaddr);
2035 conn->resp_addr_type = cp->peer_addr_type;
2036 bacpy(&conn->resp_addr, &cp->peer_addr);
2038 /* We don't want the connection attempt to stick around
2039 * indefinitely since LE doesn't have a page timeout concept
2040 * like BR/EDR. Set a timer for any connection that doesn't use
2041 * the white list for connecting.
2043 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
2044 queue_delayed_work(conn->hdev->workqueue,
2045 &conn->le_conn_timeout,
2046 conn->conn_timeout);
2049 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_START_ENC.
 * On failure while the link is still connected, tears the connection
 * down with an authentication-failure reason.
 */
2052 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2054 struct hci_cp_le_start_enc *cp;
2055 struct hci_conn *conn;
2057 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2064 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2068 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2072 if (conn->state != BT_CONNECTED)
2075 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2076 hci_conn_drop(conn);
2079 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SWITCH_ROLE.
 * On failure, clears the pending role-switch flag so future switches
 * are not blocked.
 */
2082 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2084 struct hci_cp_switch_role *cp;
2085 struct hci_conn *conn;
2087 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2092 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2098 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2100 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2102 hci_dev_unlock(hdev);
/* Handler for the HCI Inquiry Complete event.
 * Clears the inquiry flag, wakes waiters, and — for mgmt-managed
 * adapters — either transitions discovery into name resolution or
 * declares it stopped.
 */
2105 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2107 __u8 status = *((__u8 *) skb->data);
2108 struct discovery_state *discov = &hdev->discovery;
2109 struct inquiry_entry *e;
2111 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2113 hci_conn_check_pending(hdev);
2115 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2118 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2119 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2121 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2126 if (discov->state != DISCOVERY_FINDING)
/* No names left to resolve — discovery is finished. */
2129 if (list_empty(&discov->resolve)) {
2130 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Otherwise start resolving the first NAME_NEEDED entry. */
2134 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2135 if (e && hci_resolve_name(hdev, e) == 0) {
2136 e->name_state = NAME_PENDING;
2137 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2139 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2143 hci_dev_unlock(hdev);
/* Handler for the (basic) HCI Inquiry Result event.
 * Copies each response into the inquiry cache and reports the device to
 * mgmt. Basic inquiry results carry no RSSI or SSP information.
 */
2146 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2148 struct inquiry_data data;
/* First byte of the event is the response count; entries follow it. */
2149 struct inquiry_info *info = (void *) (skb->data + 1);
2150 int num_rsp = *((__u8 *) skb->data);
2152 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Periodic inquiry results are not forwarded to mgmt. */
2157 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2162 for (; num_rsp; num_rsp--, info++) {
2165 bacpy(&data.bdaddr, &info->bdaddr);
2166 data.pscan_rep_mode = info->pscan_rep_mode;
2167 data.pscan_period_mode = info->pscan_period_mode;
2168 data.pscan_mode = info->pscan_mode;
2169 memcpy(data.dev_class, info->dev_class, 3);
2170 data.clock_offset = info->clock_offset;
/* This event format has no RSSI field. */
2171 data.rssi = HCI_RSSI_INVALID;
2172 data.ssp_mode = 0x00;
2174 flags = hci_inquiry_cache_update(hdev, &data, false);
2176 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2177 info->dev_class, HCI_RSSI_INVALID,
2178 flags, NULL, 0, NULL, 0);
2181 hci_dev_unlock(hdev);
/* Handler for the HCI Connection Complete event.
 * Finalizes ACL/SCO connection setup: records the handle, transitions
 * the state machine, registers sysfs/debugfs entries, mirrors the
 * adapter auth/encrypt settings, kicks off remote-feature discovery and
 * (for old controllers) packet-type change; on failure, closes the
 * connection and notifies mgmt.
 */
2184 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2186 struct hci_ev_conn_complete *ev = (void *) skb->data;
2187 struct hci_conn *conn;
2189 BT_DBG("%s", hdev->name);
2193 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report eSCO setups with link_type SCO; retry the
 * lookup as ESCO and coerce the conn type to match the event.
 */
2195 if (ev->link_type != SCO_LINK)
2198 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2202 conn->type = SCO_LINK;
2206 conn->handle = __le16_to_cpu(ev->handle);
2208 if (conn->type == ACL_LINK) {
2209 conn->state = BT_CONFIG;
2210 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) connection without a stored link key:
 * keep the connection around long enough for pairing.
 */
2212 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2213 !hci_find_link_key(hdev, &ev->bdaddr))
2214 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2216 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2218 conn->state = BT_CONNECTED;
2220 hci_debugfs_create_conn(conn);
2221 hci_conn_add_sysfs(conn);
/* Propagate adapter-wide auth/encrypt policy to the new link. */
2223 if (test_bit(HCI_AUTH, &hdev->flags))
2224 set_bit(HCI_CONN_AUTH, &conn->flags);
2226 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2227 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2229 /* Get remote features */
2230 if (conn->type == ACL_LINK) {
2231 struct hci_cp_read_remote_features cp;
2232 cp.handle = ev->handle;
2233 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2236 hci_update_page_scan(hdev);
2239 /* Set packet type for incoming connection */
2240 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2241 struct hci_cp_change_conn_ptype cp;
2242 cp.handle = ev->handle;
2243 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2244 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: mark closed and tell mgmt the connect failed. */
2248 conn->state = BT_CLOSED;
2249 if (conn->type == ACL_LINK)
2250 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2251 conn->dst_type, ev->status);
2254 if (conn->type == ACL_LINK)
2255 hci_sco_setup(conn, ev->status);
2258 hci_proto_connect_cfm(conn, ev->status);
2260 } else if (ev->link_type != ACL_LINK)
2261 hci_proto_connect_cfm(conn, ev->status);
2264 hci_dev_unlock(hdev);
2266 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with
 * reason "unacceptable BD_ADDR" (used for blacklisted/unwanted peers).
 */
2269 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2271 struct hci_cp_reject_conn_req cp;
2273 bacpy(&cp.bdaddr, bdaddr);
2274 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2275 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Handler for the HCI Connection Request event.
 * Applies acceptance policy (protocol mask, blacklist, mgmt
 * connectable/whitelist rules); on acceptance, creates or reuses a conn
 * object and either accepts the ACL/SCO request immediately or defers
 * the decision to the upper protocol.
 */
2278 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2280 struct hci_ev_conn_request *ev = (void *) skb->data;
2281 int mask = hdev->link_mode;
2282 struct inquiry_entry *ie;
2283 struct hci_conn *conn;
2286 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Let L2CAP/SCO protocols veto or adjust the link-mode mask. */
2289 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2292 if (!(mask & HCI_LM_ACCEPT)) {
2293 hci_reject_conn(hdev, &ev->bdaddr);
/* Blacklisted peers are rejected unconditionally. */
2297 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2299 hci_reject_conn(hdev, &ev->bdaddr);
2303 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2304 * connection. These features are only touched through mgmt so
2305 * only do the checks if HCI_MGMT is set.
2307 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2308 !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
2309 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2311 hci_reject_conn(hdev, &ev->bdaddr);
2315 /* Connection accepted */
/* Refresh the cached device class from the request, if we know it. */
2319 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2321 memcpy(ie->data.dev_class, ev->dev_class, 3);
2323 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2326 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2329 BT_ERR("No memory for new connection");
2330 hci_dev_unlock(hdev);
2335 memcpy(conn->dev_class, ev->dev_class, 3);
2337 hci_dev_unlock(hdev);
/* ACL requests (or SCO when the protocol does not defer and eSCO is
 * unsupported) are accepted with the legacy Accept Connection command.
 */
2339 if (ev->link_type == ACL_LINK ||
2340 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2341 struct hci_cp_accept_conn_req cp;
2342 conn->state = BT_CONNECT;
2344 bacpy(&cp.bdaddr, &ev->bdaddr);
2346 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2347 cp.role = 0x00; /* Become master */
2349 cp.role = 0x01; /* Remain slave */
2351 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2352 } else if (!(flags & HCI_PROTO_DEFER)) {
2353 struct hci_cp_accept_sync_conn_req cp;
2354 conn->state = BT_CONNECT;
2356 bacpy(&cp.bdaddr, &ev->bdaddr);
2357 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 0x1f40 = 8000 bytes/s: standard 64 kbit/s CVSD voice bandwidth. */
2359 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2360 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2361 cp.max_latency = cpu_to_le16(0xffff);
2362 cp.content_format = cpu_to_le16(hdev->voice_setting);
2363 cp.retrans_effort = 0xff;
2365 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred: let the upper protocol decide later (BT_CONNECT2). */
2368 conn->state = BT_CONNECT2;
2369 hci_proto_connect_cfm(conn, 0);
/* Map an HCI disconnect error code onto the coarser mgmt disconnect
 * reason reported to user space. Unrecognized codes map to
 * MGMT_DEV_DISCONN_UNKNOWN.
 */
2373 static u8 hci_to_mgmt_reason(u8 err)
2376 case HCI_ERROR_CONNECTION_TIMEOUT:
2377 return MGMT_DEV_DISCONN_TIMEOUT;
2378 case HCI_ERROR_REMOTE_USER_TERM:
2379 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2380 case HCI_ERROR_REMOTE_POWER_OFF:
2381 return MGMT_DEV_DISCONN_REMOTE;
2382 case HCI_ERROR_LOCAL_HOST_TERM:
2383 return MGMT_DEV_DISCONN_LOCAL_HOST;
2385 return MGMT_DEV_DISCONN_UNKNOWN;
/* Handler for the HCI Disconnection Complete event.
 * Notifies mgmt, removes stored link keys when flagged, re-queues
 * auto-connect parameters, confirms the disconnect to the upper
 * protocols and re-enables LE advertising where required.
 */
2389 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2391 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2392 u8 reason = hci_to_mgmt_reason(ev->reason);
2393 struct hci_conn_params *params;
2394 struct hci_conn *conn;
2395 bool mgmt_connected;
2398 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2402 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status means the disconnect itself failed. */
2407 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2408 conn->dst_type, ev->status);
2412 conn->state = BT_CLOSED;
/* Only report "disconnected" to mgmt if we reported "connected". */
2414 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2415 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2416 reason, mgmt_connected);
2418 if (conn->type == ACL_LINK) {
2419 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2420 hci_remove_link_key(hdev, &conn->dst);
2422 hci_update_page_scan(hdev);
/* Re-arm auto-connection for this peer according to its policy. */
2425 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2427 switch (params->auto_connect) {
2428 case HCI_AUTO_CONN_LINK_LOSS:
/* Only reconnect on link loss when it was actually a timeout. */
2429 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2433 case HCI_AUTO_CONN_DIRECT:
2434 case HCI_AUTO_CONN_ALWAYS:
2435 list_del_init(&params->action);
2436 list_add(&params->action, &hdev->pend_le_conns);
2437 hci_update_background_scan(hdev);
2447 hci_proto_disconn_cfm(conn, ev->reason);
2450 /* Re-enable advertising if necessary, since it might
2451 * have been disabled by the connection. From the
2452 * HCI_LE_Set_Advertise_Enable command description in
2453 * the core specification (v4.0):
2454 * "The Controller shall continue advertising until the Host
2455 * issues an LE_Set_Advertise_Enable command with
2456 * Advertising_Enable set to 0x00 (Advertising is disabled)
2457 * or until a connection is created or until the Advertising
2458 * is timed out due to Directed Advertising."
2460 if (type == LE_LINK)
2461 mgmt_reenable_advertising(hdev)
2464 hci_dev_unlock(hdev);
/* Handler for the HCI Authentication Complete event.
 * Updates the connection security level, deals with legacy re-auth
 * quirks, continues with encryption during connection setup, and
 * flushes any pending encrypt request.
 */
2467 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2469 struct hci_ev_auth_complete *ev = (void *) skb->data;
2470 struct hci_conn *conn;
2472 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2476 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Legacy (pre-SSP) devices cannot re-authenticate an existing link;
 * treat the attempt as if it had succeeded.
 */
2481 if (!hci_conn_ssp_enabled(conn) &&
2482 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2483 BT_INFO("re-auth of legacy device is not possible.");
2485 set_bit(HCI_CONN_AUTH, &conn->flags);
2486 conn->sec_level = conn->pending_sec_level;
2489 mgmt_auth_failed(conn, ev->status);
2492 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2493 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2495 if (conn->state == BT_CONFIG) {
/* During setup with SSP, successful auth is followed by turning
 * encryption on before reporting the connection up.
 */
2496 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2497 struct hci_cp_set_conn_encrypt cp;
2498 cp.handle = ev->handle;
2500 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2503 conn->state = BT_CONNECTED;
2504 hci_proto_connect_cfm(conn, ev->status);
2505 hci_conn_drop(conn);
2508 hci_auth_cfm(conn, ev->status);
2510 hci_conn_hold(conn);
2511 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2512 hci_conn_drop(conn);
/* An encrypt request queued behind this auth: send or cancel it. */
2515 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2517 struct hci_cp_set_conn_encrypt cp;
2518 cp.handle = ev->handle;
2520 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2523 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2524 hci_encrypt_cfm(conn, ev->status, 0x00);
2529 hci_dev_unlock(hdev);
/* Handler for the HCI Remote Name Request Complete event.
 * Feeds the (possibly empty) name into the discovery name-resolution
 * machinery and then requests authentication if the connection is
 * outgoing and still needs it.
 */
2532 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2534 struct hci_ev_remote_name *ev = (void *) skb->data;
2535 struct hci_conn *conn;
2537 BT_DBG("%s", hdev->name);
2539 hci_conn_check_pending(hdev);
2543 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2545 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* The name field is not guaranteed NUL-terminated: bound with strnlen. */
2548 if (ev->status == 0)
2549 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2550 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2552 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2558 if (!hci_outgoing_auth_needed(hdev, conn))
2561 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2562 struct hci_cp_auth_requested cp;
2564 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2566 cp.handle = __cpu_to_le16(conn->handle);
2567 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2571 hci_dev_unlock(hdev);
/* Handler for the HCI Encryption Change event.
 * Updates the connection's auth/encrypt/FIPS/AES-CCM flags, handles
 * failed encryption (RPA rotation on LE, disconnect on established
 * links), enforces Secure Connections Only policy during setup, and
 * confirms the change to upper layers.
 */
2574 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2576 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2577 struct hci_conn *conn;
2579 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2583 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2589 /* Encryption implies authentication */
2590 set_bit(HCI_CONN_AUTH, &conn->flags);
2591 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2592 conn->sec_level = conn->pending_sec_level;
2594 /* P-256 authentication key implies FIPS */
2595 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2596 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM. */
2598 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2599 conn->type == LE_LINK)
2600 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2602 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2603 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2607 /* We should disregard the current RPA and generate a new one
2608 * whenever the encryption procedure fails.
2610 if (ev->status && conn->type == LE_LINK)
2611 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2613 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* An established link that fails encryption is torn down. */
2615 if (ev->status && conn->state == BT_CONNECTED) {
2616 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2617 hci_conn_drop(conn);
2621 if (conn->state == BT_CONFIG) {
2623 conn->state = BT_CONNECTED;
2625 /* In Secure Connections Only mode, do not allow any
2626 * connections that are not encrypted with AES-CCM
2627 * using a P-256 authenticated combination key.
2629 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2630 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2631 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2632 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2633 hci_conn_drop(conn);
2637 hci_proto_connect_cfm(conn, ev->status);
2638 hci_conn_drop(conn);
2640 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2643 hci_dev_unlock(hdev);
/* Handler for the HCI Change Connection Link Key Complete event.
 * Marks the link secure on success, clears the pending auth flag and
 * confirms the key change to upper layers.
 */
2646 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2647 struct sk_buff *skb)
2649 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2650 struct hci_conn *conn;
2652 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2656 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2659 set_bit(HCI_CONN_SECURE, &conn->flags);
2661 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2663 hci_key_change_cfm(conn, ev->status);
2666 hci_dev_unlock(hdev);
/* Handler for the HCI Read Remote Supported Features Complete event.
 * Stores page 0 of the remote features, then either requests the
 * extended features page, resolves the remote name, or completes the
 * connection (with optional authentication step).
 */
2669 static void hci_remote_features_evt(struct hci_dev *hdev,
2670 struct sk_buff *skb)
2672 struct hci_ev_remote_features *ev = (void *) skb->data;
2673 struct hci_conn *conn;
2675 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2679 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 0 of the remote feature mask. */
2684 memcpy(conn->features[0], ev->features, 8);
2686 if (conn->state != BT_CONFIG)
/* Both sides support extended features: fetch page 1 next. */
2689 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2690 lmp_ext_feat_capable(conn)) {
2691 struct hci_cp_read_remote_ext_features cp;
2692 cp.handle = ev->handle;
2694 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Not yet reported to mgmt: get the remote name first so the
 * connected event can carry it.
 */
2699 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2700 struct hci_cp_remote_name_req cp;
2701 memset(&cp, 0, sizeof(cp));
2702 bacpy(&cp.bdaddr, &conn->dst);
2703 cp.pscan_rep_mode = 0x02;
2704 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2705 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2706 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2708 if (!hci_outgoing_auth_needed(hdev, conn)) {
2709 conn->state = BT_CONNECTED;
2710 hci_proto_connect_cfm(conn, ev->status);
2711 hci_conn_drop(conn);
2715 hci_dev_unlock(hdev);
/* Handler for the HCI Command Complete event.
 * Strips the event header, dispatches the reply to the per-opcode
 * hci_cc_* handler, cancels the command timeout, completes any HCI
 * request waiting on this opcode, and re-arms the command queue
 * according to the controller's reported credit (ncmd).
 */
2718 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2720 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* First byte after the header is the command status for most replies. */
2721 u8 status = skb->data[sizeof(*ev)];
/* Advance past the event header so handlers see only the reply. */
2724 skb_pull(skb, sizeof(*ev));
2726 opcode = __le16_to_cpu(ev->opcode);
2729 case HCI_OP_INQUIRY_CANCEL:
2730 hci_cc_inquiry_cancel(hdev, skb);
2733 case HCI_OP_PERIODIC_INQ:
2734 hci_cc_periodic_inq(hdev, skb);
2737 case HCI_OP_EXIT_PERIODIC_INQ:
2738 hci_cc_exit_periodic_inq(hdev, skb);
2741 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2742 hci_cc_remote_name_req_cancel(hdev, skb);
2745 case HCI_OP_ROLE_DISCOVERY:
2746 hci_cc_role_discovery(hdev, skb);
2749 case HCI_OP_READ_LINK_POLICY:
2750 hci_cc_read_link_policy(hdev, skb);
2753 case HCI_OP_WRITE_LINK_POLICY:
2754 hci_cc_write_link_policy(hdev, skb);
2757 case HCI_OP_READ_DEF_LINK_POLICY:
2758 hci_cc_read_def_link_policy(hdev, skb);
2761 case HCI_OP_WRITE_DEF_LINK_POLICY:
2762 hci_cc_write_def_link_policy(hdev, skb);
2766 hci_cc_reset(hdev, skb);
2769 case HCI_OP_READ_STORED_LINK_KEY:
2770 hci_cc_read_stored_link_key(hdev, skb);
2773 case HCI_OP_DELETE_STORED_LINK_KEY:
2774 hci_cc_delete_stored_link_key(hdev, skb);
2777 case HCI_OP_WRITE_LOCAL_NAME:
2778 hci_cc_write_local_name(hdev, skb);
2781 case HCI_OP_READ_LOCAL_NAME:
2782 hci_cc_read_local_name(hdev, skb);
2785 case HCI_OP_WRITE_AUTH_ENABLE:
2786 hci_cc_write_auth_enable(hdev, skb);
2789 case HCI_OP_WRITE_ENCRYPT_MODE:
2790 hci_cc_write_encrypt_mode(hdev, skb);
2793 case HCI_OP_WRITE_SCAN_ENABLE:
2794 hci_cc_write_scan_enable(hdev, skb);
2797 case HCI_OP_READ_CLASS_OF_DEV:
2798 hci_cc_read_class_of_dev(hdev, skb);
2801 case HCI_OP_WRITE_CLASS_OF_DEV:
2802 hci_cc_write_class_of_dev(hdev, skb);
2805 case HCI_OP_READ_VOICE_SETTING:
2806 hci_cc_read_voice_setting(hdev, skb);
2809 case HCI_OP_WRITE_VOICE_SETTING:
2810 hci_cc_write_voice_setting(hdev, skb);
2813 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2814 hci_cc_read_num_supported_iac(hdev, skb);
2817 case HCI_OP_WRITE_SSP_MODE:
2818 hci_cc_write_ssp_mode(hdev, skb);
2821 case HCI_OP_WRITE_SC_SUPPORT:
2822 hci_cc_write_sc_support(hdev, skb);
2825 case HCI_OP_READ_LOCAL_VERSION:
2826 hci_cc_read_local_version(hdev, skb);
2829 case HCI_OP_READ_LOCAL_COMMANDS:
2830 hci_cc_read_local_commands(hdev, skb);
2833 case HCI_OP_READ_LOCAL_FEATURES:
2834 hci_cc_read_local_features(hdev, skb);
2837 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2838 hci_cc_read_local_ext_features(hdev, skb);
2841 case HCI_OP_READ_BUFFER_SIZE:
2842 hci_cc_read_buffer_size(hdev, skb);
2845 case HCI_OP_READ_BD_ADDR:
2846 hci_cc_read_bd_addr(hdev, skb);
2849 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2850 hci_cc_read_page_scan_activity(hdev, skb);
2853 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2854 hci_cc_write_page_scan_activity(hdev, skb);
2857 case HCI_OP_READ_PAGE_SCAN_TYPE:
2858 hci_cc_read_page_scan_type(hdev, skb);
2861 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2862 hci_cc_write_page_scan_type(hdev, skb);
2865 case HCI_OP_READ_DATA_BLOCK_SIZE:
2866 hci_cc_read_data_block_size(hdev, skb);
2869 case HCI_OP_READ_FLOW_CONTROL_MODE:
2870 hci_cc_read_flow_control_mode(hdev, skb);
2873 case HCI_OP_READ_LOCAL_AMP_INFO:
2874 hci_cc_read_local_amp_info(hdev, skb);
2877 case HCI_OP_READ_CLOCK:
2878 hci_cc_read_clock(hdev, skb);
2881 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2882 hci_cc_read_local_amp_assoc(hdev, skb);
2885 case HCI_OP_READ_INQ_RSP_TX_POWER:
2886 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2889 case HCI_OP_PIN_CODE_REPLY:
2890 hci_cc_pin_code_reply(hdev, skb);
2893 case HCI_OP_PIN_CODE_NEG_REPLY:
2894 hci_cc_pin_code_neg_reply(hdev, skb);
2897 case HCI_OP_READ_LOCAL_OOB_DATA:
2898 hci_cc_read_local_oob_data(hdev, skb);
2901 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2902 hci_cc_read_local_oob_ext_data(hdev, skb);
2905 case HCI_OP_LE_READ_BUFFER_SIZE:
2906 hci_cc_le_read_buffer_size(hdev, skb);
2909 case HCI_OP_LE_READ_LOCAL_FEATURES:
2910 hci_cc_le_read_local_features(hdev, skb);
2913 case HCI_OP_LE_READ_ADV_TX_POWER:
2914 hci_cc_le_read_adv_tx_power(hdev, skb);
2917 case HCI_OP_USER_CONFIRM_REPLY:
2918 hci_cc_user_confirm_reply(hdev, skb);
2921 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2922 hci_cc_user_confirm_neg_reply(hdev, skb);
2925 case HCI_OP_USER_PASSKEY_REPLY:
2926 hci_cc_user_passkey_reply(hdev, skb);
2929 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2930 hci_cc_user_passkey_neg_reply(hdev, skb);
2933 case HCI_OP_LE_SET_RANDOM_ADDR:
2934 hci_cc_le_set_random_addr(hdev, skb);
2937 case HCI_OP_LE_SET_ADV_ENABLE:
2938 hci_cc_le_set_adv_enable(hdev, skb);
2941 case HCI_OP_LE_SET_SCAN_PARAM:
2942 hci_cc_le_set_scan_param(hdev, skb);
2945 case HCI_OP_LE_SET_SCAN_ENABLE:
2946 hci_cc_le_set_scan_enable(hdev, skb);
2949 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2950 hci_cc_le_read_white_list_size(hdev, skb);
2953 case HCI_OP_LE_CLEAR_WHITE_LIST:
2954 hci_cc_le_clear_white_list(hdev, skb);
2957 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2958 hci_cc_le_add_to_white_list(hdev, skb);
2961 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2962 hci_cc_le_del_from_white_list(hdev, skb);
2965 case HCI_OP_LE_READ_SUPPORTED_STATES:
2966 hci_cc_le_read_supported_states(hdev, skb);
2969 case HCI_OP_LE_READ_DEF_DATA_LEN:
2970 hci_cc_le_read_def_data_len(hdev, skb);
2973 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
2974 hci_cc_le_write_def_data_len(hdev, skb);
2977 case HCI_OP_LE_READ_MAX_DATA_LEN:
2978 hci_cc_le_read_max_data_len(hdev, skb);
2981 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2982 hci_cc_write_le_host_supported(hdev, skb);
2985 case HCI_OP_LE_SET_ADV_PARAM:
2986 hci_cc_set_adv_param(hdev, skb);
2989 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2990 hci_cc_write_remote_amp_assoc(hdev, skb);
2993 case HCI_OP_READ_RSSI:
2994 hci_cc_read_rssi(hdev, skb);
2997 case HCI_OP_READ_TX_POWER:
2998 hci_cc_read_tx_power(hdev, skb);
3001 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3002 hci_cc_write_ssp_debug_mode(hdev, skb);
/* Default: no dedicated handler; just log the opcode. */
3006 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Any real command completing means the controller is alive. */
3010 if (opcode != HCI_OP_NOP)
3011 cancel_delayed_work(&hdev->cmd_timer);
3013 hci_req_cmd_complete(hdev, opcode, status);
/* ncmd > 0 means the controller can take more commands; restart the
 * command work queue unless a reset is in progress.
 */
3015 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
3016 atomic_set(&hdev->cmd_cnt, 1);
3017 if (!skb_queue_empty(&hdev->cmd_q))
3018 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handler for the HCI Command Status event.
 * Dispatches to the per-opcode hci_cs_* handler, cancels the command
 * timeout, completes a pending request when no further event is
 * expected, and re-arms the command queue based on ncmd.
 */
3022 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
3024 struct hci_ev_cmd_status *ev = (void *) skb->data;
3027 skb_pull(skb, sizeof(*ev));
3029 opcode = __le16_to_cpu(ev->opcode);
3032 case HCI_OP_INQUIRY:
3033 hci_cs_inquiry(hdev, ev->status);
3036 case HCI_OP_CREATE_CONN:
3037 hci_cs_create_conn(hdev, ev->status);
3040 case HCI_OP_DISCONNECT:
3041 hci_cs_disconnect(hdev, ev->status);
3044 case HCI_OP_ADD_SCO:
3045 hci_cs_add_sco(hdev, ev->status);
3048 case HCI_OP_AUTH_REQUESTED:
3049 hci_cs_auth_requested(hdev, ev->status);
3052 case HCI_OP_SET_CONN_ENCRYPT:
3053 hci_cs_set_conn_encrypt(hdev, ev->status);
3056 case HCI_OP_REMOTE_NAME_REQ:
3057 hci_cs_remote_name_req(hdev, ev->status);
3060 case HCI_OP_READ_REMOTE_FEATURES:
3061 hci_cs_read_remote_features(hdev, ev->status);
3064 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3065 hci_cs_read_remote_ext_features(hdev, ev->status);
3068 case HCI_OP_SETUP_SYNC_CONN:
3069 hci_cs_setup_sync_conn(hdev, ev->status);
3072 case HCI_OP_CREATE_PHY_LINK:
3073 hci_cs_create_phylink(hdev, ev->status);
3076 case HCI_OP_ACCEPT_PHY_LINK:
3077 hci_cs_accept_phylink(hdev, ev->status);
3080 case HCI_OP_SNIFF_MODE:
3081 hci_cs_sniff_mode(hdev, ev->status);
3084 case HCI_OP_EXIT_SNIFF_MODE:
3085 hci_cs_exit_sniff_mode(hdev, ev->status);
3088 case HCI_OP_SWITCH_ROLE:
3089 hci_cs_switch_role(hdev, ev->status);
3092 case HCI_OP_LE_CREATE_CONN:
3093 hci_cs_le_create_conn(hdev, ev->status);
3096 case HCI_OP_LE_START_ENC:
3097 hci_cs_le_start_enc(hdev, ev->status);
/* Default: no dedicated handler; just log the opcode. */
3101 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3105 if (opcode != HCI_OP_NOP)
3106 cancel_delayed_work(&hdev->cmd_timer);
/* Complete the request now only when the sent command is not waiting
 * for a specific follow-up event (the surrounding condition's first
 * clause is elided in this view).
 */
3109 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
3110 hci_req_cmd_complete(hdev, opcode, ev->status);
/* Restart command processing if the controller granted credit. */
3112 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
3113 atomic_set(&hdev->cmd_cnt, 1);
3114 if (!skb_queue_empty(&hdev->cmd_q))
3115 queue_work(hdev->workqueue, &hdev->cmd_work);
3119 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3121 struct hci_ev_hardware_error *ev = (void *) skb->data;
3123 hdev->hw_error_code = ev->code;
3125 queue_work(hdev->req_workqueue, &hdev->error_reset);
3128 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3130 struct hci_ev_role_change *ev = (void *) skb->data;
3131 struct hci_conn *conn;
3133 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3137 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3140 conn->role = ev->role;
3142 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3144 hci_role_switch_cfm(conn, ev->status, ev->role);
3147 hci_dev_unlock(hdev);
3150 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3152 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3155 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3156 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3160 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3161 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3162 BT_DBG("%s bad parameters", hdev->name);
3166 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3168 for (i = 0; i < ev->num_hndl; i++) {
3169 struct hci_comp_pkts_info *info = &ev->handles[i];
3170 struct hci_conn *conn;
3171 __u16 handle, count;
3173 handle = __le16_to_cpu(info->handle);
3174 count = __le16_to_cpu(info->count);
3176 conn = hci_conn_hash_lookup_handle(hdev, handle);
3180 conn->sent -= count;
3182 switch (conn->type) {
3184 hdev->acl_cnt += count;
3185 if (hdev->acl_cnt > hdev->acl_pkts)
3186 hdev->acl_cnt = hdev->acl_pkts;
3190 if (hdev->le_pkts) {
3191 hdev->le_cnt += count;
3192 if (hdev->le_cnt > hdev->le_pkts)
3193 hdev->le_cnt = hdev->le_pkts;
3195 hdev->acl_cnt += count;
3196 if (hdev->acl_cnt > hdev->acl_pkts)
3197 hdev->acl_cnt = hdev->acl_pkts;
3202 hdev->sco_cnt += count;
3203 if (hdev->sco_cnt > hdev->sco_pkts)
3204 hdev->sco_cnt = hdev->sco_pkts;
3208 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3213 queue_work(hdev->workqueue, &hdev->tx_work);
3216 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3219 struct hci_chan *chan;
3221 switch (hdev->dev_type) {
3223 return hci_conn_hash_lookup_handle(hdev, handle);
3225 chan = hci_chan_lookup_handle(hdev, handle);
3230 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3237 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3239 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3242 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3243 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3247 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3248 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3249 BT_DBG("%s bad parameters", hdev->name);
3253 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3256 for (i = 0; i < ev->num_hndl; i++) {
3257 struct hci_comp_blocks_info *info = &ev->handles[i];
3258 struct hci_conn *conn = NULL;
3259 __u16 handle, block_count;
3261 handle = __le16_to_cpu(info->handle);
3262 block_count = __le16_to_cpu(info->blocks);
3264 conn = __hci_conn_lookup_handle(hdev, handle);
3268 conn->sent -= block_count;
3270 switch (conn->type) {
3273 hdev->block_cnt += block_count;
3274 if (hdev->block_cnt > hdev->num_blocks)
3275 hdev->block_cnt = hdev->num_blocks;
3279 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3284 queue_work(hdev->workqueue, &hdev->tx_work);
3287 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3289 struct hci_ev_mode_change *ev = (void *) skb->data;
3290 struct hci_conn *conn;
3292 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3296 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3298 conn->mode = ev->mode;
3300 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3302 if (conn->mode == HCI_CM_ACTIVE)
3303 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3305 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3308 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3309 hci_sco_setup(conn, ev->status);
3312 hci_dev_unlock(hdev);
3315 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3317 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3318 struct hci_conn *conn;
3320 BT_DBG("%s", hdev->name);
3324 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3328 if (conn->state == BT_CONNECTED) {
3329 hci_conn_hold(conn);
3330 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3331 hci_conn_drop(conn);
3334 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
3335 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3336 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3337 sizeof(ev->bdaddr), &ev->bdaddr);
3338 } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3341 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3346 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3350 hci_dev_unlock(hdev);
3353 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3355 if (key_type == HCI_LK_CHANGED_COMBINATION)
3358 conn->pin_length = pin_len;
3359 conn->key_type = key_type;
3362 case HCI_LK_LOCAL_UNIT:
3363 case HCI_LK_REMOTE_UNIT:
3364 case HCI_LK_DEBUG_COMBINATION:
3366 case HCI_LK_COMBINATION:
3368 conn->pending_sec_level = BT_SECURITY_HIGH;
3370 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3372 case HCI_LK_UNAUTH_COMBINATION_P192:
3373 case HCI_LK_UNAUTH_COMBINATION_P256:
3374 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3376 case HCI_LK_AUTH_COMBINATION_P192:
3377 conn->pending_sec_level = BT_SECURITY_HIGH;
3379 case HCI_LK_AUTH_COMBINATION_P256:
3380 conn->pending_sec_level = BT_SECURITY_FIPS;
3385 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3387 struct hci_ev_link_key_req *ev = (void *) skb->data;
3388 struct hci_cp_link_key_reply cp;
3389 struct hci_conn *conn;
3390 struct link_key *key;
3392 BT_DBG("%s", hdev->name);
3394 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3399 key = hci_find_link_key(hdev, &ev->bdaddr);
3401 BT_DBG("%s link key not found for %pMR", hdev->name,
3406 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3409 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3411 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3413 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3414 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3415 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3416 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3420 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3421 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3422 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3423 BT_DBG("%s ignoring key unauthenticated for high security",
3428 conn_set_key(conn, key->type, key->pin_len);
3431 bacpy(&cp.bdaddr, &ev->bdaddr);
3432 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3434 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3436 hci_dev_unlock(hdev);
3441 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3442 hci_dev_unlock(hdev);
3445 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3447 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3448 struct hci_conn *conn;
3449 struct link_key *key;
3453 BT_DBG("%s", hdev->name);
3457 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3461 hci_conn_hold(conn);
3462 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3463 hci_conn_drop(conn);
3465 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3466 conn_set_key(conn, ev->key_type, conn->pin_length);
3468 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3471 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3472 ev->key_type, pin_len, &persistent);
3476 /* Update connection information since adding the key will have
3477 * fixed up the type in the case of changed combination keys.
3479 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3480 conn_set_key(conn, key->type, key->pin_len);
3482 mgmt_new_link_key(hdev, key, persistent);
3484 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3485 * is set. If it's not set simply remove the key from the kernel
3486 * list (we've still notified user space about it but with
3487 * store_hint being 0).
3489 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3490 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3491 list_del_rcu(&key->list);
3492 kfree_rcu(key, rcu);
3497 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3499 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3502 hci_dev_unlock(hdev);
3505 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3507 struct hci_ev_clock_offset *ev = (void *) skb->data;
3508 struct hci_conn *conn;
3510 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3514 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3515 if (conn && !ev->status) {
3516 struct inquiry_entry *ie;
3518 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3520 ie->data.clock_offset = ev->clock_offset;
3521 ie->timestamp = jiffies;
3525 hci_dev_unlock(hdev);
3528 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3530 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3531 struct hci_conn *conn;
3533 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3537 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3538 if (conn && !ev->status)
3539 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3541 hci_dev_unlock(hdev);
3544 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3546 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3547 struct inquiry_entry *ie;
3549 BT_DBG("%s", hdev->name);
3553 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3555 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3556 ie->timestamp = jiffies;
3559 hci_dev_unlock(hdev);
3562 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3563 struct sk_buff *skb)
3565 struct inquiry_data data;
3566 int num_rsp = *((__u8 *) skb->data);
3568 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3573 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3578 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3579 struct inquiry_info_with_rssi_and_pscan_mode *info;
3580 info = (void *) (skb->data + 1);
3582 for (; num_rsp; num_rsp--, info++) {
3585 bacpy(&data.bdaddr, &info->bdaddr);
3586 data.pscan_rep_mode = info->pscan_rep_mode;
3587 data.pscan_period_mode = info->pscan_period_mode;
3588 data.pscan_mode = info->pscan_mode;
3589 memcpy(data.dev_class, info->dev_class, 3);
3590 data.clock_offset = info->clock_offset;
3591 data.rssi = info->rssi;
3592 data.ssp_mode = 0x00;
3594 flags = hci_inquiry_cache_update(hdev, &data, false);
3596 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3597 info->dev_class, info->rssi,
3598 flags, NULL, 0, NULL, 0);
3601 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3603 for (; num_rsp; num_rsp--, info++) {
3606 bacpy(&data.bdaddr, &info->bdaddr);
3607 data.pscan_rep_mode = info->pscan_rep_mode;
3608 data.pscan_period_mode = info->pscan_period_mode;
3609 data.pscan_mode = 0x00;
3610 memcpy(data.dev_class, info->dev_class, 3);
3611 data.clock_offset = info->clock_offset;
3612 data.rssi = info->rssi;
3613 data.ssp_mode = 0x00;
3615 flags = hci_inquiry_cache_update(hdev, &data, false);
3617 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3618 info->dev_class, info->rssi,
3619 flags, NULL, 0, NULL, 0);
3623 hci_dev_unlock(hdev);
3626 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3627 struct sk_buff *skb)
3629 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3630 struct hci_conn *conn;
3632 BT_DBG("%s", hdev->name);
3636 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3640 if (ev->page < HCI_MAX_PAGES)
3641 memcpy(conn->features[ev->page], ev->features, 8);
3643 if (!ev->status && ev->page == 0x01) {
3644 struct inquiry_entry *ie;
3646 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3648 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3650 if (ev->features[0] & LMP_HOST_SSP) {
3651 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3653 /* It is mandatory by the Bluetooth specification that
3654 * Extended Inquiry Results are only used when Secure
3655 * Simple Pairing is enabled, but some devices violate
3658 * To make these devices work, the internal SSP
3659 * enabled flag needs to be cleared if the remote host
3660 * features do not indicate SSP support */
3661 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3664 if (ev->features[0] & LMP_HOST_SC)
3665 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3668 if (conn->state != BT_CONFIG)
3671 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3672 struct hci_cp_remote_name_req cp;
3673 memset(&cp, 0, sizeof(cp));
3674 bacpy(&cp.bdaddr, &conn->dst);
3675 cp.pscan_rep_mode = 0x02;
3676 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3677 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3678 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3680 if (!hci_outgoing_auth_needed(hdev, conn)) {
3681 conn->state = BT_CONNECTED;
3682 hci_proto_connect_cfm(conn, ev->status);
3683 hci_conn_drop(conn);
3687 hci_dev_unlock(hdev);
3690 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3691 struct sk_buff *skb)
3693 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3694 struct hci_conn *conn;
3696 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3700 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3702 if (ev->link_type == ESCO_LINK)
3705 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3709 conn->type = SCO_LINK;
3712 switch (ev->status) {
3714 conn->handle = __le16_to_cpu(ev->handle);
3715 conn->state = BT_CONNECTED;
3717 hci_debugfs_create_conn(conn);
3718 hci_conn_add_sysfs(conn);
3721 case 0x10: /* Connection Accept Timeout */
3722 case 0x0d: /* Connection Rejected due to Limited Resources */
3723 case 0x11: /* Unsupported Feature or Parameter Value */
3724 case 0x1c: /* SCO interval rejected */
3725 case 0x1a: /* Unsupported Remote Feature */
3726 case 0x1f: /* Unspecified error */
3727 case 0x20: /* Unsupported LMP Parameter value */
3729 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3730 (hdev->esco_type & EDR_ESCO_MASK);
3731 if (hci_setup_sync(conn, conn->link->handle))
3737 conn->state = BT_CLOSED;
3741 hci_proto_connect_cfm(conn, ev->status);
3746 hci_dev_unlock(hdev);
3749 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3753 while (parsed < eir_len) {
3754 u8 field_len = eir[0];
3759 parsed += field_len + 1;
3760 eir += field_len + 1;
3766 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3767 struct sk_buff *skb)
3769 struct inquiry_data data;
3770 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3771 int num_rsp = *((__u8 *) skb->data);
3774 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3779 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3784 for (; num_rsp; num_rsp--, info++) {
3788 bacpy(&data.bdaddr, &info->bdaddr);
3789 data.pscan_rep_mode = info->pscan_rep_mode;
3790 data.pscan_period_mode = info->pscan_period_mode;
3791 data.pscan_mode = 0x00;
3792 memcpy(data.dev_class, info->dev_class, 3);
3793 data.clock_offset = info->clock_offset;
3794 data.rssi = info->rssi;
3795 data.ssp_mode = 0x01;
3797 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3798 name_known = eir_has_data_type(info->data,
3804 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3806 eir_len = eir_get_length(info->data, sizeof(info->data));
3808 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3809 info->dev_class, info->rssi,
3810 flags, info->data, eir_len, NULL, 0);
3813 hci_dev_unlock(hdev);
3816 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3817 struct sk_buff *skb)
3819 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3820 struct hci_conn *conn;
3822 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3823 __le16_to_cpu(ev->handle));
3827 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3831 /* For BR/EDR the necessary steps are taken through the
3832 * auth_complete event.
3834 if (conn->type != LE_LINK)
3838 conn->sec_level = conn->pending_sec_level;
3840 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3842 if (ev->status && conn->state == BT_CONNECTED) {
3843 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3844 hci_conn_drop(conn);
3848 if (conn->state == BT_CONFIG) {
3850 conn->state = BT_CONNECTED;
3852 hci_proto_connect_cfm(conn, ev->status);
3853 hci_conn_drop(conn);
3855 hci_auth_cfm(conn, ev->status);
3857 hci_conn_hold(conn);
3858 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3859 hci_conn_drop(conn);
3863 hci_dev_unlock(hdev);
3866 static u8 hci_get_auth_req(struct hci_conn *conn)
3868 /* If remote requests no-bonding follow that lead */
3869 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3870 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3871 return conn->remote_auth | (conn->auth_type & 0x01);
3873 /* If both remote and local have enough IO capabilities, require
3876 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3877 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3878 return conn->remote_auth | 0x01;
3880 /* No MITM protection possible so ignore remote requirement */
3881 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3884 static u8 bredr_oob_data_present(struct hci_conn *conn)
3886 struct hci_dev *hdev = conn->hdev;
3887 struct oob_data *data;
3889 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3893 /* When Secure Connections Only mode is enabled, then the P-256
3894 * values are required. If they are not available, then do not
3895 * declare that OOB data is present.
3897 if (bredr_sc_enabled(hdev) &&
3898 test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
3899 (!memcmp(data->rand256, ZERO_KEY, 16) ||
3900 !memcmp(data->hash256, ZERO_KEY, 16)))
3903 if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) {
3904 /* When Secure Connections has been enabled, then just
3905 * return the present value stored with the OOB data. It
3906 * will contain the right information about which data
3909 if (bredr_sc_enabled(hdev))
3910 return data->present;
3912 /* When Secure Connections is not enabled or actually
3913 * not supported by the hardware, then check that if
3914 * P-192 data values are present.
3916 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3917 !memcmp(data->hash192, ZERO_KEY, 16))
3926 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3928 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3929 struct hci_conn *conn;
3931 BT_DBG("%s", hdev->name);
3935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3939 hci_conn_hold(conn);
3941 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3944 /* Allow pairing if we're pairable, the initiators of the
3945 * pairing or if the remote is not requesting bonding.
3947 if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
3948 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3949 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3950 struct hci_cp_io_capability_reply cp;
3952 bacpy(&cp.bdaddr, &ev->bdaddr);
3953 /* Change the IO capability from KeyboardDisplay
3954 * to DisplayYesNo as it is not supported by BT spec. */
3955 cp.capability = (conn->io_capability == 0x04) ?
3956 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3958 /* If we are initiators, there is no remote information yet */
3959 if (conn->remote_auth == 0xff) {
3960 /* Request MITM protection if our IO caps allow it
3961 * except for the no-bonding case.
3963 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3964 conn->auth_type != HCI_AT_NO_BONDING)
3965 conn->auth_type |= 0x01;
3967 conn->auth_type = hci_get_auth_req(conn);
3970 /* If we're not bondable, force one of the non-bondable
3971 * authentication requirement values.
3973 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
3974 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
3976 cp.authentication = conn->auth_type;
3977 cp.oob_data = bredr_oob_data_present(conn);
3979 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3982 struct hci_cp_io_capability_neg_reply cp;
3984 bacpy(&cp.bdaddr, &ev->bdaddr);
3985 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3987 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3992 hci_dev_unlock(hdev);
3995 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3997 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3998 struct hci_conn *conn;
4000 BT_DBG("%s", hdev->name);
4004 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4008 conn->remote_cap = ev->capability;
4009 conn->remote_auth = ev->authentication;
4011 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
4014 hci_dev_unlock(hdev);
4017 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4018 struct sk_buff *skb)
4020 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4021 int loc_mitm, rem_mitm, confirm_hint = 0;
4022 struct hci_conn *conn;
4024 BT_DBG("%s", hdev->name);
4028 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4031 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4035 loc_mitm = (conn->auth_type & 0x01);
4036 rem_mitm = (conn->remote_auth & 0x01);
4038 /* If we require MITM but the remote device can't provide that
4039 * (it has NoInputNoOutput) then reject the confirmation
4040 * request. We check the security level here since it doesn't
4041 * necessarily match conn->auth_type.
4043 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4044 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4045 BT_DBG("Rejecting request: remote device can't provide MITM");
4046 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4047 sizeof(ev->bdaddr), &ev->bdaddr);
4051 /* If no side requires MITM protection; auto-accept */
4052 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4053 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4055 /* If we're not the initiators request authorization to
4056 * proceed from user space (mgmt_user_confirm with
4057 * confirm_hint set to 1). The exception is if neither
4058 * side had MITM or if the local IO capability is
4059 * NoInputNoOutput, in which case we do auto-accept
4061 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4062 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4063 (loc_mitm || rem_mitm)) {
4064 BT_DBG("Confirming auto-accept as acceptor");
4069 BT_DBG("Auto-accept of user confirmation with %ums delay",
4070 hdev->auto_accept_delay);
4072 if (hdev->auto_accept_delay > 0) {
4073 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4074 queue_delayed_work(conn->hdev->workqueue,
4075 &conn->auto_accept_work, delay);
4079 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4080 sizeof(ev->bdaddr), &ev->bdaddr);
4085 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4086 le32_to_cpu(ev->passkey), confirm_hint);
4089 hci_dev_unlock(hdev);
4092 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4093 struct sk_buff *skb)
4095 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4097 BT_DBG("%s", hdev->name);
4099 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4100 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4103 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4104 struct sk_buff *skb)
4106 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4107 struct hci_conn *conn;
4109 BT_DBG("%s", hdev->name);
4111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4115 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4116 conn->passkey_entered = 0;
4118 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4119 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4120 conn->dst_type, conn->passkey_notify,
4121 conn->passkey_entered);
4124 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4126 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4127 struct hci_conn *conn;
4129 BT_DBG("%s", hdev->name);
4131 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4136 case HCI_KEYPRESS_STARTED:
4137 conn->passkey_entered = 0;
4140 case HCI_KEYPRESS_ENTERED:
4141 conn->passkey_entered++;
4144 case HCI_KEYPRESS_ERASED:
4145 conn->passkey_entered--;
4148 case HCI_KEYPRESS_CLEARED:
4149 conn->passkey_entered = 0;
4152 case HCI_KEYPRESS_COMPLETED:
4156 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4157 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4158 conn->dst_type, conn->passkey_notify,
4159 conn->passkey_entered);
4162 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4163 struct sk_buff *skb)
4165 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4166 struct hci_conn *conn;
4168 BT_DBG("%s", hdev->name);
4172 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4176 /* Reset the authentication requirement to unknown */
4177 conn->remote_auth = 0xff;
4179 /* To avoid duplicate auth_failed events to user space we check
4180 * the HCI_CONN_AUTH_PEND flag which will be set if we
4181 * initiated the authentication. A traditional auth_complete
4182 * event gets always produced as initiator and is also mapped to
4183 * the mgmt_auth_failed event */
4184 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4185 mgmt_auth_failed(conn, ev->status);
4187 hci_conn_drop(conn);
4190 hci_dev_unlock(hdev);
4193 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4194 struct sk_buff *skb)
4196 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4197 struct inquiry_entry *ie;
4198 struct hci_conn *conn;
4200 BT_DBG("%s", hdev->name);
4204 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4206 memcpy(conn->features[1], ev->features, 8);
4208 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4210 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4212 hci_dev_unlock(hdev);
4215 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4216 struct sk_buff *skb)
4218 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4219 struct oob_data *data;
4221 BT_DBG("%s", hdev->name);
4225 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4228 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4230 struct hci_cp_remote_oob_data_neg_reply cp;
4232 bacpy(&cp.bdaddr, &ev->bdaddr);
4233 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4238 if (bredr_sc_enabled(hdev)) {
4239 struct hci_cp_remote_oob_ext_data_reply cp;
4241 bacpy(&cp.bdaddr, &ev->bdaddr);
4242 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4243 memset(cp.hash192, 0, sizeof(cp.hash192));
4244 memset(cp.rand192, 0, sizeof(cp.rand192));
4246 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4247 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4249 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4250 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4252 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4255 struct hci_cp_remote_oob_data_reply cp;
4257 bacpy(&cp.bdaddr, &ev->bdaddr);
4258 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4259 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4261 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4266 hci_dev_unlock(hdev);
4269 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4270 struct sk_buff *skb)
4272 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4273 struct hci_conn *hcon, *bredr_hcon;
4275 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4280 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4282 hci_dev_unlock(hdev);
4288 hci_dev_unlock(hdev);
4292 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4294 hcon->state = BT_CONNECTED;
4295 bacpy(&hcon->dst, &bredr_hcon->dst);
4297 hci_conn_hold(hcon);
4298 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4299 hci_conn_drop(hcon);
4301 hci_debugfs_create_conn(hcon);
4302 hci_conn_add_sysfs(hcon);
4304 amp_physical_cfm(bredr_hcon, hcon);
4306 hci_dev_unlock(hdev);
4309 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4311 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4312 struct hci_conn *hcon;
4313 struct hci_chan *hchan;
4314 struct amp_mgr *mgr;
4316 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4317 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4320 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4324 /* Create AMP hchan */
4325 hchan = hci_chan_create(hcon);
4329 hchan->handle = le16_to_cpu(ev->handle);
4331 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4333 mgr = hcon->amp_mgr;
4334 if (mgr && mgr->bredr_chan) {
4335 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4337 l2cap_chan_lock(bredr_chan);
4339 bredr_chan->conn->mtu = hdev->block_mtu;
4340 l2cap_logical_cfm(bredr_chan, hchan, 0);
4341 hci_conn_hold(hcon);
4343 l2cap_chan_unlock(bredr_chan);
4347 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4348 struct sk_buff *skb)
4350 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4351 struct hci_chan *hchan;
4353 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4354 le16_to_cpu(ev->handle), ev->status);
4361 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4365 amp_destroy_logical_link(hchan, ev->reason);
4368 hci_dev_unlock(hdev);
4371 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4372 struct sk_buff *skb)
4374 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4375 struct hci_conn *hcon;
4377 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4384 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4386 hcon->state = BT_CLOSED;
4390 hci_dev_unlock(hdev);
/* Handle the HCI LE Connection Complete event (LE Meta subevent).
 *
 * Finds or creates the hci_conn for the new LE link, records the
 * initiator/responder addresses, resolves a resolvable private address
 * back to its identity address via the stored IRK, drops blacklisted
 * peers, and notifies mgmt and the L2CAP layer of the new connection.
 *
 * NOTE(review): this listing appears to have lost several guard/brace
 * lines (status checks, NULL checks, if/else framing, comment
 * terminators) relative to upstream BlueZ hci_event.c - verify control
 * flow against upstream before relying on what is shown here.
 */
4393 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4395 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4396 struct hci_conn_params *params;
4397 struct hci_conn *conn;
4398 struct smp_irk *irk;
4401 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4405 /* All controllers implicitly stop advertising in the event of a
4406 * connection, so ensure that the state bit is cleared.
4408 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
/* Prefer an existing connection object in BT_CONNECT state (an
 * outgoing attempt we initiated); otherwise a new one is allocated.
 */
4410 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4412 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4414 BT_ERR("No memory for new connection");
4418 conn->dst_type = ev->bdaddr_type;
4420 /* If we didn't have a hci_conn object previously
4421 * but we're in master role this must be something
4422 * initiated using a white list. Since white list based
4423 * connections are not "first class citizens" we don't
4424 * have full tracking of them. Therefore, we go ahead
4425 * with a "best effort" approach of determining the
4426 * initiator address based on the HCI_PRIVACY flag.
4429 conn->resp_addr_type = ev->bdaddr_type;
4430 bacpy(&conn->resp_addr, &ev->bdaddr);
4431 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
4432 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4433 bacpy(&conn->init_addr, &hdev->rpa);
4435 hci_copy_identity_address(hdev,
4437 &conn->init_addr_type)*
/* An outgoing attempt completed: stop the LE connection timeout. */
4441 cancel_delayed_work(&conn->le_conn_timeout);
4445 /* Set the responder (our side) address type based on
4446 * the advertising address type.
4448 conn->resp_addr_type = hdev->adv_addr_type;
4449 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4450 bacpy(&conn->resp_addr, &hdev->random_addr);
4452 bacpy(&conn->resp_addr, &hdev->bdaddr);
4454 conn->init_addr_type = ev->bdaddr_type;
4455 bacpy(&conn->init_addr, &ev->bdaddr);
4457 /* For incoming connections, set the default minimum
4458 * and maximum connection interval. They will be used
4459 * to check if the parameters are in range and if not
4460 * trigger the connection update procedure.
4462 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4463 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4466 /* Lookup the identity address from the stored connection
4467 * address and address type.
4469 * When establishing connections to an identity address, the
4470 * connection procedure will store the resolvable random
4471 * address first. Now if it can be converted back into the
4472 * identity address, start using the identity address from
4475 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4477 bacpy(&conn->dst, &irk->bdaddr);
4478 conn->dst_type = irk->addr_type;
/* Connection failed: clean up via the LE-specific failure path. */
4482 hci_le_conn_failed(conn, ev->status);
4486 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4487 addr_type = BDADDR_LE_PUBLIC;
4489 addr_type = BDADDR_LE_RANDOM;
4491 /* Drop the connection if the device is blocked */
4492 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4493 hci_conn_drop(conn);
4497 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4498 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4500 conn->sec_level = BT_SECURITY_LOW;
4501 conn->handle = __le16_to_cpu(ev->handle);
4502 conn->state = BT_CONNECTED;
/* Cache the negotiated connection parameters from the event. */
4504 conn->le_conn_interval = le16_to_cpu(ev->interval);
4505 conn->le_conn_latency = le16_to_cpu(ev->latency);
4506 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4508 hci_debugfs_create_conn(conn);
4509 hci_conn_add_sysfs(conn);
4511 hci_proto_connect_cfm(conn, ev->status);
/* If this connection was triggered by a pending auto-connect entry,
 * detach that entry's reference now that the link is established.
 * NOTE(review): "¶ms" below is an encoding-mangled "&params" - fix
 * the mojibake when restoring this file.
 */
4513 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4516 list_del_init(¶ms->action);
4518 hci_conn_drop(params->conn);
4519 hci_conn_put(params->conn);
4520 params->conn = NULL;
4525 hci_update_background_scan(hdev);
4526 hci_dev_unlock(hdev);
4529 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4530 struct sk_buff *skb)
4532 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4533 struct hci_conn *conn;
4535 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4542 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4544 conn->le_conn_interval = le16_to_cpu(ev->interval);
4545 conn->le_conn_latency = le16_to_cpu(ev->latency);
4546 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4549 hci_dev_unlock(hdev);
4552 /* This function requires the caller holds hdev->lock */
/* Decide whether an advertising report from @addr should trigger an LE
 * connection attempt, based on the stored per-device connection
 * parameters (auto_connect policy). On success the new hci_conn is
 * referenced from the triggering params entry so the attempt can be
 * aborted if the parameters are later removed.
 *
 * NOTE(review): several early-return / goto lines appear to be missing
 * from this listing relative to upstream BlueZ - verify control flow
 * against upstream hci_event.c.
 */
4553 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4555 u8 addr_type, u8 adv_type)
4557 struct hci_conn *conn;
4558 struct hci_conn_params *params;
4560 /* If the event is not connectable don't proceed further */
4561 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4564 /* Ignore if the device is blocked */
4565 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4568 /* Most controllers will fail if we try to create new connections
4569 * while we have an existing one in slave role.
4571 if (hdev->conn_hash.le_num_slave > 0)
4574 /* If we're not connectable only connect devices that we have in
4575 * our pend_le_conns list.
4577 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4582 switch (params->auto_connect) {
4583 case HCI_AUTO_CONN_DIRECT:
4584 /* Only devices advertising with ADV_DIRECT_IND are
4585 * triggering a connection attempt. This is allowing
4586 * incoming connections from slave devices.
4588 if (adv_type != LE_ADV_DIRECT_IND)
4591 case HCI_AUTO_CONN_ALWAYS:
4592 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4593 * are triggering a connection attempt. This means
4594 * that incoming connections from slave devices are
4595 * accepted and also outgoing connections to slave
4596 * devices are established when found.
4603 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4604 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4605 if (!IS_ERR(conn)) {
4606 /* Store the pointer since we don't really have any
4607 * other owner of the object besides the params that
4608 * triggered it. This way we can abort the connection if
4609 * the parameters get removed and keep the reference
4610 * count consistent once the connection is established.
4612 params->conn = hci_conn_get(conn);
/* hci_connect_le() failed: only -EBUSY is treated as benign. */
4616 switch (PTR_ERR(conn)) {
4618 /* If hci_connect() returns -EBUSY it means there is already
4619 * an LE connection attempt going on. Since controllers don't
4620 * support more than one connection attempt at the time, we
4621 * don't consider this an error case.
4625 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process one LE advertising (or direct advertising) report.
 *
 * Validates an optional direct address against our own RPA/IRK,
 * resolves the advertiser's RPA to an identity address, kicks off a
 * pending auto-connection if one is configured, and emits (or merges
 * and defers) mgmt Device Found events. ADV_IND/ADV_SCAN_IND reports
 * may be cached so a following SCAN_RSP can be merged into a single
 * event.
 *
 * NOTE(review): local declarations for "flags" and "match" and several
 * guard/return lines appear to be missing from this listing relative
 * to upstream BlueZ - verify against upstream hci_event.c.
 */
4632 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4633 u8 bdaddr_type, bdaddr_t *direct_addr,
4634 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4636 struct discovery_state *d = &hdev->discovery;
4637 struct smp_irk *irk;
4638 struct hci_conn *conn;
4642 /* If the direct address is present, then this report is from
4643 * a LE Direct Advertising Report event. In that case it is
4644 * important to see if the address is matching the local
4645 * controller address.
4648 /* Only resolvable random addresses are valid for these
4649 * kind of reports and others can be ignored.
4651 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4654 /* If the controller is not using resolvable random
4655 * addresses, then this report can be ignored.
4657 if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
4660 /* If the local IRK of the controller does not match
4661 * with the resolvable random address provided, then
4662 * this report can be ignored.
4664 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4668 /* Check if we need to convert to identity address */
4669 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4671 bdaddr = &irk->bdaddr;
4672 bdaddr_type = irk->addr_type;
4675 /* Check if we have been requested to connect to this device */
4676 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4677 if (conn && type == LE_ADV_IND) {
4678 /* Store report for later inclusion by
4679 * mgmt_device_connected
4681 memcpy(conn->le_adv_data, data, len);
4682 conn->le_adv_data_len = len;
4685 /* Passive scanning shouldn't trigger any device found events,
4686 * except for devices marked as CONN_REPORT for which we do send
4687 * device found events.
4689 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4690 if (type == LE_ADV_DIRECT_IND)
4693 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4694 bdaddr, bdaddr_type))
4697 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4698 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4701 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4702 rssi, flags, data, len, NULL, 0);
4706 /* When receiving non-connectable or scannable undirected
4707 * advertising reports, this means that the remote device is
4708 * not connectable and then clearly indicate this in the
4709 * device found event.
4711 * When receiving a scan response, then there is no way to
4712 * know if the remote device is connectable or not. However
4713 * since scan responses are merged with a previously seen
4714 * advertising report, the flags field from that report
4717 * In the really unlikely case that a controller get confused
4718 * and just sends a scan response event, then it is marked as
4719 * not connectable as well.
4721 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4722 type == LE_ADV_SCAN_RSP)
4723 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4727 /* If there's nothing pending either store the data from this
4728 * event or send an immediate device found event if the data
4729 * should not be stored for later.
4731 if (!has_pending_adv_report(hdev)) {
4732 /* If the report will trigger a SCAN_REQ store it for
4735 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4736 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4737 rssi, flags, data, len);
4741 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4742 rssi, flags, data, len, NULL, 0);
4746 /* Check if the pending report is for the same device as the new one */
4747 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4748 bdaddr_type == d->last_adv_addr_type);
4750 /* If the pending data doesn't match this report or this isn't a
4751 * scan response (e.g. we got a duplicate ADV_IND) then force
4752 * sending of the pending data.
4754 if (type != LE_ADV_SCAN_RSP || !match) {
4755 /* Send out whatever is in the cache, but skip duplicates */
4757 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4758 d->last_adv_addr_type, NULL,
4759 d->last_adv_rssi, d->last_adv_flags,
4761 d->last_adv_data_len, NULL, 0);
4763 /* If the new report will trigger a SCAN_REQ store it for
4766 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4767 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4768 rssi, flags, data, len);
4772 /* The advertising reports cannot be merged, so clear
4773 * the pending report and send out a device found event.
4775 clear_pending_adv_report(hdev);
4776 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4777 rssi, flags, data, len, NULL, 0);
4781 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4782 * the new event is a SCAN_RSP. We can therefore proceed with
4783 * sending a merged device found event.
4785 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4786 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4787 d->last_adv_data, d->last_adv_data_len, data, len);
4788 clear_pending_adv_report(hdev);
4791 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4793 u8 num_reports = skb->data[0];
4794 void *ptr = &skb->data[1];
4798 while (num_reports--) {
4799 struct hci_ev_le_advertising_info *ev = ptr;
4802 rssi = ev->data[ev->length];
4803 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4804 ev->bdaddr_type, NULL, 0, rssi,
4805 ev->data, ev->length);
4807 ptr += sizeof(*ev) + ev->length + 1;
4810 hci_dev_unlock(hdev);
/* Handle the HCI LE Long Term Key Request event.
 *
 * Look up a stored LTK for the connection, validate EDiv/Rand (both
 * must be zero for Secure Connections keys), and reply with either the
 * key (LTK Reply) or a Negative Reply when no usable key is found.
 * An STK (pairing-time short term key) is single-use and is removed
 * from the store once handed to the controller.
 *
 * NOTE(review): the "not found" label/guard lines appear to be missing
 * from this listing relative to upstream BlueZ - verify control flow
 * against upstream hci_event.c.
 */
4813 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4815 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4816 struct hci_cp_le_ltk_reply cp;
4817 struct hci_cp_le_ltk_neg_reply neg;
4818 struct hci_conn *conn;
4819 struct smp_ltk *ltk;
4821 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4825 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4829 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
4833 if (smp_ltk_is_sc(ltk)) {
4834 /* With SC both EDiv and Rand are set to zero */
4835 if (ev->ediv || ev->rand)
4838 /* For non-SC keys check that EDiv and Rand match */
4839 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
4843 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4844 cp.handle = cpu_to_le16(conn->handle);
4846 conn->pending_sec_level = smp_ltk_sec_level(ltk);
4848 conn->enc_key_size = ltk->enc_size;
4850 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4852 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4853 * temporary key used to encrypt a connection following
4854 * pairing. It is used during the Encrypted Session Setup to
4855 * distribute the keys. Later, security can be re-established
4856 * using a distributed LTK.
4858 if (ltk->type == SMP_STK) {
4859 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* NOTE(review): "<k" below is an encoding-mangled "&ltk" - fix the
 * mojibake when restoring this file.
 */
4860 list_del_rcu(<k->list);
4861 kfree_rcu(ltk, rcu);
4863 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4866 hci_dev_unlock(hdev);
/* No connection or no matching key: send a negative reply instead. */
4871 neg.handle = ev->handle;
4872 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4873 hci_dev_unlock(hdev);
4876 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4879 struct hci_cp_le_conn_param_req_neg_reply cp;
4881 cp.handle = cpu_to_le16(handle);
4884 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the HCI LE Remote Connection Parameter Request event.
 *
 * Validate the parameters the remote proposed; reject with a negative
 * reply if the connection is unknown or the parameters are out of
 * range. As master, also update our stored per-device parameters and
 * notify mgmt, then accept the request by echoing the values back in
 * a Connection Parameter Request Reply.
 *
 * NOTE(review): the declaration/initialisation of "store_hint" and
 * some brace lines appear to be missing from this listing relative to
 * upstream BlueZ - verify against upstream hci_event.c.
 */
4888 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4889 struct sk_buff *skb)
4891 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4892 struct hci_cp_le_conn_param_req_reply cp;
4893 struct hci_conn *hcon;
4894 u16 handle, min, max, latency, timeout;
4896 handle = le16_to_cpu(ev->handle);
4897 min = le16_to_cpu(ev->interval_min);
4898 max = le16_to_cpu(ev->interval_max);
4899 latency = le16_to_cpu(ev->latency);
4900 timeout = le16_to_cpu(ev->timeout);
4902 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4903 if (!hcon || hcon->state != BT_CONNECTED)
4904 return send_conn_param_neg_reply(hdev, handle,
4905 HCI_ERROR_UNKNOWN_CONN_ID);
4907 if (hci_check_conn_params(min, max, latency, timeout))
4908 return send_conn_param_neg_reply(hdev, handle,
4909 HCI_ERROR_INVALID_LL_PARAMS);
4911 if (hcon->role == HCI_ROLE_MASTER) {
4912 struct hci_conn_params *params;
4917 params = hci_conn_params_lookup(hdev, &hcon->dst,
4920 params->conn_min_interval = min;
4921 params->conn_max_interval = max;
4922 params->conn_latency = latency;
4923 params->supervision_timeout = timeout;
4929 hci_dev_unlock(hdev);
4931 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4932 store_hint, min, max, latency, timeout);
/* Accept the request: echo the proposed values back unmodified
 * (still little-endian, straight from the event).
 */
4935 cp.handle = ev->handle;
4936 cp.interval_min = ev->interval_min;
4937 cp.interval_max = ev->interval_max;
4938 cp.latency = ev->latency;
4939 cp.timeout = ev->timeout;
4943 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4946 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4947 struct sk_buff *skb)
4949 u8 num_reports = skb->data[0];
4950 void *ptr = &skb->data[1];
4954 while (num_reports--) {
4955 struct hci_ev_le_direct_adv_info *ev = ptr;
4957 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4958 ev->bdaddr_type, &ev->direct_addr,
4959 ev->direct_addr_type, ev->rssi, NULL, 0);
4964 hci_dev_unlock(hdev);
4967 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4969 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4971 skb_pull(skb, sizeof(*le_ev));
4973 switch (le_ev->subevent) {
4974 case HCI_EV_LE_CONN_COMPLETE:
4975 hci_le_conn_complete_evt(hdev, skb);
4978 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4979 hci_le_conn_update_complete_evt(hdev, skb);
4982 case HCI_EV_LE_ADVERTISING_REPORT:
4983 hci_le_adv_report_evt(hdev, skb);
4986 case HCI_EV_LE_LTK_REQ:
4987 hci_le_ltk_request_evt(hdev, skb);
4990 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4991 hci_le_remote_conn_param_req_evt(hdev, skb);
4994 case HCI_EV_LE_DIRECT_ADV_REPORT:
4995 hci_le_direct_adv_report_evt(hdev, skb);
5003 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5005 struct hci_ev_channel_selected *ev = (void *) skb->data;
5006 struct hci_conn *hcon;
5008 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5010 skb_pull(skb, sizeof(*ev));
5012 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5016 amp_read_loc_assoc_final_data(hdev, hcon);
5019 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5021 struct hci_event_hdr *hdr = (void *) skb->data;
5022 __u8 event = hdr->evt;
5026 /* Received events are (currently) only needed when a request is
5027 * ongoing so avoid unnecessary memory allocation.
5029 if (hci_req_pending(hdev)) {
5030 kfree_skb(hdev->recv_evt);
5031 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
5034 hci_dev_unlock(hdev);
5036 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5038 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
5039 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5040 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
5042 hci_req_cmd_complete(hdev, opcode, 0);
5046 case HCI_EV_INQUIRY_COMPLETE:
5047 hci_inquiry_complete_evt(hdev, skb);
5050 case HCI_EV_INQUIRY_RESULT:
5051 hci_inquiry_result_evt(hdev, skb);
5054 case HCI_EV_CONN_COMPLETE:
5055 hci_conn_complete_evt(hdev, skb);
5058 case HCI_EV_CONN_REQUEST:
5059 hci_conn_request_evt(hdev, skb);
5062 case HCI_EV_DISCONN_COMPLETE:
5063 hci_disconn_complete_evt(hdev, skb);
5066 case HCI_EV_AUTH_COMPLETE:
5067 hci_auth_complete_evt(hdev, skb);
5070 case HCI_EV_REMOTE_NAME:
5071 hci_remote_name_evt(hdev, skb);
5074 case HCI_EV_ENCRYPT_CHANGE:
5075 hci_encrypt_change_evt(hdev, skb);
5078 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5079 hci_change_link_key_complete_evt(hdev, skb);
5082 case HCI_EV_REMOTE_FEATURES:
5083 hci_remote_features_evt(hdev, skb);
5086 case HCI_EV_CMD_COMPLETE:
5087 hci_cmd_complete_evt(hdev, skb);
5090 case HCI_EV_CMD_STATUS:
5091 hci_cmd_status_evt(hdev, skb);
5094 case HCI_EV_HARDWARE_ERROR:
5095 hci_hardware_error_evt(hdev, skb);
5098 case HCI_EV_ROLE_CHANGE:
5099 hci_role_change_evt(hdev, skb);
5102 case HCI_EV_NUM_COMP_PKTS:
5103 hci_num_comp_pkts_evt(hdev, skb);
5106 case HCI_EV_MODE_CHANGE:
5107 hci_mode_change_evt(hdev, skb);
5110 case HCI_EV_PIN_CODE_REQ:
5111 hci_pin_code_request_evt(hdev, skb);
5114 case HCI_EV_LINK_KEY_REQ:
5115 hci_link_key_request_evt(hdev, skb);
5118 case HCI_EV_LINK_KEY_NOTIFY:
5119 hci_link_key_notify_evt(hdev, skb);
5122 case HCI_EV_CLOCK_OFFSET:
5123 hci_clock_offset_evt(hdev, skb);
5126 case HCI_EV_PKT_TYPE_CHANGE:
5127 hci_pkt_type_change_evt(hdev, skb);
5130 case HCI_EV_PSCAN_REP_MODE:
5131 hci_pscan_rep_mode_evt(hdev, skb);
5134 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5135 hci_inquiry_result_with_rssi_evt(hdev, skb);
5138 case HCI_EV_REMOTE_EXT_FEATURES:
5139 hci_remote_ext_features_evt(hdev, skb);
5142 case HCI_EV_SYNC_CONN_COMPLETE:
5143 hci_sync_conn_complete_evt(hdev, skb);
5146 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5147 hci_extended_inquiry_result_evt(hdev, skb);
5150 case HCI_EV_KEY_REFRESH_COMPLETE:
5151 hci_key_refresh_complete_evt(hdev, skb);
5154 case HCI_EV_IO_CAPA_REQUEST:
5155 hci_io_capa_request_evt(hdev, skb);
5158 case HCI_EV_IO_CAPA_REPLY:
5159 hci_io_capa_reply_evt(hdev, skb);
5162 case HCI_EV_USER_CONFIRM_REQUEST:
5163 hci_user_confirm_request_evt(hdev, skb);
5166 case HCI_EV_USER_PASSKEY_REQUEST:
5167 hci_user_passkey_request_evt(hdev, skb);
5170 case HCI_EV_USER_PASSKEY_NOTIFY:
5171 hci_user_passkey_notify_evt(hdev, skb);
5174 case HCI_EV_KEYPRESS_NOTIFY:
5175 hci_keypress_notify_evt(hdev, skb);
5178 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5179 hci_simple_pair_complete_evt(hdev, skb);
5182 case HCI_EV_REMOTE_HOST_FEATURES:
5183 hci_remote_host_features_evt(hdev, skb);
5186 case HCI_EV_LE_META:
5187 hci_le_meta_evt(hdev, skb);
5190 case HCI_EV_CHANNEL_SELECTED:
5191 hci_chan_selected_evt(hdev, skb);
5194 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5195 hci_remote_oob_data_request_evt(hdev, skb);
5198 case HCI_EV_PHY_LINK_COMPLETE:
5199 hci_phy_link_complete_evt(hdev, skb);
5202 case HCI_EV_LOGICAL_LINK_COMPLETE:
5203 hci_loglink_complete_evt(hdev, skb);
5206 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5207 hci_disconn_loglink_complete_evt(hdev, skb);
5210 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5211 hci_disconn_phylink_complete_evt(hdev, skb);
5214 case HCI_EV_NUM_COMP_BLOCKS:
5215 hci_num_comp_blocks_evt(hdev, skb);
5219 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5224 hdev->stat.evt_rx++;