2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
37 /* Handle HCI Event packets */
/* Command Complete for HCI_Inquiry_Cancel: clear the in-progress inquiry
 * flag, wake any waiters on that bit, mark discovery stopped and kick any
 * pending connection attempts.
 * NOTE(review): listing is missing interior lines (status check, locking,
 * braces) — presumably failures bail out before the flag is cleared. */
39 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
41 __u8 status = *((__u8 *) skb->data);
43 BT_DBG("%s status 0x%2.2x", hdev->name, status);
48 clear_bit(HCI_INQUIRY, &hdev->flags);
/* barrier pairs with wait_on_bit() waiters, as wake_up_bit requires */
49 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
50 wake_up_bit(&hdev->flags, HCI_INQUIRY);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
56 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Periodic_Inquiry_Mode: record that periodic
 * inquiry is now active. Missing lines presumably guard on status. */
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
61 __u8 status = *((__u8 *) skb->data);
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
/* Command Complete for HCI_Exit_Periodic_Inquiry_Mode: clear the periodic
 * inquiry flag and resume any queued connection attempts. */
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73 __u8 status = *((__u8 *) skb->data);
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Remote_Name_Request_Cancel: nothing to update,
 * only a debug trace (second parameter line is missing from this listing). */
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
88 BT_DBG("%s", hdev->name);
/* Command Complete for HCI_Role_Discovery: look up the connection by
 * handle and update its master/slave role flag from rp->role.
 * NOTE(review): the branch condition selecting set vs clear is among the
 * missing lines; presumably it tests rp->role against the master value. */
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 clear_bit(HCI_CONN_MASTER, &conn->flags);
108 set_bit(HCI_CONN_MASTER, &conn->flags);
111 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Link_Policy_Settings: cache the returned
 * link policy on the matching connection (lookup under hdev lock). */
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
128 conn->link_policy = __le16_to_cpu(rp->policy);
130 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Link_Policy_Settings: recover the policy
 * we sent (payload offset +2 skips the handle field) and store it on the
 * connection identified by rp->handle. */
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
135 struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 struct hci_conn *conn;
139 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
144 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* policy lives 2 bytes into the sent command parameters, after the handle */
152 conn->link_policy = get_unaligned_le16(sent + 2);
154 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Default_Link_Policy_Settings: cache the
 * controller-wide default link policy on hdev. */
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
167 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for HCI_Write_Default_Link_Policy_Settings: mirror the
 * value we sent into hdev->link_policy (sent params start with the policy). */
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
173 __u8 status = *((__u8 *) skb->data);
176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
181 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
185 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: drop the in-flight reset flag and return
 * all volatile controller state to its post-reset defaults — non-persistent
 * dev_flags, discovery state, cached TX power, advertising/scan-response
 * data and LE scan type. */
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
190 __u8 status = *((__u8 *) skb->data);
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
194 clear_bit(HCI_RESET, &hdev->flags);
196 /* Reset all non-persistent flags */
197 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
199 hdev->discovery.state = DISCOVERY_STOPPED;
/* TX power unknown until re-read from the controller */
200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
201 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
203 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
204 hdev->adv_data_len = 0;
206 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
207 hdev->scan_rsp_data_len = 0;
209 hdev->le_scan_type = LE_SCAN_PASSIVE;
211 hdev->ssp_debug_mode = 0;
/* Command Complete for HCI_Write_Local_Name: notify mgmt of the outcome
 * when the management interface is active, and on success cache the name
 * we sent into hdev->dev_name. */
214 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
216 __u8 status = *((__u8 *) skb->data);
219 BT_DBG("%s status 0x%2.2x", hdev->name, status);
221 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
227 if (test_bit(HCI_MGMT, &hdev->dev_flags))
228 mgmt_set_local_name_complete(hdev, sent, status);
230 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
232 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Name: only during initial setup do
 * we adopt the controller's name as hdev->dev_name. */
235 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
237 struct hci_rp_read_local_name *rp = (void *) skb->data;
239 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
244 if (test_bit(HCI_SETUP, &hdev->dev_flags))
245 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for HCI_Write_Authentication_Enable: on success update
 * the HCI_AUTH flag to match the parameter we sent, then inform mgmt. */
248 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
250 __u8 status = *((__u8 *) skb->data);
253 BT_DBG("%s status 0x%2.2x", hdev->name, status);
255 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
260 __u8 param = *((__u8 *) sent);
262 if (param == AUTH_ENABLED)
263 set_bit(HCI_AUTH, &hdev->flags);
265 clear_bit(HCI_AUTH, &hdev->flags);
268 if (test_bit(HCI_MGMT, &hdev->dev_flags))
269 mgmt_auth_enable_complete(hdev, status);
/* Command Complete for HCI_Write_Encryption_Mode: track the HCI_ENCRYPT
 * flag according to the mode we sent. The condition choosing set vs clear
 * is among the missing lines — presumably `if (param)`. */
272 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
274 __u8 status = *((__u8 *) skb->data);
278 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
287 param = *((__u8 *) sent);
290 set_bit(HCI_ENCRYPT, &hdev->flags);
292 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for HCI_Write_Scan_Enable: synchronize the ISCAN/PSCAN
 * flags with the scan mode we requested and emit mgmt discoverable /
 * connectable events on transitions. On failure, mgmt is told the write
 * failed and the discoverable timeout is reset. */
295 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
297 __u8 status = *((__u8 *) skb->data);
299 int old_pscan, old_iscan;
302 BT_DBG("%s status 0x%2.2x", hdev->name, status);
304 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
308 param = *((__u8 *) sent);
/* error path: report failure and drop any pending discoverable timeout */
313 mgmt_write_scan_failed(hdev, param, status);
314 hdev->discov_timeout = 0;
318 /* We need to ensure that we set this back on if someone changed
319 * the scan mode through a raw HCI socket.
321 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
/* remember previous state so we only signal mgmt on actual transitions */
323 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
324 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
326 if (param & SCAN_INQUIRY) {
327 set_bit(HCI_ISCAN, &hdev->flags);
329 mgmt_discoverable(hdev, 1);
330 } else if (old_iscan)
331 mgmt_discoverable(hdev, 0);
333 if (param & SCAN_PAGE) {
334 set_bit(HCI_PSCAN, &hdev->flags);
336 mgmt_connectable(hdev, 1);
337 } else if (old_pscan)
338 mgmt_connectable(hdev, 0);
341 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Class_of_Device: cache the 3-byte CoD. */
344 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
346 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
348 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
353 memcpy(hdev->dev_class, rp->dev_class, 3);
/* CoD is little-endian on the wire, hence the reversed byte order here */
355 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
356 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for HCI_Write_Class_of_Device: on success copy the CoD
 * we sent into hdev, then notify mgmt of the result either way. */
359 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
361 __u8 status = *((__u8 *) skb->data);
364 BT_DBG("%s status 0x%2.2x", hdev->name, status);
366 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
373 memcpy(hdev->dev_class, sent, 3);
375 if (test_bit(HCI_MGMT, &hdev->dev_flags))
376 mgmt_set_class_of_dev_complete(hdev, sent, status);
378 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Voice_Setting: cache the setting and, if
 * it changed, notify the driver via hdev->notify (missing lines presumably
 * early-return on no change and check notify is non-NULL). */
381 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
383 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
391 setting = __le16_to_cpu(rp->voice_setting);
393 if (hdev->voice_setting == setting)
396 hdev->voice_setting = setting;
398 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
401 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Write_Voice_Setting: mirror the setting we sent
 * into hdev and notify the driver on change, same pattern as the read path. */
404 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
407 __u8 status = *((__u8 *) skb->data);
411 BT_DBG("%s status 0x%2.2x", hdev->name, status);
416 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
420 setting = get_unaligned_le16(sent);
422 if (hdev->voice_setting == setting)
425 hdev->voice_setting = setting;
427 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
430 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Read_Number_Of_Supported_IAC: cache how many
 * inquiry access codes the controller supports. */
433 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
436 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
438 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
443 hdev->num_iac = rp->num_iac;
445 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for HCI_Write_Simple_Pairing_Mode: keep the host SSP
 * feature bit and HCI_SSP_ENABLED dev flag in sync with the mode we sent,
 * and tell mgmt when it is listening. The branch conditions (sent->mode,
 * status checks) are among the missing lines. */
448 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
450 __u8 status = *((__u8 *) skb->data);
451 struct hci_cp_write_ssp_mode *sent;
453 BT_DBG("%s status 0x%2.2x", hdev->name, status);
455 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
/* features[1][0] is the host-features page; mirror SSP host support there */
461 hdev->features[1][0] |= LMP_HOST_SSP;
463 hdev->features[1][0] &= ~LMP_HOST_SSP;
466 if (test_bit(HCI_MGMT, &hdev->dev_flags))
467 mgmt_ssp_enable_complete(hdev, sent->mode, status);
470 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
472 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Command Complete for HCI_Write_Secure_Connections_Host_Support: same
 * shape as the SSP handler — sync the host SC feature bit and the
 * HCI_SC_ENABLED dev flag with sent->support, notify mgmt. */
476 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
478 u8 status = *((u8 *) skb->data);
479 struct hci_cp_write_sc_support *sent;
481 BT_DBG("%s status 0x%2.2x", hdev->name, status);
483 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
489 hdev->features[1][0] |= LMP_HOST_SC;
491 hdev->features[1][0] &= ~LMP_HOST_SC;
494 if (test_bit(HCI_MGMT, &hdev->dev_flags))
495 mgmt_sc_enable_complete(hdev, sent->support, status);
498 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
500 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* Command Complete for HCI_Read_Local_Version_Information: only during
 * setup do we record HCI/LMP versions, revision and manufacturer. */
504 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
506 struct hci_rp_read_local_version *rp = (void *) skb->data;
508 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
513 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
514 hdev->hci_ver = rp->hci_ver;
515 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
516 hdev->lmp_ver = rp->lmp_ver;
517 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
518 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for HCI_Read_Local_Supported_Commands: cache the
 * supported-commands bitmap, but only while in setup. */
522 static void hci_cc_read_local_commands(struct hci_dev *hdev,
525 struct hci_rp_read_local_commands *rp = (void *) skb->data;
527 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
532 if (test_bit(HCI_SETUP, &hdev->dev_flags))
533 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for HCI_Read_Local_Supported_Features: cache the 8-byte
 * feature page 0, then widen the default ACL packet-type and (e)SCO link
 * masks according to what the controller advertises. */
536 static void hci_cc_read_local_features(struct hci_dev *hdev,
539 struct hci_rp_read_local_features *rp = (void *) skb->data;
541 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
546 memcpy(hdev->features, rp->features, 8);
548 /* Adjust default settings according to features
549 * supported by device. */
551 if (hdev->features[0][0] & LMP_3SLOT)
552 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
554 if (hdev->features[0][0] & LMP_5SLOT)
555 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
557 if (hdev->features[0][1] & LMP_HV2) {
558 hdev->pkt_type |= (HCI_HV2);
559 hdev->esco_type |= (ESCO_HV2);
562 if (hdev->features[0][1] & LMP_HV3) {
563 hdev->pkt_type |= (HCI_HV3);
564 hdev->esco_type |= (ESCO_HV3);
/* eSCO capability implies at least EV3 support */
567 if (lmp_esco_capable(hdev))
568 hdev->esco_type |= (ESCO_EV3);
570 if (hdev->features[0][4] & LMP_EV4)
571 hdev->esco_type |= (ESCO_EV4);
573 if (hdev->features[0][4] & LMP_EV5)
574 hdev->esco_type |= (ESCO_EV5);
576 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
577 hdev->esco_type |= (ESCO_2EV3);
579 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
580 hdev->esco_type |= (ESCO_3EV3);
582 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
583 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for HCI_Read_Local_Extended_Features: track the highest
 * feature page reported and cache this page's 8 feature bytes if it fits
 * within HCI_MAX_PAGES. */
586 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
589 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
591 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596 if (hdev->max_page < rp->max_page)
597 hdev->max_page = rp->max_page;
599 if (rp->page < HCI_MAX_PAGES)
600 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for HCI_Read_Flow_Control_Mode: cache the mode
 * (packet-based vs data-block-based flow control). */
603 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
606 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for HCI_Read_Buffer_Size: record ACL/SCO MTUs and
 * packet counts, then initialize the available-credit counters from them.
 * The HCI_QUIRK_FIXUP_BUFFER_SIZE branch body is among the missing lines —
 * presumably it adjusts bogus values reported by broken controllers. */
616 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
618 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
620 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
625 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
626 hdev->sco_mtu = rp->sco_mtu;
627 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
628 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
630 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
/* start with full credit: all packets available */
635 hdev->acl_cnt = hdev->acl_pkts;
636 hdev->sco_cnt = hdev->sco_pkts;
638 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
639 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for HCI_Read_BD_ADDR: during init adopt the public
 * address; during setup also stash it as the original setup address. */
642 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
644 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
646 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 if (test_bit(HCI_INIT, &hdev->flags))
652 bacpy(&hdev->bdaddr, &rp->bdaddr);
654 if (test_bit(HCI_SETUP, &hdev->dev_flags))
655 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for HCI_Read_Page_Scan_Activity: during init cache the
 * page-scan interval and window. */
658 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
661 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
663 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
668 if (test_bit(HCI_INIT, &hdev->flags)) {
669 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
670 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for HCI_Write_Page_Scan_Activity: mirror the interval
 * and window we sent into hdev. */
674 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
677 u8 status = *((u8 *) skb->data);
678 struct hci_cp_write_page_scan_activity *sent;
680 BT_DBG("%s status 0x%2.2x", hdev->name, status);
685 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
689 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
690 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for HCI_Read_Page_Scan_Type: during init cache the
 * page-scan type (standard vs interlaced). */
693 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
696 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
698 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
703 if (test_bit(HCI_INIT, &hdev->flags))
704 hdev->page_scan_type = rp->type;
/* Command Complete for HCI_Write_Page_Scan_Type: the sent parameter is a
 * single type byte; mirror it into hdev->page_scan_type. */
707 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
710 u8 status = *((u8 *) skb->data);
713 BT_DBG("%s status 0x%2.2x", hdev->name, status);
718 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
720 hdev->page_scan_type = *type;
/* Command Complete for HCI_Read_Data_Block_Size: record the block-based
 * flow-control parameters and initialize the available block count. */
723 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
726 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
728 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
733 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
734 hdev->block_len = __le16_to_cpu(rp->block_len);
735 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
737 hdev->block_cnt = hdev->num_blocks;
739 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
740 hdev->block_cnt, hdev->block_len);
/* Command Complete for HCI_Read_Clock: length-check the response, recover
 * the sent command to learn which clock was requested (0x00 = local), then
 * store either hdev->clock or the per-connection clock plus accuracy. */
743 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
745 struct hci_rp_read_clock *rp = (void *) skb->data;
746 struct hci_cp_read_clock *cp;
747 struct hci_conn *conn;
749 BT_DBG("%s", hdev->name);
/* guard against short responses before touching rp fields */
751 if (skb->len < sizeof(*rp))
759 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
763 if (cp->which == 0x00) {
764 hdev->clock = le32_to_cpu(rp->clock);
768 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
770 conn->clock = le32_to_cpu(rp->clock);
771 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
775 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_AMP_Info: cache all AMP controller
 * capabilities (bandwidth, latency, PDU sizes, flush timeouts) and hand
 * the info to the A2MP layer so it can answer a pending Get Info request. */
778 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
781 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
783 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
788 hdev->amp_status = rp->amp_status;
789 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
790 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
791 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
792 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
793 hdev->amp_type = rp->amp_type;
794 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
795 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
796 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
797 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
800 a2mp_send_getinfo_rsp(hdev);
/* Command Complete for HCI_Read_Local_AMP_ASSOC: reassemble the AMP assoc
 * structure from fragments. If more data remains than fits this fragment,
 * append it and request the next fragment; otherwise finish the assoc and
 * send the A2MP responses.
 * NOTE(review): no visible bounds check that offset + frag_len stays
 * within assoc->data — presumably enforced elsewhere; worth confirming. */
803 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
806 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
807 struct amp_assoc *assoc = &hdev->loc_assoc;
808 size_t rem_len, frag_len;
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815 frag_len = skb->len - sizeof(*rp);
816 rem_len = __le16_to_cpu(rp->rem_len);
818 if (rem_len > frag_len) {
819 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
821 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
822 assoc->offset += frag_len;
824 /* Read other fragments */
825 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
/* final fragment: only rem_len bytes are valid */
830 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
831 assoc->len = assoc->offset + rem_len;
835 /* Send A2MP Rsp when all fragments are received */
836 a2mp_send_getampassoc_rsp(hdev, rp->status);
837 a2mp_send_create_phy_link_req(hdev, rp->status);
/* Command Complete for HCI_Read_Inquiry_Response_TX_Power_Level: cache the
 * TX power used in inquiry responses. */
840 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
843 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
850 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for HCI_PIN_Code_Request_Reply: notify mgmt of the
 * outcome, then remember the PIN length on the ACL connection so later
 * key-type decisions can use it. */
853 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
855 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
856 struct hci_cp_pin_code_reply *cp;
857 struct hci_conn *conn;
859 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
863 if (test_bit(HCI_MGMT, &hdev->dev_flags))
864 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
869 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
873 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
875 conn->pin_length = cp->pin_len;
878 hci_dev_unlock(hdev);
/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: forward the
 * result to mgmt when it is listening. */
881 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
883 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
889 if (test_bit(HCI_MGMT, &hdev->dev_flags))
890 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
893 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Buffer_Size: record LE ACL MTU and
 * packet count, and initialize the LE credit counter to full. */
896 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
899 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
901 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
906 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
907 hdev->le_pkts = rp->le_max_pkt;
909 hdev->le_cnt = hdev->le_pkts;
911 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for HCI_LE_Read_Local_Supported_Features: cache the
 * 8-byte LE feature mask. */
914 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
917 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
924 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power: cache
 * the advertising TX power level. */
927 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
930 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
932 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
937 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for HCI_User_Confirmation_Request_Reply: forward the
 * result to mgmt (ACL link, no address type) when mgmt is active. */
940 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
942 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
944 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948 if (test_bit(HCI_MGMT, &hdev->dev_flags))
949 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
952 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply:
 * forward the result to mgmt when it is active. */
955 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
958 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
960 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964 if (test_bit(HCI_MGMT, &hdev->dev_flags))
965 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
966 ACL_LINK, 0, rp->status);
968 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Reply: forward the result
 * to mgmt when it is active (reply struct shared with user-confirm). */
971 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
973 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
975 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
979 if (test_bit(HCI_MGMT, &hdev->dev_flags))
980 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
983 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Negative_Reply: forward
 * the result to mgmt when it is active. */
986 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
989 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
991 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
995 if (test_bit(HCI_MGMT, &hdev->dev_flags))
996 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
997 ACL_LINK, 0, rp->status);
999 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_OOB_Data: hand the P-192 hash and
 * randomizer to mgmt; the P-256 pair is NULL for this legacy command. */
1002 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1003 struct sk_buff *skb)
1005 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1007 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1010 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
1011 NULL, NULL, rp->status);
1012 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_OOB_Extended_Data: hand both the
 * P-192 and P-256 hash/randomizer pairs to mgmt. */
1015 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1016 struct sk_buff *skb)
1018 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1020 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1023 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1024 rp->hash256, rp->randomizer256,
1026 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Random_Address: remember the random
 * address we programmed into the controller. */
1030 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1032 __u8 status = *((__u8 *) skb->data);
1035 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1040 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1046 bacpy(&hdev->random_addr, sent);
1048 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertise_Enable: track HCI_LE_ADV and,
 * when advertising was enabled while we have a pending outgoing LE
 * connection (slave-initiated), arm the connection timeout so a peer that
 * never connects doesn't leave us stuck. The enable/disable branch
 * structure around *sent is among the missing lines. */
1051 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1053 __u8 *sent, status = *((__u8 *) skb->data);
1055 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1060 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1066 /* If we're doing connection initation as peripheral. Set a
1067 * timeout in case something goes wrong.
1070 struct hci_conn *conn;
1072 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1074 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1076 queue_delayed_work(hdev->workqueue,
1077 &conn->le_conn_timeout,
1078 conn->conn_timeout);
1080 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1083 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Parameters: remember the scan type
 * (active/passive) we configured, for later adv-report handling. */
1086 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1088 struct hci_cp_le_set_scan_param *cp;
1089 __u8 status = *((__u8 *) skb->data);
1091 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1096 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1102 hdev->le_scan_type = cp->type;
1104 hci_dev_unlock(hdev);
/* True if an advertising report is buffered awaiting its scan response
 * (last_adv_addr != BDADDR_ANY). */
1107 static bool has_pending_adv_report(struct hci_dev *hdev)
1109 struct discovery_state *d = &hdev->discovery;
1111 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report by resetting the sentinel address
 * and data length. */
1114 static void clear_pending_adv_report(struct hci_dev *hdev)
1116 struct discovery_state *d = &hdev->discovery;
1118 bacpy(&d->last_adv_addr, BDADDR_ANY);
1119 d->last_adv_data_len = 0;
/* Buffer one advertising report (address, type, RSSI, flags, AD data) in
 * the discovery state until the matching scan response arrives.
 * NOTE(review): no visible bound on len vs sizeof(last_adv_data) —
 * presumably callers guarantee it fits; confirm at call sites. */
1122 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1123 u8 bdaddr_type, s8 rssi, u32 flags,
1126 struct discovery_state *d = &hdev->discovery;
1128 bacpy(&d->last_adv_addr, bdaddr);
1129 d->last_adv_addr_type = bdaddr_type;
1130 d->last_adv_rssi = rssi;
1131 d->last_adv_flags = flags;
1132 memcpy(d->last_adv_data, data, len);
1133 d->last_adv_data_len = len;
/* Command Complete for HCI_LE_Set_Scan_Enable. On enable: set HCI_LE_SCAN
 * and, for active scanning, discard any stale buffered adv report. On
 * disable: flush a still-pending adv report to mgmt, cancel the
 * scan-disable timer, clear HCI_LE_SCAN, and either mark discovery stopped
 * (if scanning was interrupted for a connect) or re-enable advertising.
 * Reserved enable values are logged as errors. */
1136 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1137 struct sk_buff *skb)
1139 struct hci_cp_le_set_scan_enable *cp;
1140 __u8 status = *((__u8 *) skb->data);
1142 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1147 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1151 switch (cp->enable) {
1152 case LE_SCAN_ENABLE:
1153 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1154 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1155 clear_pending_adv_report(hdev);
1158 case LE_SCAN_DISABLE:
1159 /* We do this here instead of when setting DISCOVERY_STOPPED
1160 * since the latter would potentially require waiting for
1161 * inquiry to stop too.
1163 if (has_pending_adv_report(hdev)) {
1164 struct discovery_state *d = &hdev->discovery;
1166 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1167 d->last_adv_addr_type, NULL,
1168 d->last_adv_rssi, d->last_adv_flags,
1170 d->last_adv_data_len, NULL, 0);
1173 /* Cancel this timer so that we don't try to disable scanning
1174 * when it's already disabled.
1176 cancel_delayed_work(&hdev->le_scan_disable);
1178 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1180 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1181 * interrupted scanning due to a connect request. Mark
1182 * therefore discovery as stopped. If this was not
1183 * because of a connect request advertising might have
1184 * been disabled because of active scanning, so
1185 * re-enable it again if necessary.
1187 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1189 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1190 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1191 hdev->discovery.state == DISCOVERY_FINDING)
1192 mgmt_reenable_advertising(hdev);
/* default: controller sent a value the spec reserves */
1197 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
/* Command Complete for HCI_LE_Read_White_List_Size: cache how many entries
 * the controller's white list can hold. */
1202 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1203 struct sk_buff *skb)
1205 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1207 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1212 hdev->le_white_list_size = rp->size;
/* Command Complete for HCI_LE_Clear_White_List: mirror the clear in our
 * host-side copy of the white list. */
1215 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1216 struct sk_buff *skb)
1218 __u8 status = *((__u8 *) skb->data);
1220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1225 hci_white_list_clear(hdev);
/* Command Complete for HCI_LE_Add_Device_To_White_List: mirror the added
 * entry (address + type from the sent command) in our host-side copy. */
1228 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1229 struct sk_buff *skb)
1231 struct hci_cp_le_add_to_white_list *sent;
1232 __u8 status = *((__u8 *) skb->data);
1234 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1239 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1243 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
/* Command Complete for HCI_LE_Remove_Device_From_White_List: mirror the
 * removal in our host-side copy. */
1246 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1247 struct sk_buff *skb)
1249 struct hci_cp_le_del_from_white_list *sent;
1250 __u8 status = *((__u8 *) skb->data);
1252 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1257 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1261 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
/* Command Complete for HCI_LE_Read_Supported_States: cache the 8-byte LE
 * state-combination bitmap. */
1264 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1265 struct sk_buff *skb)
1267 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1269 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1274 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete for HCI_Write_LE_Host_Supported: synchronize the host
 * feature bits (LE and simultaneous LE/BR-EDR) and the HCI_LE_ENABLED /
 * HCI_ADVERTISING dev flags with the values we sent. The sent->le and
 * sent->simul branch conditions are among the missing lines. */
1277 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1278 struct sk_buff *skb)
1280 struct hci_cp_write_le_host_supported *sent;
1281 __u8 status = *((__u8 *) skb->data);
1283 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1288 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1293 hdev->features[1][0] |= LMP_HOST_LE;
1294 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1296 hdev->features[1][0] &= ~LMP_HOST_LE;
1297 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* disabling LE also implies advertising is off */
1298 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1302 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1304 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
/* Command Complete for HCI_LE_Set_Advertising_Parameters: remember which
 * own-address type advertising was configured with. */
1307 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1309 struct hci_cp_le_set_adv_param *cp;
1310 u8 status = *((u8 *) skb->data);
1312 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1317 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1322 hdev->adv_addr_type = cp->own_address_type;
1323 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Remote_AMP_ASSOC: continue writing the
 * next assoc fragment for this physical link. */
1326 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1327 struct sk_buff *skb)
1329 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1331 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1332 hdev->name, rp->status, rp->phy_handle);
1337 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
/* Command Complete for HCI_Read_RSSI: store the RSSI on the connection
 * matching the returned handle. */
1340 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1342 struct hci_rp_read_rssi *rp = (void *) skb->data;
1343 struct hci_conn *conn;
1345 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1352 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1354 conn->rssi = rp->rssi;
1356 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Transmit_Power_Level: recover the sent
 * command to learn whether current or maximum power was requested, then
 * store the result in the matching conn field. The case labels for
 * sent->type (current vs max) are among the missing lines. */
1359 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1361 struct hci_cp_read_tx_power *sent;
1362 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1363 struct hci_conn *conn;
1365 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1370 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1376 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1380 switch (sent->type) {
1382 conn->tx_power = rp->tx_power;
1385 conn->max_tx_power = rp->tx_power;
1390 hci_dev_unlock(hdev);
/* Command Status for HCI_Inquiry: on failure, retry pending connections;
 * on success, mark an inquiry as in progress. The status branch separating
 * these two paths is among the missing lines. */
1393 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1395 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1398 hci_conn_check_pending(hdev);
1402 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for HCI_Create_Connection. On error: a connection in
 * BT_CONNECT is failed and torn down, except for status 0x0c ("command
 * disallowed") which is retried up to two attempts by parking the conn in
 * BT_CONNECT2. On success with no conn object yet (command came from a raw
 * socket), create one as master so the event path has state to update. */
1405 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1407 struct hci_cp_create_conn *cp;
1408 struct hci_conn *conn;
1410 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1412 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1418 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1420 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1423 if (conn && conn->state == BT_CONNECT) {
/* 0x0c = Command Disallowed: worth retrying a couple of times */
1424 if (status != 0x0c || conn->attempt > 2) {
1425 conn->state = BT_CLOSED;
1426 hci_proto_connect_cfm(conn, status);
1429 conn->state = BT_CONNECT2;
1433 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1436 set_bit(HCI_CONN_MASTER, &conn->flags);
1438 BT_ERR("No memory for new connection");
1442 hci_dev_unlock(hdev);
/* Command Status for HCI_Add_SCO_Connection (error path): look up the ACL
 * by the handle we sent, find its attached SCO link, close it and confirm
 * the failure to the protocol layer. Missing lines presumably bail out
 * early on success and when the ACL/SCO lookups come up empty. */
1445 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1447 struct hci_cp_add_sco *cp;
1448 struct hci_conn *acl, *sco;
1451 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1456 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1460 handle = __le16_to_cpu(cp->handle);
1462 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1466 acl = hci_conn_hash_lookup_handle(hdev, handle);
1470 sco->state = BT_CLOSED;
1472 hci_proto_connect_cfm(sco, status);
1477 hci_dev_unlock(hdev);
/* Command Status for HCI_Authentication_Requested (error path): if the
 * connection is still configuring, confirm the failure upward and drop the
 * reference taken for the authentication attempt. */
1480 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1482 struct hci_cp_auth_requested *cp;
1483 struct hci_conn *conn;
1485 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1490 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1496 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1498 if (conn->state == BT_CONFIG) {
1499 hci_proto_connect_cfm(conn, status);
1500 hci_conn_drop(conn);
1504 hci_dev_unlock(hdev);
/* Command Status for HCI_Set_Connection_Encryption (error path): mirror of
 * hci_cs_auth_requested — confirm the failure to the protocol layer while
 * the connection is still in BT_CONFIG and drop the held reference. */
1507 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1509 struct hci_cp_set_conn_encrypt *cp;
1510 struct hci_conn *conn;
1512 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1517 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1523 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1525 if (conn->state == BT_CONFIG) {
1526 hci_proto_connect_cfm(conn, status);
1527 hci_conn_drop(conn);
1531 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication before
 * it can be reported connected: only for our own (conn->out) connections
 * in BT_CONFIG with a security level above SDP; non-SSP links without MITM
 * are exempt unless MEDIUM/HIGH/FIPS security is pending. Returns nonzero
 * when authentication must be requested (return statements are among the
 * missing lines). */
1534 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1535 struct hci_conn *conn)
1537 if (conn->state != BT_CONFIG || !conn->out)
1540 if (conn->pending_sec_level == BT_SECURITY_SDP)
1543 /* Only request authentication for SSP connections or non-SSP
1544 * devices with sec_level MEDIUM or HIGH or if MITM protection
1547 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1548 conn->pending_sec_level != BT_SECURITY_FIPS &&
1549 conn->pending_sec_level != BT_SECURITY_HIGH &&
1550 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters and clock offset learned during inquiry so the
 * controller can page the remote device efficiently.
 * Returns the result of hci_send_cmd() (0 on successful queuing). */
1556 static int hci_resolve_name(struct hci_dev *hdev,
1557 struct inquiry_entry *e)
1559 struct hci_cp_remote_name_req cp;
1561 memset(&cp, 0, sizeof(cp));
1563 bacpy(&cp.bdaddr, &e->data.bdaddr);
1564 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1565 cp.pscan_mode = e->data.pscan_mode;
1566 cp.clock_offset = e->data.clock_offset;
1568 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device whose name is
 * still needed.  Returns true when a resolution was started, false when
 * the resolve list is empty or no request could be sent (the explicit
 * return statements are elided in this excerpt). */
1571 static bool hci_resolve_next_name(struct hci_dev *hdev)
1573 struct discovery_state *discov = &hdev->discovery;
1574 struct inquiry_entry *e;
1576 if (list_empty(&discov->resolve))
/* Pick any cache entry still flagged NAME_NEEDED. */
1579 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1583 if (hci_resolve_name(hdev, e) == 0) {
1584 e->name_state = NAME_PENDING;
/* Process the outcome of a remote-name lookup during discovery: report
 * the connection to mgmt if not yet reported, update the inquiry-cache
 * entry's name state, forward the name to mgmt, and either resolve the
 * next pending name or finish discovery.
 * @name may be NULL / @name_len 0 when the lookup failed.
 * NOTE(review): several guard lines and the discov_complete label are
 * elided in this excerpt — confirm against the full source. */
1591 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1592 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1594 struct discovery_state *discov = &hdev->discovery;
1595 struct inquiry_entry *e;
/* First successful report of this connection goes to mgmt. */
1597 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1598 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1599 name_len, conn->dev_class);
1601 if (discov->state == DISCOVERY_STOPPED)
1604 if (discov->state == DISCOVERY_STOPPING)
1605 goto discov_complete;
1607 if (discov->state != DISCOVERY_RESOLVING)
1610 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1611 /* If the device was not found in a list of found devices names of which
1612 * are pending. there is no need to continue resolving a next name as it
1613 * will be done upon receiving another Remote Name Request Complete
/* Record whether the name lookup succeeded for this entry. */
1620 e->name_state = NAME_KNOWN;
1621 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1622 e->data.rssi, name, name_len);
1624 e->name_state = NAME_NOT_KNOWN;
1627 if (hci_resolve_next_name(hdev))
1631 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command-status handler for HCI_OP_REMOTE_NAME_REQ.  On failure,
 * notify the discovery machinery (via hci_check_pending_name) and, if
 * the connection still needs authentication, fire off an Authentication
 * Requested command so pairing can proceed despite the failed lookup.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
1634 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1636 struct hci_cp_remote_name_req *cp;
1637 struct hci_conn *conn;
1639 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1641 /* If successful wait for the name req complete event before
1642 * checking for the need to do authentication */
1646 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1652 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1654 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1655 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1660 if (!hci_outgoing_auth_needed(hdev, conn))
/* Request authentication exactly once per connection. */
1663 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1664 struct hci_cp_auth_requested auth_cp;
1666 auth_cp.handle = __cpu_to_le16(conn->handle);
1667 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1668 sizeof(auth_cp), &auth_cp);
1672 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_READ_REMOTE_FEATURES.  A failure
 * while the connection is in BT_CONFIG aborts connection setup.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
1675 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1677 struct hci_cp_read_remote_features *cp;
1678 struct hci_conn *conn;
1680 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1685 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1691 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1693 if (conn->state == BT_CONFIG) {
1694 hci_proto_connect_cfm(conn, status);
1695 hci_conn_drop(conn);
1699 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.  Same
 * failure handling as the plain remote-features variant: abort setup
 * if the connection is still configuring.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
1702 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1704 struct hci_cp_read_remote_ext_features *cp;
1705 struct hci_conn *conn;
1707 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1712 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1718 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1720 if (conn->state == BT_CONFIG) {
1721 hci_proto_connect_cfm(conn, status);
1722 hci_conn_drop(conn);
1726 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_SETUP_SYNC_CONN.  On failure the
 * pending (e)SCO link attached to the ACL connection is closed and the
 * failure confirmed upwards — parallel to hci_cs_add_sco().
 * NOTE(review): status/NULL guards, locking and the sco lookup are
 * elided in this excerpt — confirm against the full source. */
1729 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1731 struct hci_cp_setup_sync_conn *cp;
1732 struct hci_conn *acl, *sco;
1735 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1740 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1744 handle = __le16_to_cpu(cp->handle);
1746 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1750 acl = hci_conn_hash_lookup_handle(hdev, handle);
1754 sco->state = BT_CLOSED;
1756 hci_proto_connect_cfm(sco, status);
1761 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_SNIFF_MODE.  On failure, clear the
 * pending mode-change flag and, if a SCO setup was waiting on the mode
 * change, run it now with the error status.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
1764 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1766 struct hci_cp_sniff_mode *cp;
1767 struct hci_conn *conn;
1769 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1774 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1780 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1782 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1784 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1785 hci_sco_setup(conn, status);
1788 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_EXIT_SNIFF_MODE.  Identical failure
 * handling to hci_cs_sniff_mode(): clear the pending mode change and
 * complete any deferred SCO setup with the error status.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
1791 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1793 struct hci_cp_exit_sniff_mode *cp;
1794 struct hci_conn *conn;
1796 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1801 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1807 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1809 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1811 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1812 hci_sco_setup(conn, status);
1815 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_DISCONNECT.  A failed disconnect
 * request is reported to the mgmt layer so user space learns the
 * disconnect did not happen.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
1818 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1820 struct hci_cp_disconnect *cp;
1821 struct hci_conn *conn;
1826 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1832 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1834 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1835 conn->dst_type, status);
1837 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_CREATE_PHY_LINK (AMP).  On success
 * continue AMP setup by writing the remote assoc data; the failure path
 * (looking up and cleaning the hcon) is partially elided in this
 * excerpt — confirm against the full source. */
1840 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1842 struct hci_cp_create_phy_link *cp;
1844 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1846 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1853 struct hci_conn *hcon;
1855 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
/* Next step of AMP physical-link bring-up. */
1859 amp_write_remote_assoc(hdev, cp->phy_handle);
1862 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_ACCEPT_PHY_LINK (AMP).  On success
 * the AMP association write continues link setup.
 * NOTE(review): the status guard is elided in this excerpt — confirm
 * against the full source. */
1865 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1867 struct hci_cp_accept_phy_link *cp;
1869 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1874 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1878 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command-status handler for HCI_OP_LE_CREATE_CONN.  Records the
 * initiator/responder addresses needed later by SMP and arms a
 * connection-attempt timeout for direct (non-white-list) connects,
 * since LE has no page-timeout equivalent.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
1881 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1883 struct hci_cp_le_create_conn *cp;
1884 struct hci_conn *conn;
1886 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1888 /* All connection failure handling is taken care of by the
1889 * hci_le_conn_failed function which is triggered by the HCI
1890 * request completion callbacks used for connecting.
1895 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1901 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1905 /* Store the initiator and responder address information which
1906 * is needed for SMP. These values will not change during the
1907 * lifetime of the connection.
/* Our own (initiator) address: random if that is what we advertised. */
1909 conn->init_addr_type = cp->own_address_type;
1910 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1911 bacpy(&conn->init_addr, &hdev->random_addr);
1913 bacpy(&conn->init_addr, &hdev->bdaddr);
1915 conn->resp_addr_type = cp->peer_addr_type;
1916 bacpy(&conn->resp_addr, &cp->peer_addr);
1918 /* We don't want the connection attempt to stick around
1919 * indefinitely since LE doesn't have a page timeout concept
1920 * like BR/EDR. Set a timer for any connection that doesn't use
1921 * the white list for connecting.
1923 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1924 queue_delayed_work(conn->hdev->workqueue,
1925 &conn->le_conn_timeout,
1926 conn->conn_timeout);
1929 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_LE_START_ENC.  If starting LE link
 * encryption failed on an established connection, the link is torn down
 * with an authentication-failure reason.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
1932 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1934 struct hci_cp_le_start_enc *cp;
1935 struct hci_conn *conn;
1937 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1944 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1948 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1952 if (conn->state != BT_CONNECTED)
/* Encryption could not be started: drop the link as auth failure. */
1955 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1956 hci_conn_drop(conn);
1959 hci_dev_unlock(hdev);
/* Handle the Inquiry Complete event: wake waiters on HCI_INQUIRY,
 * then advance mgmt-driven discovery — either move to the name
 * resolution phase (if any discovered devices still need names) or
 * mark discovery stopped.
 * NOTE(review): several guard/return lines and locking are elided in
 * this excerpt — confirm against the full source. */
1962 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1964 __u8 status = *((__u8 *) skb->data);
1965 struct discovery_state *discov = &hdev->discovery;
1966 struct inquiry_entry *e;
1968 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1970 hci_conn_check_pending(hdev);
1972 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1975 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
1976 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1978 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1983 if (discov->state != DISCOVERY_FINDING)
1986 if (list_empty(&discov->resolve)) {
1987 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Start resolving names for devices found during inquiry. */
1991 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1992 if (e && hci_resolve_name(hdev, e) == 0) {
1993 e->name_state = NAME_PENDING;
1994 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1996 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2000 hci_dev_unlock(hdev);
/* Handle a (non-RSSI) Inquiry Result event.  Each response entry is
 * copied into the inquiry cache and reported to mgmt as a found device.
 * Periodic-inquiry results are ignored here.
 * NOTE(review): guard/return lines and locking appear elided in this
 * excerpt — confirm against the full source. */
2003 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2005 struct inquiry_data data;
/* First byte of the event is the response count; entries follow. */
2006 struct inquiry_info *info = (void *) (skb->data + 1);
2007 int num_rsp = *((__u8 *) skb->data);
2009 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2014 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2019 for (; num_rsp; num_rsp--, info++) {
2022 bacpy(&data.bdaddr, &info->bdaddr);
2023 data.pscan_rep_mode = info->pscan_rep_mode;
2024 data.pscan_period_mode = info->pscan_period_mode;
2025 data.pscan_mode = info->pscan_mode;
2026 memcpy(data.dev_class, info->dev_class, 3);
2027 data.clock_offset = info->clock_offset;
/* Plain inquiry results carry no SSP information. */
2029 data.ssp_mode = 0x00;
2031 flags = hci_inquiry_cache_update(hdev, &data, false);
2033 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2034 info->dev_class, 0, flags, NULL, 0, NULL, 0);
2037 hci_dev_unlock(hdev);
/* Handle the Connection Complete event.  On success the connection
 * object is moved to BT_CONFIG (ACL) or BT_CONNECTED (SCO), sysfs
 * entries are created, auth/encrypt flags mirrored from the adapter,
 * and follow-up commands (read remote features, change packet type)
 * are issued.  On failure the connect is reported failed and confirmed
 * upwards.  A SCO completion may arrive for a link we tracked as ESCO;
 * the type is fixed up accordingly.
 * NOTE(review): many guard/else/brace lines are elided in this excerpt
 * — confirm exact control flow against the full source. */
2040 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2042 struct hci_ev_conn_complete *ev = (void *) skb->data;
2043 struct hci_conn *conn;
2045 BT_DBG("%s", hdev->name);
2049 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2051 if (ev->link_type != SCO_LINK)
/* The controller may complete an eSCO request as plain SCO. */
2054 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2058 conn->type = SCO_LINK;
2062 conn->handle = __le16_to_cpu(ev->handle);
2064 if (conn->type == ACL_LINK) {
2065 conn->state = BT_CONFIG;
2066 hci_conn_hold(conn);
/* Give un-bonded legacy (non-SSP) incoming links a longer window
 * for pairing before the idle disconnect timer fires. */
2068 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2069 !hci_find_link_key(hdev, &ev->bdaddr))
2070 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2072 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2074 conn->state = BT_CONNECTED;
2076 hci_conn_add_sysfs(conn);
2078 if (test_bit(HCI_AUTH, &hdev->flags))
2079 set_bit(HCI_CONN_AUTH, &conn->flags);
2081 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2082 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2084 /* Get remote features */
2085 if (conn->type == ACL_LINK) {
2086 struct hci_cp_read_remote_features cp;
2087 cp.handle = ev->handle;
2088 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2092 /* Set packet type for incoming connection */
2093 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2094 struct hci_cp_change_conn_ptype cp;
2095 cp.handle = ev->handle;
2096 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2097 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close the connection and inform mgmt for ACL links. */
2101 conn->state = BT_CLOSED;
2102 if (conn->type == ACL_LINK)
2103 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2104 conn->dst_type, ev->status);
2107 if (conn->type == ACL_LINK)
2108 hci_sco_setup(conn, ev->status);
2111 hci_proto_connect_cfm(conn, ev->status);
2113 } else if (ev->link_type != ACL_LINK)
2114 hci_proto_connect_cfm(conn, ev->status);
2117 hci_dev_unlock(hdev);
2119 hci_conn_check_pending(hdev);
/* Handle the Connection Request event.  If the link policy and
 * protocol layers accept (and the peer is not blacklisted), update the
 * inquiry cache, create/find the connection object and answer with
 * Accept Connection Request (ACL / legacy SCO) or Accept Synchronous
 * Connection Request (eSCO) — or defer to the protocol layer.
 * Otherwise the request is rejected.
 * NOTE(review): several brace/else lines are elided in this excerpt —
 * confirm exact control flow against the full source. */
2122 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2124 struct hci_ev_conn_request *ev = (void *) skb->data;
2125 int mask = hdev->link_mode;
2128 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Let L2CAP/SCO layers veto or defer the incoming connection. */
2131 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2134 if ((mask & HCI_LM_ACCEPT) &&
2135 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
2136 /* Connection accepted */
2137 struct inquiry_entry *ie;
2138 struct hci_conn *conn;
2142 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2144 memcpy(ie->data.dev_class, ev->dev_class, 3);
2146 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2149 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2151 BT_ERR("No memory for new connection");
2152 hci_dev_unlock(hdev);
2157 memcpy(conn->dev_class, ev->dev_class, 3);
2159 hci_dev_unlock(hdev);
2161 if (ev->link_type == ACL_LINK ||
2162 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2163 struct hci_cp_accept_conn_req cp;
2164 conn->state = BT_CONNECT;
2166 bacpy(&cp.bdaddr, &ev->bdaddr);
2168 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2169 cp.role = 0x00; /* Become master */
2171 cp.role = 0x01; /* Remain slave */
2173 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
2175 } else if (!(flags & HCI_PROTO_DEFER)) {
2176 struct hci_cp_accept_sync_conn_req cp;
2177 conn->state = BT_CONNECT;
2179 bacpy(&cp.bdaddr, &ev->bdaddr);
2180 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 8000 bytes/s bandwidth, no latency/retransmission constraints. */
2182 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2183 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2184 cp.max_latency = cpu_to_le16(0xffff);
2185 cp.content_format = cpu_to_le16(hdev->voice_setting);
2186 cp.retrans_effort = 0xff;
2188 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
/* Deferred: upper layer will decide, keep in BT_CONNECT2. */
2191 conn->state = BT_CONNECT2;
2192 hci_proto_connect_cfm(conn, 0);
2195 /* Connection rejected */
2196 struct hci_cp_reject_conn_req cp;
2198 bacpy(&cp.bdaddr, &ev->bdaddr);
2199 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2200 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Map an HCI disconnect reason code to the coarser mgmt-interface
 * disconnect reason reported to user space.  Unrecognized codes fall
 * through to MGMT_DEV_DISCONN_UNKNOWN. */
2204 static u8 hci_to_mgmt_reason(u8 err)
2207 case HCI_ERROR_CONNECTION_TIMEOUT:
2208 return MGMT_DEV_DISCONN_TIMEOUT;
/* All remote-initiated terminations collapse into one mgmt reason. */
2209 case HCI_ERROR_REMOTE_USER_TERM:
2210 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2211 case HCI_ERROR_REMOTE_POWER_OFF:
2212 return MGMT_DEV_DISCONN_REMOTE;
2213 case HCI_ERROR_LOCAL_HOST_TERM:
2214 return MGMT_DEV_DISCONN_LOCAL_HOST;
2216 return MGMT_DEV_DISCONN_UNKNOWN;
/* Handle the Disconnection Complete event: close the connection object,
 * report the disconnect (or failed disconnect) to mgmt, drop stored
 * link keys when flagged, honour per-device auto-connect policy for LE
 * reconnection, notify protocols, and re-enable LE advertising if the
 * departed link had suppressed it.
 * NOTE(review): guard/brace/return lines and some locals are elided in
 * this excerpt — confirm control flow against the full source. */
2220 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2222 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2223 u8 reason = hci_to_mgmt_reason(ev->reason);
2224 struct hci_conn_params *params;
2225 struct hci_conn *conn;
2226 bool mgmt_connected;
2229 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2233 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status means our disconnect request itself failed. */
2238 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2239 conn->dst_type, ev->status);
2243 conn->state = BT_CLOSED;
2245 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2246 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2247 reason, mgmt_connected);
2249 if (conn->type == ACL_LINK &&
2250 test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2251 hci_remove_link_key(hdev, &conn->dst);
2253 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2255 switch (params->auto_connect) {
2256 case HCI_AUTO_CONN_LINK_LOSS:
/* Reconnect only when the link was actually lost, not terminated. */
2257 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2261 case HCI_AUTO_CONN_ALWAYS:
2262 list_del_init(&params->action);
2263 list_add(&params->action, &hdev->pend_le_conns);
2264 hci_update_background_scan(hdev);
2274 hci_proto_disconn_cfm(conn, ev->reason);
2277 /* Re-enable advertising if necessary, since it might
2278 * have been disabled by the connection. From the
2279 * HCI_LE_Set_Advertise_Enable command description in
2280 * the core specification (v4.0):
2281 * "The Controller shall continue advertising until the Host
2282 * issues an LE_Set_Advertise_Enable command with
2283 * Advertising_Enable set to 0x00 (Advertising is disabled)
2284 * or until a connection is created or until the Advertising
2285 * is timed out due to Directed Advertising."
2287 if (type == LE_LINK)
2288 mgmt_reenable_advertising(hdev);
2291 hci_dev_unlock(hdev);
/* Handle the Authentication Complete event.  On success the connection
 * gains the AUTH flag and its pending security level; legacy devices
 * cannot be re-authenticated.  Connections still in BT_CONFIG continue
 * with encryption setup (SSP) or are completed; established links get
 * auth confirmation, and any pending encryption request is now sent.
 * NOTE(review): status/NULL guards and else/brace lines are elided in
 * this excerpt — confirm control flow against the full source. */
2294 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2296 struct hci_ev_auth_complete *ev = (void *) skb->data;
2297 struct hci_conn *conn;
2299 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2303 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2308 if (!hci_conn_ssp_enabled(conn) &&
2309 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2310 BT_INFO("re-auth of legacy device is not possible.");
2312 set_bit(HCI_CONN_AUTH, &conn->flags);
2313 conn->sec_level = conn->pending_sec_level;
/* Failed authentication is reported to mgmt for user space. */
2316 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2320 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2321 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2323 if (conn->state == BT_CONFIG) {
2324 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2325 struct hci_cp_set_conn_encrypt cp;
2326 cp.handle = ev->handle;
2328 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2331 conn->state = BT_CONNECTED;
2332 hci_proto_connect_cfm(conn, ev->status);
2333 hci_conn_drop(conn);
2336 hci_auth_cfm(conn, ev->status);
2338 hci_conn_hold(conn);
2339 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2340 hci_conn_drop(conn);
/* Authentication finished; flush any queued encryption change. */
2343 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2345 struct hci_cp_set_conn_encrypt cp;
2346 cp.handle = ev->handle;
2348 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2351 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2352 hci_encrypt_cfm(conn, ev->status, 0x00);
2357 hci_dev_unlock(hdev);
/* Handle the Remote Name Request Complete event.  Feeds the resolved
 * (or failed) name into the discovery machinery, then — like
 * hci_cs_remote_name_req() — requests authentication if the connection
 * still needs it.
 * NOTE(review): guard/brace lines and locking are elided in this
 * excerpt — confirm control flow against the full source. */
2360 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2362 struct hci_ev_remote_name *ev = (void *) skb->data;
2363 struct hci_conn *conn;
2365 BT_DBG("%s", hdev->name);
2367 hci_conn_check_pending(hdev);
2371 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2373 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2376 if (ev->status == 0)
/* The name field may not be NUL-terminated; bound the length. */
2377 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2378 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2380 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2386 if (!hci_outgoing_auth_needed(hdev, conn))
2389 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2390 struct hci_cp_auth_requested cp;
2391 cp.handle = __cpu_to_le16(conn->handle);
2392 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2396 hci_dev_unlock(hdev);
/* Handle the Encryption Change event.  Updates the connection's
 * AUTH/ENCRYPT/FIPS/AES-CCM flags to match the new encryption state,
 * disconnects established links whose encryption change failed, and —
 * for links in BT_CONFIG — completes connection setup, enforcing the
 * Secure Connections Only policy (AES-CCM + P-256 key required).
 * NOTE(review): guard/else/brace lines are elided in this excerpt —
 * confirm control flow against the full source. */
2399 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2401 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2402 struct hci_conn *conn;
2404 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2408 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2414 /* Encryption implies authentication */
2415 set_bit(HCI_CONN_AUTH, &conn->flags);
2416 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2417 conn->sec_level = conn->pending_sec_level;
2419 /* P-256 authentication key implies FIPS */
2420 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2421 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM. */
2423 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2424 conn->type == LE_LINK)
2425 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2427 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2428 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2432 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2434 if (ev->status && conn->state == BT_CONNECTED) {
2435 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2436 hci_conn_drop(conn);
2440 if (conn->state == BT_CONFIG) {
2442 conn->state = BT_CONNECTED;
2444 /* In Secure Connections Only mode, do not allow any
2445 * connections that are not encrypted with AES-CCM
2446 * using a P-256 authenticated combination key.
2448 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2449 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2450 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2451 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2452 hci_conn_drop(conn);
2456 hci_proto_connect_cfm(conn, ev->status);
2457 hci_conn_drop(conn);
2459 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2462 hci_dev_unlock(hdev);
/* Handle the Change Connection Link Key Complete event: mark the link
 * secure on success, clear the pending-auth flag and notify protocols
 * of the key change.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
2465 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2466 struct sk_buff *skb)
2468 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2469 struct hci_conn *conn;
2471 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2475 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2478 set_bit(HCI_CONN_SECURE, &conn->flags);
2480 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2482 hci_key_change_cfm(conn, ev->status);
2485 hci_dev_unlock(hdev);
/* Handle the Read Remote Supported Features Complete event.  Stores the
 * feature page, then continues connection setup: read extended features
 * when both sides support SSP, otherwise resolve the remote name (or
 * report the connection to mgmt) and finish setup unless outgoing
 * authentication is still required.
 * NOTE(review): guard/brace lines and locking are elided in this
 * excerpt — confirm control flow against the full source. */
2488 static void hci_remote_features_evt(struct hci_dev *hdev,
2489 struct sk_buff *skb)
2491 struct hci_ev_remote_features *ev = (void *) skb->data;
2492 struct hci_conn *conn;
2494 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2498 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 0 of the remote feature mask. */
2503 memcpy(conn->features[0], ev->features, 8);
2505 if (conn->state != BT_CONFIG)
2508 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2509 struct hci_cp_read_remote_ext_features cp;
2510 cp.handle = ev->handle;
2512 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2517 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2518 struct hci_cp_remote_name_req cp;
2519 memset(&cp, 0, sizeof(cp));
2520 bacpy(&cp.bdaddr, &conn->dst);
2521 cp.pscan_rep_mode = 0x02;
2522 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2523 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2524 mgmt_device_connected(hdev, &conn->dst, conn->type,
2525 conn->dst_type, 0, NULL, 0,
2528 if (!hci_outgoing_auth_needed(hdev, conn)) {
2529 conn->state = BT_CONNECTED;
2530 hci_proto_connect_cfm(conn, ev->status);
2531 hci_conn_drop(conn);
2535 hci_dev_unlock(hdev);
/* Handle the Command Complete event.  Dispatches to the per-opcode
 * hci_cc_* handler, cancels the command timeout, completes the pending
 * HCI request, and — when the controller signals room for more commands
 * (ncmd) and no reset is in flight — refills the command credit and
 * kicks the command work queue.
 * NOTE(review): break statements between cases are elided in this
 * excerpt; each case is independent in the full source. */
2538 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2540 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* First byte of most cc payloads is the status; kept for req completion. */
2541 u8 status = skb->data[sizeof(*ev)];
2544 skb_pull(skb, sizeof(*ev));
2546 opcode = __le16_to_cpu(ev->opcode);
2549 case HCI_OP_INQUIRY_CANCEL:
2550 hci_cc_inquiry_cancel(hdev, skb);
2553 case HCI_OP_PERIODIC_INQ:
2554 hci_cc_periodic_inq(hdev, skb);
2557 case HCI_OP_EXIT_PERIODIC_INQ:
2558 hci_cc_exit_periodic_inq(hdev, skb);
2561 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2562 hci_cc_remote_name_req_cancel(hdev, skb);
2565 case HCI_OP_ROLE_DISCOVERY:
2566 hci_cc_role_discovery(hdev, skb);
2569 case HCI_OP_READ_LINK_POLICY:
2570 hci_cc_read_link_policy(hdev, skb);
2573 case HCI_OP_WRITE_LINK_POLICY:
2574 hci_cc_write_link_policy(hdev, skb);
2577 case HCI_OP_READ_DEF_LINK_POLICY:
2578 hci_cc_read_def_link_policy(hdev, skb);
2581 case HCI_OP_WRITE_DEF_LINK_POLICY:
2582 hci_cc_write_def_link_policy(hdev, skb);
2586 hci_cc_reset(hdev, skb);
2589 case HCI_OP_WRITE_LOCAL_NAME:
2590 hci_cc_write_local_name(hdev, skb);
2593 case HCI_OP_READ_LOCAL_NAME:
2594 hci_cc_read_local_name(hdev, skb);
2597 case HCI_OP_WRITE_AUTH_ENABLE:
2598 hci_cc_write_auth_enable(hdev, skb);
2601 case HCI_OP_WRITE_ENCRYPT_MODE:
2602 hci_cc_write_encrypt_mode(hdev, skb);
2605 case HCI_OP_WRITE_SCAN_ENABLE:
2606 hci_cc_write_scan_enable(hdev, skb);
2609 case HCI_OP_READ_CLASS_OF_DEV:
2610 hci_cc_read_class_of_dev(hdev, skb);
2613 case HCI_OP_WRITE_CLASS_OF_DEV:
2614 hci_cc_write_class_of_dev(hdev, skb);
2617 case HCI_OP_READ_VOICE_SETTING:
2618 hci_cc_read_voice_setting(hdev, skb);
2621 case HCI_OP_WRITE_VOICE_SETTING:
2622 hci_cc_write_voice_setting(hdev, skb);
2625 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2626 hci_cc_read_num_supported_iac(hdev, skb);
2629 case HCI_OP_WRITE_SSP_MODE:
2630 hci_cc_write_ssp_mode(hdev, skb);
2633 case HCI_OP_WRITE_SC_SUPPORT:
2634 hci_cc_write_sc_support(hdev, skb);
2637 case HCI_OP_READ_LOCAL_VERSION:
2638 hci_cc_read_local_version(hdev, skb);
2641 case HCI_OP_READ_LOCAL_COMMANDS:
2642 hci_cc_read_local_commands(hdev, skb);
2645 case HCI_OP_READ_LOCAL_FEATURES:
2646 hci_cc_read_local_features(hdev, skb);
2649 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2650 hci_cc_read_local_ext_features(hdev, skb);
2653 case HCI_OP_READ_BUFFER_SIZE:
2654 hci_cc_read_buffer_size(hdev, skb);
2657 case HCI_OP_READ_BD_ADDR:
2658 hci_cc_read_bd_addr(hdev, skb);
2661 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2662 hci_cc_read_page_scan_activity(hdev, skb);
2665 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2666 hci_cc_write_page_scan_activity(hdev, skb);
2669 case HCI_OP_READ_PAGE_SCAN_TYPE:
2670 hci_cc_read_page_scan_type(hdev, skb);
2673 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2674 hci_cc_write_page_scan_type(hdev, skb);
2677 case HCI_OP_READ_DATA_BLOCK_SIZE:
2678 hci_cc_read_data_block_size(hdev, skb);
2681 case HCI_OP_READ_FLOW_CONTROL_MODE:
2682 hci_cc_read_flow_control_mode(hdev, skb);
2685 case HCI_OP_READ_LOCAL_AMP_INFO:
2686 hci_cc_read_local_amp_info(hdev, skb);
2689 case HCI_OP_READ_CLOCK:
2690 hci_cc_read_clock(hdev, skb);
2693 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2694 hci_cc_read_local_amp_assoc(hdev, skb);
2697 case HCI_OP_READ_INQ_RSP_TX_POWER:
2698 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2701 case HCI_OP_PIN_CODE_REPLY:
2702 hci_cc_pin_code_reply(hdev, skb);
2705 case HCI_OP_PIN_CODE_NEG_REPLY:
2706 hci_cc_pin_code_neg_reply(hdev, skb);
2709 case HCI_OP_READ_LOCAL_OOB_DATA:
2710 hci_cc_read_local_oob_data(hdev, skb);
2713 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2714 hci_cc_read_local_oob_ext_data(hdev, skb);
2717 case HCI_OP_LE_READ_BUFFER_SIZE:
2718 hci_cc_le_read_buffer_size(hdev, skb);
2721 case HCI_OP_LE_READ_LOCAL_FEATURES:
2722 hci_cc_le_read_local_features(hdev, skb);
2725 case HCI_OP_LE_READ_ADV_TX_POWER:
2726 hci_cc_le_read_adv_tx_power(hdev, skb);
2729 case HCI_OP_USER_CONFIRM_REPLY:
2730 hci_cc_user_confirm_reply(hdev, skb);
2733 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2734 hci_cc_user_confirm_neg_reply(hdev, skb);
2737 case HCI_OP_USER_PASSKEY_REPLY:
2738 hci_cc_user_passkey_reply(hdev, skb);
2741 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2742 hci_cc_user_passkey_neg_reply(hdev, skb);
2745 case HCI_OP_LE_SET_RANDOM_ADDR:
2746 hci_cc_le_set_random_addr(hdev, skb);
2749 case HCI_OP_LE_SET_ADV_ENABLE:
2750 hci_cc_le_set_adv_enable(hdev, skb);
2753 case HCI_OP_LE_SET_SCAN_PARAM:
2754 hci_cc_le_set_scan_param(hdev, skb);
2757 case HCI_OP_LE_SET_SCAN_ENABLE:
2758 hci_cc_le_set_scan_enable(hdev, skb);
2761 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2762 hci_cc_le_read_white_list_size(hdev, skb);
2765 case HCI_OP_LE_CLEAR_WHITE_LIST:
2766 hci_cc_le_clear_white_list(hdev, skb);
2769 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2770 hci_cc_le_add_to_white_list(hdev, skb);
2773 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2774 hci_cc_le_del_from_white_list(hdev, skb);
2777 case HCI_OP_LE_READ_SUPPORTED_STATES:
2778 hci_cc_le_read_supported_states(hdev, skb);
2781 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2782 hci_cc_write_le_host_supported(hdev, skb);
2785 case HCI_OP_LE_SET_ADV_PARAM:
2786 hci_cc_set_adv_param(hdev, skb);
2789 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2790 hci_cc_write_remote_amp_assoc(hdev, skb);
2793 case HCI_OP_READ_RSSI:
2794 hci_cc_read_rssi(hdev, skb);
2797 case HCI_OP_READ_TX_POWER:
2798 hci_cc_read_tx_power(hdev, skb);
/* Unhandled opcodes are just logged. */
2802 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2806 if (opcode != HCI_OP_NOP)
2807 cancel_delayed_work(&hdev->cmd_timer);
2809 hci_req_cmd_complete(hdev, opcode, status);
/* Controller granted more command credits: resume the command queue. */
2811 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2812 atomic_set(&hdev->cmd_cnt, 1);
2813 if (!skb_queue_empty(&hdev->cmd_q))
2814 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the Command Status event.  Dispatches to the per-opcode
 * hci_cs_* handler, cancels the command timeout, completes the pending
 * request when no further event is expected, and refills the command
 * credit when the controller allows (ncmd) and no reset is in flight.
 * NOTE(review): break statements and one request-completion guard line
 * are elided in this excerpt; see the full source. */
2818 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2820 struct hci_ev_cmd_status *ev = (void *) skb->data;
2823 skb_pull(skb, sizeof(*ev));
2825 opcode = __le16_to_cpu(ev->opcode);
2828 case HCI_OP_INQUIRY:
2829 hci_cs_inquiry(hdev, ev->status);
2832 case HCI_OP_CREATE_CONN:
2833 hci_cs_create_conn(hdev, ev->status);
2836 case HCI_OP_ADD_SCO:
2837 hci_cs_add_sco(hdev, ev->status);
2840 case HCI_OP_AUTH_REQUESTED:
2841 hci_cs_auth_requested(hdev, ev->status);
2844 case HCI_OP_SET_CONN_ENCRYPT:
2845 hci_cs_set_conn_encrypt(hdev, ev->status);
2848 case HCI_OP_REMOTE_NAME_REQ:
2849 hci_cs_remote_name_req(hdev, ev->status);
2852 case HCI_OP_READ_REMOTE_FEATURES:
2853 hci_cs_read_remote_features(hdev, ev->status);
2856 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2857 hci_cs_read_remote_ext_features(hdev, ev->status);
2860 case HCI_OP_SETUP_SYNC_CONN:
2861 hci_cs_setup_sync_conn(hdev, ev->status);
2864 case HCI_OP_SNIFF_MODE:
2865 hci_cs_sniff_mode(hdev, ev->status);
2868 case HCI_OP_EXIT_SNIFF_MODE:
2869 hci_cs_exit_sniff_mode(hdev, ev->status);
2872 case HCI_OP_DISCONNECT:
2873 hci_cs_disconnect(hdev, ev->status);
2876 case HCI_OP_CREATE_PHY_LINK:
2877 hci_cs_create_phylink(hdev, ev->status);
2880 case HCI_OP_ACCEPT_PHY_LINK:
2881 hci_cs_accept_phylink(hdev, ev->status);
2884 case HCI_OP_LE_CREATE_CONN:
2885 hci_cs_le_create_conn(hdev, ev->status);
2888 case HCI_OP_LE_START_ENC:
2889 hci_cs_le_start_enc(hdev, ev->status);
2893 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2897 if (opcode != HCI_OP_NOP)
2898 cancel_delayed_work(&hdev->cmd_timer);
/* Complete the request only if the sent command expects no
 * dedicated completion event beyond this status. */
2901 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2902 hci_req_cmd_complete(hdev, opcode, ev->status);
2904 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2905 atomic_set(&hdev->cmd_cnt, 1);
2906 if (!skb_queue_empty(&hdev->cmd_q))
2907 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the Role Change event: mirror the new master/slave role into
 * the connection flags, clear the pending role-switch flag, and notify
 * the upper layer of the outcome.
 * NOTE(review): status/NULL guards and locking appear elided in this
 * excerpt — confirm against the full source. */
2911 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2913 struct hci_ev_role_change *ev = (void *) skb->data;
2914 struct hci_conn *conn;
2916 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2920 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2924 clear_bit(HCI_CONN_MASTER, &conn->flags);
2926 set_bit(HCI_CONN_MASTER, &conn->flags);
2929 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2931 hci_role_switch_cfm(conn, ev->status, ev->role);
2934 hci_dev_unlock(hdev);
/* Handle the Number of Completed Packets event (packet-based flow
 * control).  For each reported handle, credit the completed packets
 * back to the per-type transmit budget (ACL / SCO / LE), clamped to the
 * controller's advertised maximum, then kick the TX work queue.
 * NOTE(review): case labels, break/continue lines and some locals are
 * elided in this excerpt — confirm against the full source. */
2937 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2939 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2942 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2943 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Sanity-check that the skb actually holds num_hndl entries. */
2947 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2948 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2949 BT_DBG("%s bad parameters", hdev->name);
2953 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2955 for (i = 0; i < ev->num_hndl; i++) {
2956 struct hci_comp_pkts_info *info = &ev->handles[i];
2957 struct hci_conn *conn;
2958 __u16 handle, count;
2960 handle = __le16_to_cpu(info->handle);
2961 count = __le16_to_cpu(info->count);
2963 conn = hci_conn_hash_lookup_handle(hdev, handle);
2967 conn->sent -= count;
2969 switch (conn->type) {
2971 hdev->acl_cnt += count;
2972 if (hdev->acl_cnt > hdev->acl_pkts)
2973 hdev->acl_cnt = hdev->acl_pkts;
/* LE shares the ACL budget when no dedicated LE buffers exist. */
2977 if (hdev->le_pkts) {
2978 hdev->le_cnt += count;
2979 if (hdev->le_cnt > hdev->le_pkts)
2980 hdev->le_cnt = hdev->le_pkts;
2982 hdev->acl_cnt += count;
2983 if (hdev->acl_cnt > hdev->acl_pkts)
2984 hdev->acl_cnt = hdev->acl_pkts;
2989 hdev->sco_cnt += count;
2990 if (hdev->sco_cnt > hdev->sco_pkts)
2991 hdev->sco_cnt = hdev->sco_pkts;
2995 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3000 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a connection in a device-type-aware way: BR/EDR
 * devices look up the connection hash directly, AMP devices go through
 * the channel table first.  Unknown device types are logged.
 * NOTE(review): case labels, the chan->conn dereference and return
 * lines are elided in this excerpt — confirm against the full source. */
3003 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3006 struct hci_chan *chan;
3008 switch (hdev->dev_type) {
3010 return hci_conn_hash_lookup_handle(hdev, handle);
3012 chan = hci_chan_lookup_handle(hdev, handle);
3017 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Handle the Number of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers).  Credits completed blocks back to
 * the shared block budget, clamped to num_blocks, and kicks TX work.
 * NOTE(review): case labels and break/continue lines are elided in this
 * excerpt — confirm against the full source. */
3024 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3026 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3029 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3030 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Sanity-check that the skb actually holds num_hndl entries. */
3034 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3035 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3036 BT_DBG("%s bad parameters", hdev->name);
3040 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3043 for (i = 0; i < ev->num_hndl; i++) {
3044 struct hci_comp_blocks_info *info = &ev->handles[i];
3045 struct hci_conn *conn = NULL;
3046 __u16 handle, block_count;
3048 handle = __le16_to_cpu(info->handle);
3049 block_count = __le16_to_cpu(info->blocks);
3051 conn = __hci_conn_lookup_handle(hdev, handle);
3055 conn->sent -= block_count;
3057 switch (conn->type) {
3060 hdev->block_cnt += block_count;
3061 if (hdev->block_cnt > hdev->num_blocks)
3062 hdev->block_cnt = hdev->num_blocks;
3066 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3071 queue_work(hdev->workqueue, &hdev->tx_work);
/* HCI Mode Change event handler: records the new link mode
 * (active/sniff) on the connection, tracks the power-save flag when the
 * change was remote-initiated (no local MODE_CHANGE pending), and kicks
 * off any deferred SCO setup.
 */
3074 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3076 struct hci_ev_mode_change *ev = (void *) skb->data;
3077 struct hci_conn *conn;
3079 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3083 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3085 conn->mode = ev->mode;
/* Only update the POWER_SAVE flag if the mode change was not one we
 * requested ourselves. */
3087 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3089 if (conn->mode == HCI_CM_ACTIVE)
3090 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3092 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* A SCO setup may have been deferred until the mode change settled. */
3095 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3096 hci_sco_setup(conn, ev->status);
3099 hci_dev_unlock(hdev);
/* HCI PIN Code Request event handler: extends the disconnect timeout
 * for the pairing, rejects the request outright when the adapter is not
 * pairable, and otherwise forwards the request to user space via mgmt
 * (flagging whether a 16-digit secure PIN is required).
 */
3102 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3104 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3105 struct hci_conn *conn;
3107 BT_DBG("%s", hdev->name);
3111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bump the idle-disconnect timeout to the pairing timeout while the
 * user enters a PIN; hold/drop only adjusts the refcount around the
 * timeout update. */
3115 if (conn->state == BT_CONNECTED) {
3116 hci_conn_hold(conn);
3117 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3118 hci_conn_drop(conn);
/* Non-pairable adapters reject PIN requests immediately. */
3121 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3122 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3123 sizeof(ev->bdaddr), &ev->bdaddr);
3124 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3127 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3132 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3136 hci_dev_unlock(hdev);
/* HCI Link Key Request event handler: looks up a stored link key for
 * the peer and replies with it, unless the key's strength is
 * insufficient for the pending security level (unauthenticated keys for
 * MITM-required connections, short-PIN combination keys for high/FIPS
 * security), in which case a negative reply is sent so a fresh pairing
 * takes place.
 */
3139 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3141 struct hci_ev_link_key_req *ev = (void *) skb->data;
3142 struct hci_cp_link_key_reply cp;
3143 struct hci_conn *conn;
3144 struct link_key *key;
3146 BT_DBG("%s", hdev->name);
/* Without mgmt the kernel keeps no key store; let user space (or a
 * negative reply path) handle it. */
3148 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3153 key = hci_find_link_key(hdev, &ev->bdaddr);
3155 BT_DBG("%s link key not found for %pMR", hdev->name,
3160 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3163 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Refuse an unauthenticated key when the connection requires MITM
 * protection (auth_type bit 0). */
3165 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3166 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3167 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3168 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Combination keys from PINs shorter than 16 digits are too weak for
 * high/FIPS security levels. */
3172 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3173 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3174 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3175 BT_DBG("%s ignoring key unauthenticated for high security",
3180 conn->key_type = key->type;
3181 conn->pin_length = key->pin_len;
3184 bacpy(&cp.bdaddr, &ev->bdaddr);
3185 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3187 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3189 hci_dev_unlock(hdev);
/* Negative-reply path: no usable key for this peer. */
3194 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3195 hci_dev_unlock(hdev);
/* HCI Link Key Notification event handler: stores the newly generated
 * link key (when mgmt is active), notifies user space, and discards
 * debug keys from the kernel list unless HCI_KEEP_DEBUG_KEYS is set.
 * Also records whether the key should be flushed on disconnect via
 * HCI_CONN_FLUSH_KEY.
 */
3198 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3200 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3201 struct hci_conn *conn;
3202 struct link_key *key;
3206 BT_DBG("%s", hdev->name);
3210 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3212 hci_conn_hold(conn);
3213 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3214 pin_len = conn->pin_length;
/* A "changed combination" key keeps the original key's type. */
3216 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3217 conn->key_type = ev->key_type;
3219 hci_conn_drop(conn);
3222 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3225 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3226 ev->key_type, pin_len, &persistent);
3230 mgmt_new_link_key(hdev, key, persistent);
3232 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3233 * is set. If it's not set simply remove the key from the kernel
3234 * list (we've still notified user space about it but with
3235 * store_hint being 0).
3237 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3238 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3239 list_del(&key->list);
/* Remember whether this key is persistent so it can be flushed (or
 * kept) when the connection goes down. */
3243 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3245 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3249 hci_dev_unlock(hdev);
/* HCI Read Clock Offset Complete event handler: caches the peer's clock
 * offset in the inquiry cache entry so later paging can use it.
 */
3252 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3254 struct hci_ev_clock_offset *ev = (void *) skb->data;
3255 struct hci_conn *conn;
3257 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3261 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3262 if (conn && !ev->status) {
3263 struct inquiry_entry *ie;
3265 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3267 ie->data.clock_offset = ev->clock_offset;
3268 ie->timestamp = jiffies;
3272 hci_dev_unlock(hdev);
/* HCI Connection Packet Type Changed event handler: updates the
 * connection's negotiated packet-type mask on success.
 */
3275 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3277 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3278 struct hci_conn *conn;
3280 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3284 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3285 if (conn && !ev->status)
3286 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3288 hci_dev_unlock(hdev);
/* HCI Page Scan Repetition Mode Change event handler: refreshes the
 * cached page-scan repetition mode for the peer in the inquiry cache.
 */
3291 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3293 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3294 struct inquiry_entry *ie;
3296 BT_DBG("%s", hdev->name);
3300 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3302 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3303 ie->timestamp = jiffies;
3306 hci_dev_unlock(hdev);
/* HCI Inquiry Result with RSSI event handler: parses one of the two
 * wire formats (with or without a pscan_mode field — distinguished by
 * the per-response record size), updates the inquiry cache and emits
 * mgmt device-found events. Responses during periodic inquiry are
 * ignored.
 */
3309 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3310 struct sk_buff *skb)
3312 struct inquiry_data data;
3313 int num_rsp = *((__u8 *) skb->data);
3315 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3320 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
/* Record size discriminates the two event layouts: anything other
 * than inquiry_info_with_rssi means the variant carrying pscan_mode. */
3325 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3326 struct inquiry_info_with_rssi_and_pscan_mode *info;
3327 info = (void *) (skb->data + 1);
3329 for (; num_rsp; num_rsp--, info++) {
3332 bacpy(&data.bdaddr, &info->bdaddr);
3333 data.pscan_rep_mode = info->pscan_rep_mode;
3334 data.pscan_period_mode = info->pscan_period_mode;
3335 data.pscan_mode = info->pscan_mode;
3336 memcpy(data.dev_class, info->dev_class, 3);
3337 data.clock_offset = info->clock_offset;
3338 data.rssi = info->rssi;
3339 data.ssp_mode = 0x00;
3341 flags = hci_inquiry_cache_update(hdev, &data, false);
3343 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3344 info->dev_class, info->rssi,
3345 flags, NULL, 0, NULL, 0);
3348 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3350 for (; num_rsp; num_rsp--, info++) {
3353 bacpy(&data.bdaddr, &info->bdaddr);
3354 data.pscan_rep_mode = info->pscan_rep_mode;
3355 data.pscan_period_mode = info->pscan_period_mode;
3356 data.pscan_mode = 0x00;
3357 memcpy(data.dev_class, info->dev_class, 3);
3358 data.clock_offset = info->clock_offset;
3359 data.rssi = info->rssi;
3360 data.ssp_mode = 0x00;
3362 flags = hci_inquiry_cache_update(hdev, &data, false);
3364 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3365 info->dev_class, info->rssi,
3366 flags, NULL, 0, NULL, 0);
3370 hci_dev_unlock(hdev);
/* HCI Read Remote Extended Features Complete event handler: stores the
 * received feature page, derives SSP/SC enablement from host-features
 * page 1, and finishes connection setup for links still in BT_CONFIG —
 * either requesting the remote name, notifying mgmt of the connection,
 * and/or confirming the connection when no outgoing authentication is
 * needed.
 */
3373 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3374 struct sk_buff *skb)
3376 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3377 struct hci_conn *conn;
3379 BT_DBG("%s", hdev->name);
3383 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3387 if (ev->page < HCI_MAX_PAGES)
3388 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host features (SSP/SC support). */
3390 if (!ev->status && ev->page == 0x01) {
3391 struct inquiry_entry *ie;
3393 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3395 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3397 if (ev->features[0] & LMP_HOST_SSP) {
3398 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3400 /* It is mandatory by the Bluetooth specification that
3401 * Extended Inquiry Results are only used when Secure
3402 * Simple Pairing is enabled, but some devices violate
3405 * To make these devices work, the internal SSP
3406 * enabled flag needs to be cleared if the remote host
3407 * features do not indicate SSP support */
3408 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3411 if (ev->features[0] & LMP_HOST_SC)
3412 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* The remainder only applies to connections still being set up. */
3415 if (conn->state != BT_CONFIG)
3418 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3419 struct hci_cp_remote_name_req cp;
3420 memset(&cp, 0, sizeof(cp));
3421 bacpy(&cp.bdaddr, &conn->dst);
3422 cp.pscan_rep_mode = 0x02;
3423 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3424 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3425 mgmt_device_connected(hdev, &conn->dst, conn->type,
3426 conn->dst_type, 0, NULL, 0,
3429 if (!hci_outgoing_auth_needed(hdev, conn)) {
3430 conn->state = BT_CONNECTED;
3431 hci_proto_connect_cfm(conn, ev->status);
3432 hci_conn_drop(conn);
3436 hci_dev_unlock(hdev);
/* HCI Synchronous Connection Complete event handler: finalizes SCO/eSCO
 * setup. Some controllers report an eSCO attempt that fell back to SCO
 * against the BD_ADDR with type SCO_LINK, so a second lookup by
 * ESCO_LINK is performed and the connection retyped. On a set of known
 * eSCO rejection errors, setup is retried with a fallback packet-type
 * mask before giving up.
 */
3439 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3440 struct sk_buff *skb)
3442 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3443 struct hci_conn *conn;
3445 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3449 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* eSCO attempt may have been downgraded to SCO by the controller;
 * retry the lookup under the original ESCO_LINK type. */
3451 if (ev->link_type == ESCO_LINK)
3454 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3458 conn->type = SCO_LINK;
3461 switch (ev->status) {
3463 conn->handle = __le16_to_cpu(ev->handle);
3464 conn->state = BT_CONNECTED;
3466 hci_conn_add_sysfs(conn);
/* Errors for which an eSCO setup retry with relaxed packet types is
 * worthwhile. */
3469 case 0x10: /* Connection Accept Timeout */
3470 case 0x0d: /* Connection Rejected due to Limited Resources */
3471 case 0x11: /* Unsupported Feature or Parameter Value */
3472 case 0x1c: /* SCO interval rejected */
3473 case 0x1a: /* Unsupported Remote Feature */
3474 case 0x1f: /* Unspecified error */
3475 case 0x20: /* Unsupported LMP Parameter value */
3477 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3478 (hdev->esco_type & EDR_ESCO_MASK);
3479 if (hci_setup_sync(conn, conn->link->handle))
3485 conn->state = BT_CLOSED;
3489 hci_proto_connect_cfm(conn, ev->status);
3494 hci_dev_unlock(hdev);
/* Compute the used length of an EIR (Extended Inquiry Response) buffer
 * by walking its length-prefixed fields; each field occupies
 * field_len + 1 bytes (the length byte itself plus the data).
 * NOTE(review): elided listing — the zero-length terminator check and
 * the return statements are not visible here.
 */
3497 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3501 while (parsed < eir_len) {
3502 u8 field_len = eir[0];
3507 parsed += field_len + 1;
3508 eir += field_len + 1;
/* HCI Extended Inquiry Result event handler: like the RSSI variant but
 * each response carries EIR data. The EIR is scanned for a device name
 * (when mgmt is active) to decide whether a separate name resolution is
 * needed, then the inquiry cache is updated and a device-found event
 * with the EIR payload is emitted. Skipped during periodic inquiry.
 */
3514 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3515 struct sk_buff *skb)
3517 struct inquiry_data data;
3518 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3519 int num_rsp = *((__u8 *) skb->data);
3522 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3527 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3532 for (; num_rsp; num_rsp--, info++) {
3536 bacpy(&data.bdaddr, &info->bdaddr);
3537 data.pscan_rep_mode = info->pscan_rep_mode;
3538 data.pscan_period_mode = info->pscan_period_mode;
3539 data.pscan_mode = 0x00;
3540 memcpy(data.dev_class, info->dev_class, 3);
3541 data.clock_offset = info->clock_offset;
3542 data.rssi = info->rssi;
/* EIR responses imply the remote supports Secure Simple Pairing. */
3543 data.ssp_mode = 0x01;
/* If the EIR already contains the device name we can skip a remote
 * name request later. */
3545 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3546 name_known = eir_has_data_type(info->data,
3552 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3554 eir_len = eir_get_length(info->data, sizeof(info->data));
3556 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3557 info->dev_class, info->rssi,
3558 flags, info->data, eir_len, NULL, 0);
3561 hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event handler. For LE links this
 * promotes the pending security level, clears the encrypt-pending flag,
 * disconnects on authentication failure, and completes connection setup
 * (connect_cfm for BT_CONFIG, auth_cfm otherwise). BR/EDR links are
 * handled via the auth_complete event instead and exit early.
 */
3564 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3565 struct sk_buff *skb)
3567 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3568 struct hci_conn *conn;
3570 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3571 __le16_to_cpu(ev->handle));
3575 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3579 /* For BR/EDR the necessary steps are taken through the
3580 * auth_complete event.
3582 if (conn->type != LE_LINK)
3586 conn->sec_level = conn->pending_sec_level;
3588 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on an established link means authentication
 * failed — tear the link down. */
3590 if (ev->status && conn->state == BT_CONNECTED) {
3591 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3592 hci_conn_drop(conn);
3596 if (conn->state == BT_CONFIG) {
3598 conn->state = BT_CONNECTED;
3600 hci_proto_connect_cfm(conn, ev->status);
3601 hci_conn_drop(conn);
3603 hci_auth_cfm(conn, ev->status);
3605 hci_conn_hold(conn);
3606 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3607 hci_conn_drop(conn);
3611 hci_dev_unlock(hdev);
/* Derive the authentication requirements to send in an IO Capability
 * Reply, combining the remote's stated requirements with our own.
 * Bit 0 of the auth value is the MITM-protection flag; the upper bits
 * encode the bonding type.
 */
3614 static u8 hci_get_auth_req(struct hci_conn *conn)
3616 /* If remote requests no-bonding follow that lead */
3617 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3618 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3619 return conn->remote_auth | (conn->auth_type & 0x01);
3621 /* If both remote and local have enough IO capabilities, require
3624 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3625 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3626 return conn->remote_auth | 0x01;
3628 /* No MITM protection possible so ignore remote requirement */
3629 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* HCI IO Capability Request event handler: replies with our IO
 * capability, OOB-data presence and authentication requirements when
 * pairing is allowed (adapter pairable, or remote asked for
 * no-bonding); otherwise sends a negative reply with "pairing not
 * allowed".
 */
3632 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3634 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3635 struct hci_conn *conn;
3637 BT_DBG("%s", hdev->name);
3641 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3645 hci_conn_hold(conn);
3647 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Allow the exchange if we are pairable, or if the remote only wants
 * a no-bonding association (MITM bit masked off). */
3650 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3651 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3652 struct hci_cp_io_capability_reply cp;
3654 bacpy(&cp.bdaddr, &ev->bdaddr);
3655 /* Change the IO capability from KeyboardDisplay
3656 * to DisplayYesNo as it is not supported by BT spec. */
3657 cp.capability = (conn->io_capability == 0x04) ?
3658 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3660 /* If we are initiators, there is no remote information yet */
3661 if (conn->remote_auth == 0xff) {
3662 cp.authentication = conn->auth_type;
3664 /* Request MITM protection if our IO caps allow it
3665 * except for the no-bonding case.
3666 * conn->auth_type is not updated here since
3667 * that might cause the user confirmation to be
3668 * rejected in case the remote doesn't have the
3669 * IO capabilities for MITM.
3671 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3672 cp.authentication != HCI_AT_NO_BONDING)
3673 cp.authentication |= 0x01;
/* Responder path: combine both sides' requirements. */
3675 conn->auth_type = hci_get_auth_req(conn);
3676 cp.authentication = conn->auth_type;
3679 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3680 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3685 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3688 struct hci_cp_io_capability_neg_reply cp;
3690 bacpy(&cp.bdaddr, &ev->bdaddr);
3691 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3693 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3698 hci_dev_unlock(hdev);
/* HCI IO Capability Response event handler: caches the remote side's
 * IO capability and authentication requirements, and records whether
 * the remote has OOB data available.
 */
3701 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3703 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3704 struct hci_conn *conn;
3706 BT_DBG("%s", hdev->name);
3710 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3714 conn->remote_cap = ev->capability;
3715 conn->remote_auth = ev->authentication;
3717 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3720 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event handler: decides between
 * rejecting (we need MITM but the remote cannot provide it),
 * auto-accepting (neither side needs MITM, possibly after a configured
 * delay), asking user space for authorization (confirm_hint = 1), or
 * forwarding the numeric comparison to user space via mgmt.
 */
3723 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3724 struct sk_buff *skb)
3726 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3727 int loc_mitm, rem_mitm, confirm_hint = 0;
3728 struct hci_conn *conn;
3730 BT_DBG("%s", hdev->name);
3734 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3737 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirements is the MITM-protection flag. */
3741 loc_mitm = (conn->auth_type & 0x01);
3742 rem_mitm = (conn->remote_auth & 0x01);
3744 /* If we require MITM but the remote device can't provide that
3745 * (it has NoInputNoOutput) then reject the confirmation request
3747 if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3748 BT_DBG("Rejecting request: remote device can't provide MITM");
3749 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3750 sizeof(ev->bdaddr), &ev->bdaddr);
3754 /* If no side requires MITM protection; auto-accept */
3755 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3756 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3758 /* If we're not the initiators request authorization to
3759 * proceed from user space (mgmt_user_confirm with
3760 * confirm_hint set to 1). The exception is if neither
3761 * side had MITM in which case we do auto-accept.
3763 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3764 (loc_mitm || rem_mitm)) {
3765 BT_DBG("Confirming auto-accept as acceptor");
3770 BT_DBG("Auto-accept of user confirmation with %ums delay",
3771 hdev->auto_accept_delay);
/* Optional delayed auto-accept, handled by auto_accept_work. */
3773 if (hdev->auto_accept_delay > 0) {
3774 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3775 queue_delayed_work(conn->hdev->workqueue,
3776 &conn->auto_accept_work, delay);
3780 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3781 sizeof(ev->bdaddr), &ev->bdaddr);
3786 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3787 le32_to_cpu(ev->passkey), confirm_hint);
3790 hci_dev_unlock(hdev);
/* HCI User Passkey Request event handler: simply forwards the request
 * to user space via mgmt when the mgmt interface is active.
 */
3793 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3794 struct sk_buff *skb)
3796 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3798 BT_DBG("%s", hdev->name);
3800 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3801 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* HCI User Passkey Notification event handler: records the passkey to
 * display, resets the entered-digit counter, and notifies user space
 * via mgmt.
 */
3804 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3805 struct sk_buff *skb)
3807 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3808 struct hci_conn *conn;
3810 BT_DBG("%s", hdev->name);
3812 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3816 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3817 conn->passkey_entered = 0;
3819 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3820 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3821 conn->dst_type, conn->passkey_notify,
3822 conn->passkey_entered);
/* HCI Keypress Notification event handler: tracks how many passkey
 * digits the remote has entered (started/entered/erased/cleared/
 * completed) and mirrors the updated count to user space via mgmt.
 */
3825 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3827 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3828 struct hci_conn *conn;
3830 BT_DBG("%s", hdev->name);
3832 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3837 case HCI_KEYPRESS_STARTED:
3838 conn->passkey_entered = 0;
3841 case HCI_KEYPRESS_ENTERED:
3842 conn->passkey_entered++;
3845 case HCI_KEYPRESS_ERASED:
3846 conn->passkey_entered--;
3849 case HCI_KEYPRESS_CLEARED:
3850 conn->passkey_entered = 0;
3853 case HCI_KEYPRESS_COMPLETED:
3857 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3858 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3859 conn->dst_type, conn->passkey_notify,
3860 conn->passkey_entered);
/* HCI Simple Pairing Complete event handler: reports pairing failure to
 * mgmt only when we did not initiate authentication ourselves (to avoid
 * a duplicate auth_failed from the auth_complete path), then drops the
 * connection reference taken earlier in the pairing flow.
 */
3863 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3864 struct sk_buff *skb)
3866 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3867 struct hci_conn *conn;
3869 BT_DBG("%s", hdev->name);
3873 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3877 /* To avoid duplicate auth_failed events to user space we check
3878 * the HCI_CONN_AUTH_PEND flag which will be set if we
3879 * initiated the authentication. A traditional auth_complete
3880 * event gets always produced as initiator and is also mapped to
3881 * the mgmt_auth_failed event */
3882 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3883 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3886 hci_conn_drop(conn);
3889 hci_dev_unlock(hdev);
/* HCI Remote Host Supported Features Notification event handler: copies
 * the host-features page into the connection's feature table (page 1)
 * and caches the remote's SSP support in its inquiry cache entry.
 */
3892 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3893 struct sk_buff *skb)
3895 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3896 struct inquiry_entry *ie;
3897 struct hci_conn *conn;
3899 BT_DBG("%s", hdev->name);
3903 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3905 memcpy(conn->features[1], ev->features, 8);
3907 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3909 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3911 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event handler: replies with locally
 * stored out-of-band pairing data for the peer. When Secure Connections
 * is enabled both P-192 and P-256 values are returned via the extended
 * reply; otherwise only the P-192 values are sent. Without stored data
 * a negative reply is issued.
 */
3914 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3915 struct sk_buff *skb)
3917 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3918 struct oob_data *data;
3920 BT_DBG("%s", hdev->name);
3924 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3927 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3929 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3930 struct hci_cp_remote_oob_ext_data_reply cp;
3932 bacpy(&cp.bdaddr, &ev->bdaddr);
3933 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3934 memcpy(cp.randomizer192, data->randomizer192,
3935 sizeof(cp.randomizer192));
3936 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3937 memcpy(cp.randomizer256, data->randomizer256,
3938 sizeof(cp.randomizer256));
3940 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3943 struct hci_cp_remote_oob_data_reply cp;
3945 bacpy(&cp.bdaddr, &ev->bdaddr);
3946 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3947 memcpy(cp.randomizer, data->randomizer192,
3948 sizeof(cp.randomizer));
3950 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
/* No stored OOB data for this peer: negative reply. */
3954 struct hci_cp_remote_oob_data_neg_reply cp;
3956 bacpy(&cp.bdaddr, &ev->bdaddr);
3957 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3962 hci_dev_unlock(hdev);
/* HCI (AMP) Physical Link Complete event handler: marks the AMP hcon
 * connected, copies the peer address from the associated BR/EDR
 * connection, registers it in sysfs, and confirms the physical link to
 * the AMP manager.
 */
3965 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3966 struct sk_buff *skb)
3968 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3969 struct hci_conn *hcon, *bredr_hcon;
3971 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3976 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3978 hci_dev_unlock(hdev);
3984 hci_dev_unlock(hdev);
/* The AMP link belongs to an L2CAP connection running on a BR/EDR
 * hcon; fetch it via the AMP manager. */
3988 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3990 hcon->state = BT_CONNECTED;
3991 bacpy(&hcon->dst, &bredr_hcon->dst);
3993 hci_conn_hold(hcon);
3994 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3995 hci_conn_drop(hcon);
3997 hci_conn_add_sysfs(hcon);
3999 amp_physical_cfm(bredr_hcon, hcon);
4001 hci_dev_unlock(hdev);
/* HCI (AMP) Logical Link Complete event handler: creates an hci_chan
 * for the new logical link on the physical-link hcon, and when a BR/EDR
 * L2CAP channel is waiting on the AMP manager, confirms the logical
 * link to L2CAP (adjusting the connection MTU to the block MTU).
 */
4004 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4006 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4007 struct hci_conn *hcon;
4008 struct hci_chan *hchan;
4009 struct amp_mgr *mgr;
4011 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4012 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4015 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4019 /* Create AMP hchan */
4020 hchan = hci_chan_create(hcon);
4024 hchan->handle = le16_to_cpu(ev->handle);
4026 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4028 mgr = hcon->amp_mgr;
4029 if (mgr && mgr->bredr_chan) {
4030 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4032 l2cap_chan_lock(bredr_chan);
/* AMP data flows in blocks; align the L2CAP MTU accordingly. */
4034 bredr_chan->conn->mtu = hdev->block_mtu;
4035 l2cap_logical_cfm(bredr_chan, hchan, 0);
4036 hci_conn_hold(hcon);
4038 l2cap_chan_unlock(bredr_chan);
/* HCI (AMP) Disconnection Logical Link Complete event handler: looks up
 * the hci_chan for the reported logical handle and tears the logical
 * link down through the AMP layer.
 */
4042 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4043 struct sk_buff *skb)
4045 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4046 struct hci_chan *hchan;
4048 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4049 le16_to_cpu(ev->handle), ev->status);
4056 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4060 amp_destroy_logical_link(hchan, ev->reason);
4063 hci_dev_unlock(hdev);
/* HCI (AMP) Disconnection Physical Link Complete event handler: marks
 * the physical-link hcon closed.
 */
4066 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4067 struct sk_buff *skb)
4069 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4070 struct hci_conn *hcon;
4072 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4079 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4081 hcon->state = BT_CLOSED;
4085 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event handler: matches (or creates) the
 * hci_conn for the new LE link, records role and initiator/responder
 * addresses (handling RPA/privacy and white-list-initiated
 * connections), resolves the peer's identity address via its IRK,
 * drops blacklisted peers, and on success finalizes the connection
 * state, notifies mgmt/L2CAP and re-evaluates background scanning.
 */
4088 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4090 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4091 struct hci_conn_params *params;
4092 struct hci_conn *conn;
4093 struct smp_irk *irk;
4096 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* An outgoing attempt is tracked as an LE conn in BT_CONNECT state;
 * if none exists this is an incoming connection and a new hci_conn
 * is allocated. */
4100 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)
4102 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
4104 BT_ERR("No memory for new connection");
4108 conn->dst_type = ev->bdaddr_type;
4110 if (ev->role == LE_CONN_ROLE_MASTER) {
4112 set_bit(HCI_CONN_MASTER, &conn->flags);
4115 /* If we didn't have a hci_conn object previously
4116 * but we're in master role this must be something
4117 * initiated using a white list. Since white list based
4118 * connections are not "first class citizens" we don't
4119 * have full tracking of them. Therefore, we go ahead
4120 * with a "best effort" approach of determining the
4121 * initiator address based on the HCI_PRIVACY flag.
4124 conn->resp_addr_type = ev->bdaddr_type;
4125 bacpy(&conn->resp_addr, &ev->bdaddr);
4126 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
4127 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4128 bacpy(&conn->init_addr, &hdev->rpa);
4130 hci_copy_identity_address(hdev,
4132 &conn->init_addr_type);
/* Connection established — the LE connect timeout no longer applies. */
4136 cancel_delayed_work(&conn->le_conn_timeout);
4140 /* Set the responder (our side) address type based on
4141 * the advertising address type.
4143 conn->resp_addr_type = hdev->adv_addr_type;
4144 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4145 bacpy(&conn->resp_addr, &hdev->random_addr);
4147 bacpy(&conn->resp_addr, &hdev->bdaddr);
4149 conn->init_addr_type = ev->bdaddr_type;
4150 bacpy(&conn->init_addr, &ev->bdaddr);
4152 /* For incoming connections, set the default minimum
4153 * and maximum connection interval. They will be used
4154 * to check if the parameters are in range and if not
4155 * trigger the connection update procedure.
4157 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4158 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4161 /* Lookup the identity address from the stored connection
4162 * address and address type.
4164 * When establishing connections to an identity address, the
4165 * connection procedure will store the resolvable random
4166 * address first. Now if it can be converted back into the
4167 * identity address, start using the identity address from
4170 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4172 bacpy(&conn->dst, &irk->bdaddr);
4173 conn->dst_type = irk->addr_type;
4176 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4177 addr_type = BDADDR_LE_PUBLIC;
4179 addr_type = BDADDR_LE_RANDOM;
4181 /* Drop the connection if he device is blocked */
4182 if (hci_blacklist_lookup(hdev, &conn->dst, addr_type)) {
4183 hci_conn_drop(conn);
4188 hci_le_conn_failed(conn, ev->status);
4192 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4193 mgmt_device_connected(hdev, &conn->dst, conn->type,
4194 conn->dst_type, 0, NULL, 0, NULL);
4196 conn->sec_level = BT_SECURITY_LOW;
4197 conn->handle = __le16_to_cpu(ev->handle);
4198 conn->state = BT_CONNECTED;
4200 conn->le_conn_interval = le16_to_cpu(ev->interval);
4201 conn->le_conn_latency = le16_to_cpu(ev->latency);
4202 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4204 hci_conn_add_sysfs(conn);
4206 hci_proto_connect_cfm(conn, ev->status);
/* The device is now connected — remove it from the pending
 * auto-connect action list. */
4208 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
4210 list_del_init(&params->action);
4213 hci_update_background_scan(hdev);
4214 hci_dev_unlock(hdev);
/* HCI LE Connection Update Complete event handler: stores the newly
 * negotiated connection interval, slave latency and supervision timeout
 * on the connection.
 */
4217 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4218 struct sk_buff *skb)
4220 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4221 struct hci_conn *conn;
4223 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4230 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4232 conn->le_conn_interval = le16_to_cpu(ev->interval);
4233 conn->le_conn_latency = le16_to_cpu(ev->latency);
4234 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4237 hci_dev_unlock(hdev);
4240 /* This function requires the caller holds hdev->lock */
/* Initiate an LE connection to an advertiser if we have been asked to:
 * called per advertising report. Only connectable advertising (ADV_IND,
 * ADV_DIRECT_IND) is considered, blocked devices are ignored, and a
 * connection is attempted either for any ADV_DIRECT_IND while
 * connectable, or for devices present in the pend_le_conns list.
 * -EBUSY from hci_connect_le is expected (one attempt at a time) and
 * not treated as an error.
 */
4241 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4242 u8 addr_type, u8 adv_type)
4244 struct hci_conn *conn;
4246 /* If the event is not connectable don't proceed further */
4247 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4250 /* Ignore if the device is blocked */
4251 if (hci_blacklist_lookup(hdev, addr, addr_type))
4254 /* If we're connectable, always connect any ADV_DIRECT_IND event */
4255 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
4256 adv_type == LE_ADV_DIRECT_IND)
4259 /* If we're not connectable only connect devices that we have in
4260 * our pend_le_conns list.
4262 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
4266 /* Request connection in master = true role */
4267 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4268 HCI_LE_AUTOCONN_TIMEOUT, true);
4272 switch (PTR_ERR(conn)) {
4274 /* If hci_connect() returns -EBUSY it means there is already
4275 * an LE connection attempt going on. Since controllers don't
4276 * support more than one connection attempt at the time, we
4277 * don't consider this an error case.
4281 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single LE advertising report: resolve the advertiser's
 * identity address via IRK, trigger pending auto-connections, and emit
 * mgmt device-found events. During active scanning, ADV_IND and
 * ADV_SCAN_IND reports are buffered so a following SCAN_RSP can be
 * merged into one device-found event; mismatched or duplicate reports
 * flush the buffered one first. During passive scanning only devices in
 * pend_le_reports generate events.
 */
4285 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4286 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4288 struct discovery_state *d = &hdev->discovery;
4289 struct smp_irk *irk;
4293 /* Check if we need to convert to identity address */
4294 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4296 bdaddr = &irk->bdaddr;
4297 bdaddr_type = irk->addr_type;
4300 /* Check if we have been requested to connect to this device */
4301 check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4303 /* Passive scanning shouldn't trigger any device found events,
4304 * except for devices marked as CONN_REPORT for which we do send
4305 * device found events.
4307 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4308 struct hci_conn_params *param;
4310 if (type == LE_ADV_DIRECT_IND)
4313 param = hci_pend_le_action_lookup(&hdev->pend_le_reports,
4314 bdaddr, bdaddr_type);
4318 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4319 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4322 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4323 rssi, flags, data, len, NULL, 0);
4327 /* When receiving non-connectable or scannable undirected
4328 * advertising reports, this means that the remote device is
4329 * not connectable and then clearly indicate this in the
4330 * device found event.
4332 * When receiving a scan response, then there is no way to
4333 * know if the remote device is connectable or not. However
4334 * since scan responses are merged with a previously seen
4335 * advertising report, the flags field from that report
4338 * In the really unlikely case that a controller get confused
4339 * and just sends a scan response event, then it is marked as
4340 * not connectable as well.
4342 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4343 type == LE_ADV_SCAN_RSP)
4344 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4348 /* If there's nothing pending either store the data from this
4349 * event or send an immediate device found event if the data
4350 * should not be stored for later.
4352 if (!has_pending_adv_report(hdev)) {
4353 /* If the report will trigger a SCAN_REQ store it for
4356 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4357 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4358 rssi, flags, data, len);
4362 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4363 rssi, flags, data, len, NULL, 0);
4367 /* Check if the pending report is for the same device as the new one */
4368 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4369 bdaddr_type == d->last_adv_addr_type);
4371 /* If the pending data doesn't match this report or this isn't a
4372 * scan response (e.g. we got a duplicate ADV_IND) then force
4373 * sending of the pending data.
4375 if (type != LE_ADV_SCAN_RSP || !match) {
4376 /* Send out whatever is in the cache, but skip duplicates */
4378 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4379 d->last_adv_addr_type, NULL,
4380 d->last_adv_rssi, d->last_adv_flags,
4382 d->last_adv_data_len, NULL, 0);
4384 /* If the new report will trigger a SCAN_REQ store it for
4387 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4388 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4389 rssi, flags, data, len);
4393 /* The advertising reports cannot be merged, so clear
4394 * the pending report and send out a device found event.
4396 clear_pending_adv_report(hdev);
4397 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4398 rssi, flags, data, len, NULL, 0);
4402 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4403 * the new event is a SCAN_RSP. We can therefore proceed with
4404 * sending a merged device found event.
4406 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4407 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4408 d->last_adv_data, d->last_adv_data_len, data, len);
4409 clear_pending_adv_report(hdev);
4412 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4414 u8 num_reports = skb->data[0];
4415 void *ptr = &skb->data[1];
4419 while (num_reports--) {
4420 struct hci_ev_le_advertising_info *ev = ptr;
4423 rssi = ev->data[ev->length];
4424 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4425 ev->bdaddr_type, rssi, ev->data, ev->length);
4427 ptr += sizeof(*ev) + ev->length + 1;
4430 hci_dev_unlock(hdev);
4433 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4435 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4436 struct hci_cp_le_ltk_reply cp;
4437 struct hci_cp_le_ltk_neg_reply neg;
4438 struct hci_conn *conn;
4439 struct smp_ltk *ltk;
4441 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4445 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4449 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
4453 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4454 cp.handle = cpu_to_le16(conn->handle);
4456 if (ltk->authenticated)
4457 conn->pending_sec_level = BT_SECURITY_HIGH;
4459 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4461 conn->enc_key_size = ltk->enc_size;
4463 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4465 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4466 * temporary key used to encrypt a connection following
4467 * pairing. It is used during the Encrypted Session Setup to
4468 * distribute the keys. Later, security can be re-established
4469 * using a distributed LTK.
4471 if (ltk->type == SMP_STK) {
4472 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4473 list_del(<k->list);
4476 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4479 hci_dev_unlock(hdev);
4484 neg.handle = ev->handle;
4485 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4486 hci_dev_unlock(hdev);
4489 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4492 struct hci_cp_le_conn_param_req_neg_reply cp;
4494 cp.handle = cpu_to_le16(handle);
4497 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4501 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4502 struct sk_buff *skb)
4504 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4505 struct hci_cp_le_conn_param_req_reply cp;
4506 struct hci_conn *hcon;
4507 u16 handle, min, max, latency, timeout;
4509 handle = le16_to_cpu(ev->handle);
4510 min = le16_to_cpu(ev->interval_min);
4511 max = le16_to_cpu(ev->interval_max);
4512 latency = le16_to_cpu(ev->latency);
4513 timeout = le16_to_cpu(ev->timeout);
4515 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4516 if (!hcon || hcon->state != BT_CONNECTED)
4517 return send_conn_param_neg_reply(hdev, handle,
4518 HCI_ERROR_UNKNOWN_CONN_ID);
4520 if (hci_check_conn_params(min, max, latency, timeout))
4521 return send_conn_param_neg_reply(hdev, handle,
4522 HCI_ERROR_INVALID_LL_PARAMS);
4524 if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
4525 struct hci_conn_params *params;
4530 params = hci_conn_params_lookup(hdev, &hcon->dst,
4533 params->conn_min_interval = min;
4534 params->conn_max_interval = max;
4535 params->conn_latency = latency;
4536 params->supervision_timeout = timeout;
4542 hci_dev_unlock(hdev);
4544 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4545 store_hint, min, max, latency, timeout);
4548 cp.handle = ev->handle;
4549 cp.interval_min = ev->interval_min;
4550 cp.interval_max = ev->interval_max;
4551 cp.latency = ev->latency;
4552 cp.timeout = ev->timeout;
4556 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4559 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4561 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4563 skb_pull(skb, sizeof(*le_ev));
4565 switch (le_ev->subevent) {
4566 case HCI_EV_LE_CONN_COMPLETE:
4567 hci_le_conn_complete_evt(hdev, skb);
4570 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4571 hci_le_conn_update_complete_evt(hdev, skb);
4574 case HCI_EV_LE_ADVERTISING_REPORT:
4575 hci_le_adv_report_evt(hdev, skb);
4578 case HCI_EV_LE_LTK_REQ:
4579 hci_le_ltk_request_evt(hdev, skb);
4582 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4583 hci_le_remote_conn_param_req_evt(hdev, skb);
4591 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4593 struct hci_ev_channel_selected *ev = (void *) skb->data;
4594 struct hci_conn *hcon;
4596 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4598 skb_pull(skb, sizeof(*ev));
4600 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4604 amp_read_loc_assoc_final_data(hdev, hcon);
4607 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4609 struct hci_event_hdr *hdr = (void *) skb->data;
4610 __u8 event = hdr->evt;
4614 /* Received events are (currently) only needed when a request is
4615 * ongoing so avoid unnecessary memory allocation.
4617 if (hdev->req_status == HCI_REQ_PEND) {
4618 kfree_skb(hdev->recv_evt);
4619 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4622 hci_dev_unlock(hdev);
4624 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4626 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4627 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4628 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4630 hci_req_cmd_complete(hdev, opcode, 0);
4634 case HCI_EV_INQUIRY_COMPLETE:
4635 hci_inquiry_complete_evt(hdev, skb);
4638 case HCI_EV_INQUIRY_RESULT:
4639 hci_inquiry_result_evt(hdev, skb);
4642 case HCI_EV_CONN_COMPLETE:
4643 hci_conn_complete_evt(hdev, skb);
4646 case HCI_EV_CONN_REQUEST:
4647 hci_conn_request_evt(hdev, skb);
4650 case HCI_EV_DISCONN_COMPLETE:
4651 hci_disconn_complete_evt(hdev, skb);
4654 case HCI_EV_AUTH_COMPLETE:
4655 hci_auth_complete_evt(hdev, skb);
4658 case HCI_EV_REMOTE_NAME:
4659 hci_remote_name_evt(hdev, skb);
4662 case HCI_EV_ENCRYPT_CHANGE:
4663 hci_encrypt_change_evt(hdev, skb);
4666 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4667 hci_change_link_key_complete_evt(hdev, skb);
4670 case HCI_EV_REMOTE_FEATURES:
4671 hci_remote_features_evt(hdev, skb);
4674 case HCI_EV_CMD_COMPLETE:
4675 hci_cmd_complete_evt(hdev, skb);
4678 case HCI_EV_CMD_STATUS:
4679 hci_cmd_status_evt(hdev, skb);
4682 case HCI_EV_ROLE_CHANGE:
4683 hci_role_change_evt(hdev, skb);
4686 case HCI_EV_NUM_COMP_PKTS:
4687 hci_num_comp_pkts_evt(hdev, skb);
4690 case HCI_EV_MODE_CHANGE:
4691 hci_mode_change_evt(hdev, skb);
4694 case HCI_EV_PIN_CODE_REQ:
4695 hci_pin_code_request_evt(hdev, skb);
4698 case HCI_EV_LINK_KEY_REQ:
4699 hci_link_key_request_evt(hdev, skb);
4702 case HCI_EV_LINK_KEY_NOTIFY:
4703 hci_link_key_notify_evt(hdev, skb);
4706 case HCI_EV_CLOCK_OFFSET:
4707 hci_clock_offset_evt(hdev, skb);
4710 case HCI_EV_PKT_TYPE_CHANGE:
4711 hci_pkt_type_change_evt(hdev, skb);
4714 case HCI_EV_PSCAN_REP_MODE:
4715 hci_pscan_rep_mode_evt(hdev, skb);
4718 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4719 hci_inquiry_result_with_rssi_evt(hdev, skb);
4722 case HCI_EV_REMOTE_EXT_FEATURES:
4723 hci_remote_ext_features_evt(hdev, skb);
4726 case HCI_EV_SYNC_CONN_COMPLETE:
4727 hci_sync_conn_complete_evt(hdev, skb);
4730 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4731 hci_extended_inquiry_result_evt(hdev, skb);
4734 case HCI_EV_KEY_REFRESH_COMPLETE:
4735 hci_key_refresh_complete_evt(hdev, skb);
4738 case HCI_EV_IO_CAPA_REQUEST:
4739 hci_io_capa_request_evt(hdev, skb);
4742 case HCI_EV_IO_CAPA_REPLY:
4743 hci_io_capa_reply_evt(hdev, skb);
4746 case HCI_EV_USER_CONFIRM_REQUEST:
4747 hci_user_confirm_request_evt(hdev, skb);
4750 case HCI_EV_USER_PASSKEY_REQUEST:
4751 hci_user_passkey_request_evt(hdev, skb);
4754 case HCI_EV_USER_PASSKEY_NOTIFY:
4755 hci_user_passkey_notify_evt(hdev, skb);
4758 case HCI_EV_KEYPRESS_NOTIFY:
4759 hci_keypress_notify_evt(hdev, skb);
4762 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4763 hci_simple_pair_complete_evt(hdev, skb);
4766 case HCI_EV_REMOTE_HOST_FEATURES:
4767 hci_remote_host_features_evt(hdev, skb);
4770 case HCI_EV_LE_META:
4771 hci_le_meta_evt(hdev, skb);
4774 case HCI_EV_CHANNEL_SELECTED:
4775 hci_chan_selected_evt(hdev, skb);
4778 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4779 hci_remote_oob_data_request_evt(hdev, skb);
4782 case HCI_EV_PHY_LINK_COMPLETE:
4783 hci_phy_link_complete_evt(hdev, skb);
4786 case HCI_EV_LOGICAL_LINK_COMPLETE:
4787 hci_loglink_complete_evt(hdev, skb);
4790 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4791 hci_disconn_loglink_complete_evt(hdev, skb);
4794 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4795 hci_disconn_phylink_complete_evt(hdev, skb);
4798 case HCI_EV_NUM_COMP_BLOCKS:
4799 hci_num_comp_blocks_evt(hdev, skb);
4803 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4808 hdev->stat.evt_rx++;