2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
/* Management interface version/revision returned by MGMT_OP_READ_VERSION. */
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
/* All mgmt opcodes supported by this kernel; reported via
 * MGMT_OP_READ_COMMANDS.
 * NOTE(review): several entries and the closing brace appear elided in this
 * extract (original line numbering is non-contiguous).
 */
41 static const u16 mgmt_commands[] = {
42 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_SET_DISCOVERABLE,
46 MGMT_OP_SET_CONNECTABLE,
47 MGMT_OP_SET_FAST_CONNECTABLE,
49 MGMT_OP_SET_LINK_SECURITY,
53 MGMT_OP_SET_DEV_CLASS,
54 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_LOAD_LINK_KEYS,
58 MGMT_OP_LOAD_LONG_TERM_KEYS,
60 MGMT_OP_GET_CONNECTIONS,
61 MGMT_OP_PIN_CODE_REPLY,
62 MGMT_OP_PIN_CODE_NEG_REPLY,
63 MGMT_OP_SET_IO_CAPABILITY,
65 MGMT_OP_CANCEL_PAIR_DEVICE,
66 MGMT_OP_UNPAIR_DEVICE,
67 MGMT_OP_USER_CONFIRM_REPLY,
68 MGMT_OP_USER_CONFIRM_NEG_REPLY,
69 MGMT_OP_USER_PASSKEY_REPLY,
70 MGMT_OP_USER_PASSKEY_NEG_REPLY,
71 MGMT_OP_READ_LOCAL_OOB_DATA,
72 MGMT_OP_ADD_REMOTE_OOB_DATA,
73 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
74 MGMT_OP_START_DISCOVERY,
75 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_UNBLOCK_DEVICE,
79 MGMT_OP_SET_DEVICE_ID,
80 MGMT_OP_SET_ADVERTISING,
82 MGMT_OP_SET_STATIC_ADDRESS,
83 MGMT_OP_SET_SCAN_PARAMS,
84 MGMT_OP_SET_SECURE_CONN,
85 MGMT_OP_SET_DEBUG_KEYS,
88 MGMT_OP_GET_CONN_INFO,
89 MGMT_OP_GET_CLOCK_INFO,
91 MGMT_OP_REMOVE_DEVICE,
92 MGMT_OP_LOAD_CONN_PARAM,
93 MGMT_OP_READ_UNCONF_INDEX_LIST,
94 MGMT_OP_READ_CONFIG_INFO,
95 MGMT_OP_SET_EXTERNAL_CONFIG,
96 MGMT_OP_SET_PUBLIC_ADDRESS,
97 MGMT_OP_START_SERVICE_DISCOVERY,
/* All mgmt events this kernel may emit; also reported via
 * MGMT_OP_READ_COMMANDS.
 * NOTE(review): some entries and the closing brace appear elided here too.
 */
100 static const u16 mgmt_events[] = {
101 MGMT_EV_CONTROLLER_ERROR,
103 MGMT_EV_INDEX_REMOVED,
104 MGMT_EV_NEW_SETTINGS,
105 MGMT_EV_CLASS_OF_DEV_CHANGED,
106 MGMT_EV_LOCAL_NAME_CHANGED,
107 MGMT_EV_NEW_LINK_KEY,
108 MGMT_EV_NEW_LONG_TERM_KEY,
109 MGMT_EV_DEVICE_CONNECTED,
110 MGMT_EV_DEVICE_DISCONNECTED,
111 MGMT_EV_CONNECT_FAILED,
112 MGMT_EV_PIN_CODE_REQUEST,
113 MGMT_EV_USER_CONFIRM_REQUEST,
114 MGMT_EV_USER_PASSKEY_REQUEST,
116 MGMT_EV_DEVICE_FOUND,
118 MGMT_EV_DEVICE_BLOCKED,
119 MGMT_EV_DEVICE_UNBLOCKED,
120 MGMT_EV_DEVICE_UNPAIRED,
121 MGMT_EV_PASSKEY_NOTIFY,
124 MGMT_EV_DEVICE_ADDED,
125 MGMT_EV_DEVICE_REMOVED,
126 MGMT_EV_NEW_CONN_PARAM,
127 MGMT_EV_UNCONF_INDEX_ADDED,
128 MGMT_EV_UNCONF_INDEX_REMOVED,
129 MGMT_EV_NEW_CONFIG_OPTIONS,
/* Lifetime (in jiffies) of the service cache: 2 seconds. */
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* All-zero 16-byte key value, used to detect/reject obviously bogus keys. */
134 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
135 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Fragment of struct pending_cmd (per-socket record of an in-flight mgmt
 * command). NOTE(review): the struct header and most members are missing
 * from this extract; only the list linkage and the completion callback
 * pointer are visible.
 */
138 struct list_head list;
145 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
148 /* HCI to MGMT error code conversion table */
/* Indexed by the HCI core-spec error code; each entry is annotated with the
 * HCI error it translates. NOTE(review): the entry for HCI success (0x00)
 * and the closing brace appear elided in this extract.
 */
149 static u8 mgmt_status_table[] = {
151 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
152 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
153 MGMT_STATUS_FAILED, /* Hardware Failure */
154 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
155 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
156 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
157 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
158 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
159 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
160 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
161 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
162 MGMT_STATUS_BUSY, /* Command Disallowed */
163 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
164 MGMT_STATUS_REJECTED, /* Rejected Security */
165 MGMT_STATUS_REJECTED, /* Rejected Personal */
166 MGMT_STATUS_TIMEOUT, /* Host Timeout */
167 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
168 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
169 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
170 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
171 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
172 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
173 MGMT_STATUS_BUSY, /* Repeated Attempts */
174 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
175 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
176 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
177 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
178 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
179 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
180 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
181 MGMT_STATUS_FAILED, /* Unspecified Error */
182 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
183 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
184 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
185 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
186 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
187 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
188 MGMT_STATUS_FAILED, /* Unit Link Key Used */
189 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
190 MGMT_STATUS_TIMEOUT, /* Instant Passed */
191 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
192 MGMT_STATUS_FAILED, /* Transaction Collision */
193 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
194 MGMT_STATUS_REJECTED, /* QoS Rejected */
195 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
196 MGMT_STATUS_REJECTED, /* Insufficient Security */
197 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
198 MGMT_STATUS_BUSY, /* Role Switch Pending */
199 MGMT_STATUS_FAILED, /* Slot Violation */
200 MGMT_STATUS_FAILED, /* Role Switch Failed */
201 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
202 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
203 MGMT_STATUS_BUSY, /* Host Busy Pairing */
204 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
205 MGMT_STATUS_BUSY, /* Controller Busy */
206 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
207 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
208 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
209 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
210 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Translate an HCI status byte to the closest MGMT status; any code past
 * the end of the table collapses to MGMT_STATUS_FAILED.
 */
213 static u8 mgmt_status(u8 hci_status)
215 if (hci_status < ARRAY_SIZE(mgmt_status_table))
216 return mgmt_status_table[hci_status];
218 return MGMT_STATUS_FAILED;
/* Build an mgmt event packet (mgmt_hdr + data_len payload) and broadcast it
 * to all mgmt control sockets except skip_sk. The header index is hdev->id
 * when a device is given, MGMT_INDEX_NONE otherwise.
 * NOTE(review): alloc_skb failure check and the return statement appear
 * elided in this extract.
 */
221 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
222 struct sock *skip_sk)
225 struct mgmt_hdr *hdr;
227 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
231 hdr = (void *) skb_put(skb, sizeof(*hdr));
232 hdr->opcode = cpu_to_le16(event);
234 hdr->index = cpu_to_le16(hdev->id);
236 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
237 hdr->len = cpu_to_le16(data_len);
240 memcpy(skb_put(skb, data_len), data, data_len);
243 __net_timestamp(skb);
245 hci_send_to_control(skb, skip_sk);
/* Queue a MGMT_EV_CMD_STATUS reply (echoed command opcode + status byte,
 * no response payload) on sk's receive queue.
 */
251 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
254 struct mgmt_hdr *hdr;
255 struct mgmt_ev_cmd_status *ev;
258 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
260 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
264 hdr = (void *) skb_put(skb, sizeof(*hdr));
266 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
267 hdr->index = cpu_to_le16(index);
268 hdr->len = cpu_to_le16(sizeof(*ev));
270 ev = (void *) skb_put(skb, sizeof(*ev));
272 ev->opcode = cpu_to_le16(cmd);
274 err = sock_queue_rcv_skb(sk, skb);
/* Queue a MGMT_EV_CMD_COMPLETE reply carrying rp/rp_len as the command's
 * response data.
 */
281 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
282 void *rp, size_t rp_len)
285 struct mgmt_hdr *hdr;
286 struct mgmt_ev_cmd_complete *ev;
289 BT_DBG("sock %p", sk);
291 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
295 hdr = (void *) skb_put(skb, sizeof(*hdr));
297 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
298 hdr->index = cpu_to_le16(index);
299 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
301 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
302 ev->opcode = cpu_to_le16(cmd);
306 memcpy(ev->data, rp, rp_len);
308 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with MGMT_VERSION/MGMT_REVISION. */
315 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
318 struct mgmt_rp_read_version rp;
320 BT_DBG("sock %p", sk);
322 rp.version = MGMT_VERSION;
323 rp.revision = cpu_to_le16(MGMT_REVISION);
325 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: allocate a reply holding the supported
 * command and event opcode lists (little-endian u16 each) and send it.
 * NOTE(review): kmalloc failure check and kfree of rp appear elided.
 */
329 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
332 struct mgmt_rp_read_commands *rp;
333 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
334 const u16 num_events = ARRAY_SIZE(mgmt_events);
339 BT_DBG("sock %p", sk);
341 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
343 rp = kmalloc(rp_size, GFP_KERNEL);
347 rp->num_commands = cpu_to_le16(num_commands);
348 rp->num_events = cpu_to_le16(num_events);
350 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
351 put_unaligned_le16(mgmt_commands[i], opcode);
353 for (i = 0; i < num_events; i++, opcode++)
354 put_unaligned_le16(mgmt_events[i], opcode);
356 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: under hci_dev_list_lock, first count
 * configured BR/EDR controllers, allocate the reply, then fill in the ids,
 * skipping devices in SETUP/CONFIG/USER_CHANNEL states and raw-only ones.
 * Count is re-derived in the second pass so it cannot exceed the allocation.
 */
363 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
366 struct mgmt_rp_read_index_list *rp;
372 BT_DBG("sock %p", sk);
374 read_lock(&hci_dev_list_lock);
377 list_for_each_entry(d, &hci_dev_list, list) {
378 if (d->dev_type == HCI_BREDR &&
379 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
383 rp_len = sizeof(*rp) + (2 * count);
384 rp = kmalloc(rp_len, GFP_ATOMIC);
386 read_unlock(&hci_dev_list_lock);
391 list_for_each_entry(d, &hci_dev_list, list) {
392 if (test_bit(HCI_SETUP, &d->dev_flags) ||
393 test_bit(HCI_CONFIG, &d->dev_flags) ||
394 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
397 /* Devices marked as raw-only are neither configured
398 * nor unconfigured controllers.
400 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
403 if (d->dev_type == HCI_BREDR &&
404 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
405 rp->index[count++] = cpu_to_le16(d->id);
406 BT_DBG("Added hci%u", d->id);
410 rp->num_controllers = cpu_to_le16(count);
411 rp_len = sizeof(*rp) + (2 * count);
413 read_unlock(&hci_dev_list_lock);
415 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: identical two-pass structure to
 * read_index_list() above, but selects BR/EDR controllers that DO have
 * HCI_UNCONFIGURED set.
 */
423 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
424 void *data, u16 data_len)
426 struct mgmt_rp_read_unconf_index_list *rp;
432 BT_DBG("sock %p", sk);
434 read_lock(&hci_dev_list_lock);
437 list_for_each_entry(d, &hci_dev_list, list) {
438 if (d->dev_type == HCI_BREDR &&
439 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
443 rp_len = sizeof(*rp) + (2 * count);
444 rp = kmalloc(rp_len, GFP_ATOMIC);
446 read_unlock(&hci_dev_list_lock);
451 list_for_each_entry(d, &hci_dev_list, list) {
452 if (test_bit(HCI_SETUP, &d->dev_flags) ||
453 test_bit(HCI_CONFIG, &d->dev_flags) ||
454 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
457 /* Devices marked as raw-only are neither configured
458 * nor unconfigured controllers.
460 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
463 if (d->dev_type == HCI_BREDR &&
464 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
465 rp->index[count++] = cpu_to_le16(d->id);
466 BT_DBG("Added hci%u", d->id);
470 rp->num_controllers = cpu_to_le16(count);
471 rp_len = sizeof(*rp) + (2 * count);
473 read_unlock(&hci_dev_list_lock);
475 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
/* A controller counts as configured when neither of these holds: it needs
 * external configuration that has not happened yet, or its default bdaddr
 * is invalid and no public address has been set.
 * NOTE(review): the intermediate "return false" statements and the final
 * "return true" appear elided in this extract.
 */
483 static bool is_configured(struct hci_dev *hdev)
485 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
486 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
489 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
490 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Bitmask of configuration options still missing for this controller:
 * external config and/or a valid public address.
 */
496 static __le32 get_missing_options(struct hci_dev *hdev)
500 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
501 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
502 options |= MGMT_OPTION_EXTERNAL_CONFIG;
504 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
505 !bacmp(&hdev->public_addr, BDADDR_ANY))
506 options |= MGMT_OPTION_PUBLIC_ADDRESS;
508 return cpu_to_le32(options);
/* Broadcast a MGMT_EV_NEW_CONFIG_OPTIONS event with the current missing
 * options, skipping the socket that triggered the change.
 */
511 static int new_options(struct hci_dev *hdev, struct sock *skip)
513 __le32 options = get_missing_options(hdev);
515 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
516 sizeof(options), skip);
/* Reply to a config-related command with the current missing options. */
519 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
521 __le32 options = get_missing_options(hdev);
523 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer id plus which
 * config options are supported (external config quirk, set_bdaddr hook)
 * and which are still missing. NOTE(review): the matching hci_dev_lock()
 * call appears elided in this extract; only the unlock is visible.
 */
527 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
528 void *data, u16 data_len)
530 struct mgmt_rp_read_config_info rp;
533 BT_DBG("sock %p %s", sk, hdev->name);
537 memset(&rp, 0, sizeof(rp));
538 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
540 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
541 options |= MGMT_OPTION_EXTERNAL_CONFIG;
543 if (hdev->set_bdaddr)
544 options |= MGMT_OPTION_PUBLIC_ADDRESS;
546 rp.supported_options = cpu_to_le32(options);
547 rp.missing_options = get_missing_options(hdev);
549 hci_dev_unlock(hdev);
551 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Compute the MGMT_SETTING_* bitmask this controller could support, based
 * on its LMP feature bits: always-available settings first, then BR/EDR
 * extras (fast-connectable needs HCI >= 1.2; SSP implies HS), secure
 * connections, and the LE group (LE, advertising, SC, privacy). The
 * CONFIGURATION setting is offered when external config is possible.
 */
555 static u32 get_supported_settings(struct hci_dev *hdev)
559 settings |= MGMT_SETTING_POWERED;
560 settings |= MGMT_SETTING_BONDABLE;
561 settings |= MGMT_SETTING_DEBUG_KEYS;
562 settings |= MGMT_SETTING_CONNECTABLE;
563 settings |= MGMT_SETTING_DISCOVERABLE;
565 if (lmp_bredr_capable(hdev)) {
566 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
567 settings |= MGMT_SETTING_FAST_CONNECTABLE;
568 settings |= MGMT_SETTING_BREDR;
569 settings |= MGMT_SETTING_LINK_SECURITY;
571 if (lmp_ssp_capable(hdev)) {
572 settings |= MGMT_SETTING_SSP;
573 settings |= MGMT_SETTING_HS;
576 if (lmp_sc_capable(hdev))
577 settings |= MGMT_SETTING_SECURE_CONN;
580 if (lmp_le_capable(hdev)) {
581 settings |= MGMT_SETTING_LE;
582 settings |= MGMT_SETTING_ADVERTISING;
583 settings |= MGMT_SETTING_SECURE_CONN;
584 settings |= MGMT_SETTING_PRIVACY;
587 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
589 settings |= MGMT_SETTING_CONFIGURATION;
/* Compute the currently-active MGMT_SETTING_* bitmask by sampling the
 * corresponding hdev->dev_flags bits (and the powered state).
 * NOTE(review): the final "return settings" appears elided in this extract.
 */
594 static u32 get_current_settings(struct hci_dev *hdev)
598 if (hdev_is_powered(hdev))
599 settings |= MGMT_SETTING_POWERED;
601 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_CONNECTABLE;
604 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_FAST_CONNECTABLE;
607 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_DISCOVERABLE;
610 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BONDABLE;
613 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_BREDR;
616 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LE;
619 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
620 settings |= MGMT_SETTING_LINK_SECURITY;
622 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_SSP;
625 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
626 settings |= MGMT_SETTING_HS;
628 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
629 settings |= MGMT_SETTING_ADVERTISING;
631 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
632 settings |= MGMT_SETTING_SECURE_CONN;
634 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
635 settings |= MGMT_SETTING_DEBUG_KEYS;
637 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
638 settings |= MGMT_SETTING_PRIVACY;
/* Service class UUID of the Device ID (PnP Information) record; excluded
 * from the EIR UUID16 list because it is advertised via EIR_DEVICE_ID.
 */
643 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the device's 16-bit service UUIDs to data
 * (at most len bytes). The field type starts as EIR_UUID16_ALL and is
 * downgraded to EIR_UUID16_SOME when space runs out. Returns the advanced
 * write pointer. NOTE(review): the lazy field-header initialization and
 * the final return appear elided in this extract.
 */
645 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
647 u8 *ptr = data, *uuids_start = NULL;
648 struct bt_uuid *uuid;
653 list_for_each_entry(uuid, &hdev->uuids, list) {
656 if (uuid->size != 16)
659 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
663 if (uuid16 == PNP_INFO_SVCLASS_ID)
669 uuids_start[1] = EIR_UUID16_ALL;
673 /* Stop if not enough space to put next UUID */
674 if ((ptr - data) + sizeof(u16) > len) {
675 uuids_start[1] = EIR_UUID16_SOME;
679 *ptr++ = (uuid16 & 0x00ff);
680 *ptr++ = (uuid16 & 0xff00) >> 8;
681 uuids_start[0] += sizeof(uuid16);
/* Same as create_uuid16_list() but for 32-bit UUIDs (EIR_UUID32_ALL/SOME);
 * each UUID is copied as 4 raw bytes from offset 12 of the 128-bit form.
 */
687 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
689 u8 *ptr = data, *uuids_start = NULL;
690 struct bt_uuid *uuid;
695 list_for_each_entry(uuid, &hdev->uuids, list) {
696 if (uuid->size != 32)
702 uuids_start[1] = EIR_UUID32_ALL;
706 /* Stop if not enough space to put next UUID */
707 if ((ptr - data) + sizeof(u32) > len) {
708 uuids_start[1] = EIR_UUID32_SOME;
712 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
714 uuids_start[0] += sizeof(u32);
/* Same pattern for full 128-bit UUIDs (EIR_UUID128_ALL/SOME); each entry
 * occupies 16 bytes.
 */
720 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
722 u8 *ptr = data, *uuids_start = NULL;
723 struct bt_uuid *uuid;
728 list_for_each_entry(uuid, &hdev->uuids, list) {
729 if (uuid->size != 128)
735 uuids_start[1] = EIR_UUID128_ALL;
739 /* Stop if not enough space to put next UUID */
740 if ((ptr - data) + 16 > len) {
741 uuids_start[1] = EIR_UUID128_SOME;
745 memcpy(ptr, uuid->uuid, 16);
747 uuids_start[0] += 16;
/* Look up an in-flight mgmt command on hdev by opcode; NULL if none.
 * NOTE(review): the "return cmd" / "return NULL" lines appear elided.
 */
753 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
755 struct pending_cmd *cmd;
757 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
758 if (cmd->opcode == opcode)
/* As mgmt_pending_find(), but additionally require a matching user_data
 * pointer (entries with different user_data are skipped).
 */
765 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
766 struct hci_dev *hdev,
769 struct pending_cmd *cmd;
771 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
772 if (cmd->user_data != data)
774 if (cmd->opcode == opcode)
/* Build LE scan response data in ptr: the local device name as an
 * EIR_NAME_COMPLETE field, shortened to EIR_NAME_SHORT when it exceeds the
 * remaining HCI_MAX_AD_LENGTH budget. Returns the total ad length written.
 */
781 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
786 name_len = strlen(hdev->dev_name);
788 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
790 if (name_len > max_len) {
792 ptr[1] = EIR_NAME_SHORT;
794 ptr[1] = EIR_NAME_COMPLETE;
796 ptr[0] = name_len + 1;
798 memcpy(ptr + 2, hdev->dev_name, name_len);
800 ad_len += (name_len + 2);
801 ptr += (name_len + 2);
/* Queue an HCI_OP_LE_SET_SCAN_RSP_DATA command on req with freshly built
 * scan response data. Skips the write when LE is disabled or when the data
 * is unchanged from what the controller already has; otherwise caches the
 * new data/length on hdev first.
 */
807 static void update_scan_rsp_data(struct hci_request *req)
809 struct hci_dev *hdev = req->hdev;
810 struct hci_cp_le_set_scan_rsp_data cp;
813 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
816 memset(&cp, 0, sizeof(cp));
818 len = create_scan_rsp_data(hdev, cp.data);
820 if (hdev->scan_rsp_data_len == len &&
821 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
824 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
825 hdev->scan_rsp_data_len = len;
829 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Determine the LE AD discoverability flags. A pending SET_DISCOVERABLE
 * command takes precedence over dev_flags, since the flags have not
 * reached their final value yet (0x02 requests limited discoverability).
 */
832 static u8 get_adv_discov_flags(struct hci_dev *hdev)
834 struct pending_cmd *cmd;
836 /* If there's a pending mgmt command the flags will not yet have
837 * their final values, so check for this first.
839 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
841 struct mgmt_mode *cp = cmd->param;
843 return LE_AD_GENERAL;
844 else if (cp->val == 0x02)
845 return LE_AD_LIMITED;
847 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
848 return LE_AD_LIMITED;
849 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
850 return LE_AD_GENERAL;
/* Build LE advertising data into ptr: the AD Flags field (discoverability
 * plus LE_AD_NO_BREDR when BR/EDR is disabled) and, when known, a TX power
 * field. Returns the total ad length.
 * NOTE(review): the flags-field emission and length bookkeeping lines
 * appear elided in this extract.
 */
856 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
858 u8 ad_len = 0, flags = 0;
860 flags |= get_adv_discov_flags(hdev);
862 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
863 flags |= LE_AD_NO_BREDR;
866 BT_DBG("adv flags 0x%02x", flags);
876 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
878 ptr[1] = EIR_TX_POWER;
879 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI_OP_LE_SET_ADV_DATA command on req with freshly built
 * advertising data; mirrors update_scan_rsp_data(): skip when LE is off
 * or the data is unchanged, otherwise cache and send.
 */
888 static void update_adv_data(struct hci_request *req)
890 struct hci_dev *hdev = req->hdev;
891 struct hci_cp_le_set_adv_data cp;
894 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
897 memset(&cp, 0, sizeof(cp));
899 len = create_adv_data(hdev, cp.data);
901 if (hdev->adv_data_len == len &&
902 memcmp(cp.data, hdev->adv_data, len) == 0)
905 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
906 hdev->adv_data_len = len;
910 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Public entry point: run update_adv_data() as a standalone HCI request. */
913 int mgmt_update_adv_data(struct hci_dev *hdev)
915 struct hci_request req;
917 hci_req_init(&req, hdev);
918 update_adv_data(&req);
920 return hci_req_run(&req, NULL);
/* Build the BR/EDR extended inquiry response buffer: local name (complete
 * or shortened), inquiry TX power if known, Device ID record if a source
 * is configured, then the 16/32/128-bit UUID lists within the remaining
 * HCI_MAX_EIR_LENGTH budget.
 */
923 static void create_eir(struct hci_dev *hdev, u8 *data)
928 name_len = strlen(hdev->dev_name);
934 ptr[1] = EIR_NAME_SHORT;
936 ptr[1] = EIR_NAME_COMPLETE;
938 /* EIR Data length */
939 ptr[0] = name_len + 1;
941 memcpy(ptr + 2, hdev->dev_name, name_len);
943 ptr += (name_len + 2);
946 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
948 ptr[1] = EIR_TX_POWER;
949 ptr[2] = (u8) hdev->inq_tx_power;
954 if (hdev->devid_source > 0) {
956 ptr[1] = EIR_DEVICE_ID;
958 put_unaligned_le16(hdev->devid_source, ptr + 2);
959 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
960 put_unaligned_le16(hdev->devid_product, ptr + 6);
961 put_unaligned_le16(hdev->devid_version, ptr + 8);
966 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
967 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
968 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI_OP_WRITE_EIR command with a rebuilt EIR buffer. Bails out
 * when the device is off, lacks extended inquiry support, has SSP disabled,
 * or the service cache is active; also skips the write when the buffer is
 * unchanged from hdev->eir.
 */
971 static void update_eir(struct hci_request *req)
973 struct hci_dev *hdev = req->hdev;
974 struct hci_cp_write_eir cp;
976 if (!hdev_is_powered(hdev))
979 if (!lmp_ext_inq_capable(hdev))
982 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
985 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
988 memset(&cp, 0, sizeof(cp));
990 create_eir(hdev, cp.data);
992 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
995 memcpy(hdev->eir, cp.data, sizeof(cp.data));
997 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hint bits of every registered UUID. */
1000 static u8 get_service_classes(struct hci_dev *hdev)
1002 struct bt_uuid *uuid;
1005 list_for_each_entry(uuid, &hdev->uuids, list)
1006 val |= uuid->svc_hint;
/* Queue an HCI_OP_WRITE_CLASS_OF_DEV command: cod = minor class, major
 * class, service classes. Skipped while powered off, BR/EDR disabled, the
 * service cache is active, or the class is unchanged.
 * NOTE(review): the limited-discoverable bit manipulation after the
 * HCI_LIMITED_DISCOVERABLE test appears elided in this extract.
 */
1011 static void update_class(struct hci_request *req)
1013 struct hci_dev *hdev = req->hdev;
1016 BT_DBG("%s", hdev->name);
1018 if (!hdev_is_powered(hdev))
1021 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1024 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1027 cod[0] = hdev->minor_class;
1028 cod[1] = hdev->major_class;
1029 cod[2] = get_service_classes(hdev);
1031 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1034 if (memcmp(cod, hdev->dev_class, 3) == 0)
1037 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Determine the target connectable state; a pending SET_CONNECTABLE
 * command overrides the current HCI_CONNECTABLE flag.
 */
1040 static bool get_connectable(struct hci_dev *hdev)
1042 struct pending_cmd *cmd;
1044 /* If there's a pending mgmt command the flag will not yet have
1045 * it's final value, so check for this first.
1047 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1049 struct mgmt_mode *cp = cmd->param;
1053 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue an LE Set Advertise Enable command turning advertising off.
 * NOTE(review): the local "enable = 0x00" declaration appears elided.
 */
1056 static void disable_advertising(struct hci_request *req)
1060 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the command sequence that (re)starts LE advertising: disable any
 * running instance first, pick the own-address type (clearing HCI_LE_ADV
 * temporarily so hci_update_random_address() may program a new random
 * address), then set advertising parameters and enable. Aborts when an LE
 * connection exists or the address update fails.
 */
1063 static void enable_advertising(struct hci_request *req)
1065 struct hci_dev *hdev = req->hdev;
1066 struct hci_cp_le_set_adv_param cp;
1067 u8 own_addr_type, enable = 0x01;
1070 if (hci_conn_num(hdev, LE_LINK) > 0)
1073 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1074 disable_advertising(req);
1076 /* Clear the HCI_LE_ADV bit temporarily so that the
1077 * hci_update_random_address knows that it's safe to go ahead
1078 * and write a new random address. The flag will be set back on
1079 * as soon as the SET_ADV_ENABLE HCI command completes.
1081 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1083 connectable = get_connectable(hdev);
1085 /* Set require_privacy to true only when non-connectable
1086 * advertising is used. In that case it is fine to use a
1087 * non-resolvable private address.
1089 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1092 memset(&cp, 0, sizeof(cp));
1093 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1094 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1095 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1096 cp.own_address_type = own_addr_type;
1097 cp.channel_map = hdev->le_adv_channel_map;
1099 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1101 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work: when the service cache expires, refresh class and EIR.
 * NOTE(review): the update_class()/update_eir() calls and the matching
 * hci_dev_lock() appear elided in this extract; only the unlock and the
 * final hci_req_run() are visible.
 */
1104 static void service_cache_off(struct work_struct *work)
1106 struct hci_dev *hdev = container_of(work, struct hci_dev,
1107 service_cache.work);
1108 struct hci_request req;
1110 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1113 hci_req_init(&req, hdev);
1120 hci_dev_unlock(hdev);
1122 hci_req_run(&req, NULL);
/* Delayed work: the resolvable private address lifetime expired. Mark it
 * expired and, if advertising is on, restart advertising so a new RPA is
 * generated and programmed (see comment below).
 */
1125 static void rpa_expired(struct work_struct *work)
1127 struct hci_dev *hdev = container_of(work, struct hci_dev,
1129 struct hci_request req;
1133 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1135 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1138 /* The generation of a new RPA and programming it into the
1139 * controller happens in the enable_advertising() function.
1141 hci_req_init(&req, hdev);
1142 enable_advertising(&req);
1143 hci_req_run(&req, NULL);
/* One-time per-controller mgmt initialization, triggered by the first mgmt
 * interaction (guarded by test_and_set of HCI_MGMT): set up the service
 * cache and RPA expiry work items and clear HCI_BONDABLE, which mgmt
 * requires user space to enable explicitly.
 */
1146 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1148 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1151 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1152 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1154 /* Non-mgmt controlled devices get this bit set
1155 * implicitly so that pairing works for them, however
1156 * for mgmt we require user-space to explicitly enable
1159 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with address, HCI version/manufacturer,
 * supported and current settings, class of device, and names.
 * NOTE(review): the matching hci_dev_lock() appears elided in this extract.
 */
1162 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1163 void *data, u16 data_len)
1165 struct mgmt_rp_read_info rp;
1167 BT_DBG("sock %p %s", sk, hdev->name);
1171 memset(&rp, 0, sizeof(rp));
1173 bacpy(&rp.bdaddr, &hdev->bdaddr);
1175 rp.version = hdev->hci_ver;
1176 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1178 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1179 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1181 memcpy(rp.dev_class, hdev->dev_class, 3);
1183 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1184 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1186 hci_dev_unlock(hdev);
1188 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending_cmd and its owned resources.
 * NOTE(review): the body (param/sk/cmd frees) is elided in this extract.
 */
1192 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and register a pending_cmd on hdev->mgmt_pending, copying the
 * command parameters (kmemdup) and recording opcode and controller index.
 * NOTE(review): allocation-failure paths and the socket reference handling
 * appear elided in this extract.
 */
1199 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1200 struct hci_dev *hdev, void *data,
1203 struct pending_cmd *cmd;
1205 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1209 cmd->opcode = opcode;
1210 cmd->index = hdev->id;
1212 cmd->param = kmemdup(data, len, GFP_KERNEL);
1218 cmd->param_len = len;
1223 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke cb on every pending command matching opcode (opcode 0 matches
 * all); uses the _safe iterator so callbacks may remove entries.
 * NOTE(review): the cb invocation line appears elided in this extract.
 */
1228 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1229 void (*cb)(struct pending_cmd *cmd,
1233 struct pending_cmd *cmd, *tmp;
1235 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1236 if (opcode > 0 && cmd->opcode != opcode)
/* Unlink a pending command from its list and free it. */
1243 static void mgmt_pending_remove(struct pending_cmd *cmd)
1245 list_del(&cmd->list);
1246 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the current settings mask. */
1249 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1251 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1253 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* HCI request completion callback for clean_up_hci_state(): once no
 * connections remain, fast-track the queued power-off work.
 */
1257 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1259 BT_DBG("%s status 0x%02x", hdev->name, status);
1261 if (hci_conn_count(hdev) == 0) {
1262 cancel_delayed_work(&hdev->power_off);
1263 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the commands needed to stop any active discovery: inquiry cancel
 * or LE scan disable while FINDING, remote-name-request cancel while
 * RESOLVING, and LE scan disable for plain passive scanning. Returns
 * whether anything was actually queued (per the visible structure; some
 * return statements appear elided in this extract).
 */
1267 static bool hci_stop_discovery(struct hci_request *req)
1269 struct hci_dev *hdev = req->hdev;
1270 struct hci_cp_remote_name_req_cancel cp;
1271 struct inquiry_entry *e;
1273 switch (hdev->discovery.state) {
1274 case DISCOVERY_FINDING:
1275 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1276 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1278 cancel_delayed_work(&hdev->le_scan_disable);
1279 hci_req_add_le_scan_disable(req);
1284 case DISCOVERY_RESOLVING:
1285 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1290 bacpy(&cp.bdaddr, &e->data.bdaddr);
1291 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1297 /* Passive scanning */
1298 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1299 hci_req_add_le_scan_disable(req);
/* Build and run the HCI request that quiesces a controller before power
 * off: disable page/inquiry scan, stop advertising, stop discovery, then
 * walk the connection hash disconnecting established links (reason 0x15,
 * "power off"), cancelling in-progress LE/ACL connection attempts, and
 * rejecting incoming ACL/SCO requests. On success, if discovery was
 * stopped, the discovery state machine is moved to STOPPING.
 * NOTE(review): the switch-case labels for the connection states and the
 * final return appear elided in this extract.
 */
1309 static int clean_up_hci_state(struct hci_dev *hdev)
1311 struct hci_request req;
1312 struct hci_conn *conn;
1313 bool discov_stopped;
1316 hci_req_init(&req, hdev);
1318 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1319 test_bit(HCI_PSCAN, &hdev->flags)) {
1321 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1324 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1325 disable_advertising(&req);
1327 discov_stopped = hci_stop_discovery(&req);
1329 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1330 struct hci_cp_disconnect dc;
1331 struct hci_cp_reject_conn_req rej;
1333 switch (conn->state) {
1336 dc.handle = cpu_to_le16(conn->handle);
1337 dc.reason = 0x15; /* Terminated due to Power Off */
1338 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1341 if (conn->type == LE_LINK)
1342 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1344 else if (conn->type == ACL_LINK)
1345 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1349 bacpy(&rej.bdaddr, &conn->dst);
1350 rej.reason = 0x15; /* Terminated due to Power Off */
1351 if (conn->type == ACL_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1354 else if (conn->type == SCO_LINK)
1355 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1361 err = hci_req_run(&req, clean_up_hci_complete);
1362 if (!err && discov_stopped)
1363 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler. Validates val (0/1 only), rejects when a
 * SET_POWERED command is already pending, short-circuits when the device
 * is already in the requested state, and handles the auto-off race by
 * powering via mgmt_powered() directly. Otherwise registers a pending
 * command and queues power_on work, or runs clean_up_hci_state() followed
 * by delayed power_off (fast-tracked when no HCI commands were queued,
 * i.e. -ENODATA). NOTE(review): the matching hci_dev_lock() and several
 * error-path lines appear elided in this extract.
 */
1368 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1371 struct mgmt_mode *cp = data;
1372 struct pending_cmd *cmd;
1375 BT_DBG("request for %s", hdev->name);
1377 if (cp->val != 0x00 && cp->val != 0x01)
1378 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1379 MGMT_STATUS_INVALID_PARAMS);
1383 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1384 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1389 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1390 cancel_delayed_work(&hdev->power_off);
1393 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1395 err = mgmt_powered(hdev, 1);
1400 if (!!cp->val == hdev_is_powered(hdev)) {
1401 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1405 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1412 queue_work(hdev->req_workqueue, &hdev->power_on);
1415 /* Disconnect connections, stop scans, etc */
1416 err = clean_up_hci_state(hdev);
1418 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1419 HCI_POWER_OFF_TIMEOUT);
1421 /* ENODATA means there were no HCI commands queued */
1422 if (err == -ENODATA) {
1423 cancel_delayed_work(&hdev->power_off);
1424 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1430 hci_dev_unlock(hdev);
/* Broadcast a MGMT_EV_NEW_SETTINGS event carrying the current settings,
 * skipping the socket that caused the change.
 */
1434 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1438 ev = cpu_to_le32(get_current_settings(hdev));
1440 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Public wrapper: emit NEW_SETTINGS to all mgmt sockets. */
1443 int mgmt_new_settings(struct hci_dev *hdev)
1445 return new_settings(hdev, NULL);
/* Fragment of struct cmd_lookup (accumulator passed to settings_rsp()).
 * NOTE(review): the struct header and the sk member are missing from this
 * extract; only the hdev member is visible.
 */
1450 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer each pending command with the
 * current settings, unlink and free it, and retain the first socket seen
 * in the cmd_lookup accumulator (with a reference) for later use.
 */
1454 static void settings_rsp(struct pending_cmd *cmd, void *data)
1456 struct cmd_lookup *match = data;
1458 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1460 list_del(&cmd->list);
1462 if (match->sk == NULL) {
1463 match->sk = cmd->sk;
1464 sock_hold(match->sk);
1467 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with *status
 * (passed via data) and remove it.
 */
1470 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1474 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1475 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own cmd_complete
 * handler when set, falling back to a plain status reply otherwise.
 */
1478 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1480 if (cmd->cmd_complete) {
1483 cmd->cmd_complete(cmd, *status);
1484 mgmt_pending_remove(cmd);
1489 cmd_status_rsp(cmd, data);
/* Default cmd_complete implementation: echo the stored parameters back as
 * the command-complete payload.
 */
1492 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1494 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1495 cmd->param, cmd->param_len);
/* cmd_complete implementation for address-based commands: reply with just
 * the leading mgmt_addr_info portion of the stored parameters.
 */
1498 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1500 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1501 sizeof(struct mgmt_addr_info))
/* Gate for BR/EDR-only commands: NOT_SUPPORTED without the LMP feature,
 * REJECTED when BR/EDR is administratively disabled.
 */
1504 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1506 if (!lmp_bredr_capable(hdev))
1507 return MGMT_STATUS_NOT_SUPPORTED;
1508 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1509 return MGMT_STATUS_REJECTED;
1511 return MGMT_STATUS_SUCCESS;
/* Gate for LE-only commands, mirroring mgmt_bredr_support(). */
1514 static u8 mgmt_le_support(struct hci_dev *hdev)
1516 if (!lmp_le_capable(hdev))
1517 return MGMT_STATUS_NOT_SUPPORTED;
1518 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1519 return MGMT_STATUS_REJECTED;
1521 return MGMT_STATUS_SUCCESS;
1524 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1527 struct pending_cmd *cmd;
1528 struct mgmt_mode *cp;
1529 struct hci_request req;
1532 BT_DBG("status 0x%02x", status);
1536 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1541 u8 mgmt_err = mgmt_status(status);
1542 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1543 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1549 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1552 if (hdev->discov_timeout > 0) {
1553 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1554 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1558 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1562 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1565 new_settings(hdev, cmd->sk);
1567 /* When the discoverable mode gets changed, make sure
1568 * that class of device has the limited discoverable
1569 * bit correctly set. Also update page scan based on whitelist
1572 hci_req_init(&req, hdev);
1573 __hci_update_page_scan(&req);
1575 hci_req_run(&req, NULL);
1578 mgmt_pending_remove(cmd);
1581 hci_dev_unlock(hdev);
1584 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1587 struct mgmt_cp_set_discoverable *cp = data;
1588 struct pending_cmd *cmd;
1589 struct hci_request req;
1594 BT_DBG("request for %s", hdev->name);
1596 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1597 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1598 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_REJECTED);
1601 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1602 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1603 MGMT_STATUS_INVALID_PARAMS);
1605 timeout = __le16_to_cpu(cp->timeout);
1607 /* Disabling discoverable requires that no timeout is set,
1608 * and enabling limited discoverable requires a timeout.
1610 if ((cp->val == 0x00 && timeout > 0) ||
1611 (cp->val == 0x02 && timeout == 0))
1612 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1613 MGMT_STATUS_INVALID_PARAMS);
1617 if (!hdev_is_powered(hdev) && timeout > 0) {
1618 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1619 MGMT_STATUS_NOT_POWERED);
1623 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1624 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1625 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1630 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1631 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1632 MGMT_STATUS_REJECTED);
1636 if (!hdev_is_powered(hdev)) {
1637 bool changed = false;
1639 /* Setting limited discoverable when powered off is
1640 * not a valid operation since it requires a timeout
1641 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1643 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1644 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1648 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1653 err = new_settings(hdev, sk);
1658 /* If the current mode is the same, then just update the timeout
1659 * value with the new value. And if only the timeout gets updated,
1660 * then no need for any HCI transactions.
1662 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1663 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1664 &hdev->dev_flags)) {
1665 cancel_delayed_work(&hdev->discov_off);
1666 hdev->discov_timeout = timeout;
1668 if (cp->val && hdev->discov_timeout > 0) {
1669 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1670 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1674 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1678 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1684 /* Cancel any potential discoverable timeout that might be
1685 * still active and store new timeout value. The arming of
1686 * the timeout happens in the complete handler.
1688 cancel_delayed_work(&hdev->discov_off);
1689 hdev->discov_timeout = timeout;
1691 /* Limited discoverable mode */
1692 if (cp->val == 0x02)
1693 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1695 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1697 hci_req_init(&req, hdev);
1699 /* The procedure for LE-only controllers is much simpler - just
1700 * update the advertising data.
1702 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1708 struct hci_cp_write_current_iac_lap hci_cp;
1710 if (cp->val == 0x02) {
1711 /* Limited discoverable mode */
1712 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1713 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1714 hci_cp.iac_lap[1] = 0x8b;
1715 hci_cp.iac_lap[2] = 0x9e;
1716 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1717 hci_cp.iac_lap[4] = 0x8b;
1718 hci_cp.iac_lap[5] = 0x9e;
1720 /* General discoverable mode */
1722 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1723 hci_cp.iac_lap[1] = 0x8b;
1724 hci_cp.iac_lap[2] = 0x9e;
1727 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1728 (hci_cp.num_iac * 3) + 1, &hci_cp);
1730 scan |= SCAN_INQUIRY;
1732 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1735 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1738 update_adv_data(&req);
1740 err = hci_req_run(&req, set_discoverable_complete);
1742 mgmt_pending_remove(cmd);
1745 hci_dev_unlock(hdev);
1749 static void write_fast_connectable(struct hci_request *req, bool enable)
1751 struct hci_dev *hdev = req->hdev;
1752 struct hci_cp_write_page_scan_activity acp;
1755 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1758 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1762 type = PAGE_SCAN_TYPE_INTERLACED;
1764 /* 160 msec page scan interval */
1765 acp.interval = cpu_to_le16(0x0100);
1767 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1769 /* default 1.28 sec page scan */
1770 acp.interval = cpu_to_le16(0x0800);
1773 acp.window = cpu_to_le16(0x0012);
1775 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1776 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1777 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1780 if (hdev->page_scan_type != type)
1781 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1784 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1787 struct pending_cmd *cmd;
1788 struct mgmt_mode *cp;
1789 bool conn_changed, discov_changed;
1791 BT_DBG("status 0x%02x", status);
1795 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1800 u8 mgmt_err = mgmt_status(status);
1801 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1807 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1809 discov_changed = false;
1811 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1813 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1817 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1819 if (conn_changed || discov_changed) {
1820 new_settings(hdev, cmd->sk);
1821 hci_update_page_scan(hdev);
1823 mgmt_update_adv_data(hdev);
1824 hci_update_background_scan(hdev);
1828 mgmt_pending_remove(cmd);
1831 hci_dev_unlock(hdev);
1834 static int set_connectable_update_settings(struct hci_dev *hdev,
1835 struct sock *sk, u8 val)
1837 bool changed = false;
1840 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1844 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1846 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1847 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1850 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1855 hci_update_page_scan(hdev);
1856 hci_update_background_scan(hdev);
1857 return new_settings(hdev, sk);
1863 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1866 struct mgmt_mode *cp = data;
1867 struct pending_cmd *cmd;
1868 struct hci_request req;
1872 BT_DBG("request for %s", hdev->name);
1874 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1875 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1876 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1877 MGMT_STATUS_REJECTED);
1879 if (cp->val != 0x00 && cp->val != 0x01)
1880 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1881 MGMT_STATUS_INVALID_PARAMS);
1885 if (!hdev_is_powered(hdev)) {
1886 err = set_connectable_update_settings(hdev, sk, cp->val);
1890 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1891 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1892 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1897 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1903 hci_req_init(&req, hdev);
1905 /* If BR/EDR is not enabled and we disable advertising as a
1906 * by-product of disabling connectable, we need to update the
1907 * advertising flags.
1909 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1911 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1912 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1914 update_adv_data(&req);
1915 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1919 /* If we don't have any whitelist entries just
1920 * disable all scanning. If there are entries
1921 * and we had both page and inquiry scanning
1922 * enabled then fall back to only page scanning.
1923 * Otherwise no changes are needed.
1925 if (list_empty(&hdev->whitelist))
1926 scan = SCAN_DISABLED;
1927 else if (test_bit(HCI_ISCAN, &hdev->flags))
1930 goto no_scan_update;
1932 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1933 hdev->discov_timeout > 0)
1934 cancel_delayed_work(&hdev->discov_off);
1937 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1941 /* If we're going from non-connectable to connectable or
1942 * vice-versa when fast connectable is enabled ensure that fast
1943 * connectable gets disabled. write_fast_connectable won't do
1944 * anything if the page scan parameters are already what they
1947 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1948 write_fast_connectable(&req, false);
1950 /* Update the advertising parameters if necessary */
1951 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1952 enable_advertising(&req);
1954 err = hci_req_run(&req, set_connectable_complete);
1956 mgmt_pending_remove(cmd);
1957 if (err == -ENODATA)
1958 err = set_connectable_update_settings(hdev, sk,
1964 hci_dev_unlock(hdev);
1968 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1971 struct mgmt_mode *cp = data;
1975 BT_DBG("request for %s", hdev->name);
1977 if (cp->val != 0x00 && cp->val != 0x01)
1978 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1979 MGMT_STATUS_INVALID_PARAMS);
1984 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1986 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1988 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1993 err = new_settings(hdev, sk);
1996 hci_dev_unlock(hdev);
2000 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2003 struct mgmt_mode *cp = data;
2004 struct pending_cmd *cmd;
2008 BT_DBG("request for %s", hdev->name);
2010 status = mgmt_bredr_support(hdev);
2012 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2015 if (cp->val != 0x00 && cp->val != 0x01)
2016 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2017 MGMT_STATUS_INVALID_PARAMS);
2021 if (!hdev_is_powered(hdev)) {
2022 bool changed = false;
2024 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2025 &hdev->dev_flags)) {
2026 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2030 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2035 err = new_settings(hdev, sk);
2040 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2041 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2048 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2049 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2053 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2059 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2061 mgmt_pending_remove(cmd);
2066 hci_dev_unlock(hdev);
2070 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2072 struct mgmt_mode *cp = data;
2073 struct pending_cmd *cmd;
2077 BT_DBG("request for %s", hdev->name);
2079 status = mgmt_bredr_support(hdev);
2081 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2083 if (!lmp_ssp_capable(hdev))
2084 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2085 MGMT_STATUS_NOT_SUPPORTED);
2087 if (cp->val != 0x00 && cp->val != 0x01)
2088 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2089 MGMT_STATUS_INVALID_PARAMS);
2093 if (!hdev_is_powered(hdev)) {
2097 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2100 changed = test_and_clear_bit(HCI_SSP_ENABLED,
2103 changed = test_and_clear_bit(HCI_HS_ENABLED,
2106 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2109 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2114 err = new_settings(hdev, sk);
2119 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2120 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2121 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2126 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2127 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2131 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2137 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2138 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2139 sizeof(cp->val), &cp->val);
2141 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2143 mgmt_pending_remove(cmd);
2148 hci_dev_unlock(hdev);
2152 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2154 struct mgmt_mode *cp = data;
2159 BT_DBG("request for %s", hdev->name);
2161 status = mgmt_bredr_support(hdev);
2163 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2165 if (!lmp_ssp_capable(hdev))
2166 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2167 MGMT_STATUS_NOT_SUPPORTED);
2169 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2170 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2171 MGMT_STATUS_REJECTED);
2173 if (cp->val != 0x00 && cp->val != 0x01)
2174 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2175 MGMT_STATUS_INVALID_PARAMS);
2180 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2182 if (hdev_is_powered(hdev)) {
2183 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2184 MGMT_STATUS_REJECTED);
2188 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2191 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2196 err = new_settings(hdev, sk);
2199 hci_dev_unlock(hdev);
2203 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2205 struct cmd_lookup match = { NULL, hdev };
2210 u8 mgmt_err = mgmt_status(status);
2212 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2217 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2219 new_settings(hdev, match.sk);
2224 /* Make sure the controller has a good default for
2225 * advertising data. Restrict the update to when LE
2226 * has actually been enabled. During power on, the
2227 * update in powered_update_hci will take care of it.
2229 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2230 struct hci_request req;
2232 hci_req_init(&req, hdev);
2233 update_adv_data(&req);
2234 update_scan_rsp_data(&req);
2235 __hci_update_background_scan(&req);
2236 hci_req_run(&req, NULL);
2240 hci_dev_unlock(hdev);
2243 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2245 struct mgmt_mode *cp = data;
2246 struct hci_cp_write_le_host_supported hci_cp;
2247 struct pending_cmd *cmd;
2248 struct hci_request req;
2252 BT_DBG("request for %s", hdev->name);
2254 if (!lmp_le_capable(hdev))
2255 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2256 MGMT_STATUS_NOT_SUPPORTED);
2258 if (cp->val != 0x00 && cp->val != 0x01)
2259 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2260 MGMT_STATUS_INVALID_PARAMS);
2262 /* LE-only devices do not allow toggling LE on/off */
2263 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2264 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2265 MGMT_STATUS_REJECTED);
2270 enabled = lmp_host_le_capable(hdev);
2272 if (!hdev_is_powered(hdev) || val == enabled) {
2273 bool changed = false;
2275 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2276 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2280 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2281 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2285 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2290 err = new_settings(hdev, sk);
2295 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2296 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2297 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2302 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2308 hci_req_init(&req, hdev);
2310 memset(&hci_cp, 0, sizeof(hci_cp));
2314 hci_cp.simul = 0x00;
2316 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2317 disable_advertising(&req);
2320 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2323 err = hci_req_run(&req, le_enable_complete);
2325 mgmt_pending_remove(cmd);
2328 hci_dev_unlock(hdev);
2332 /* This is a helper function to test for pending mgmt commands that can
2333 * cause CoD or EIR HCI commands. We can only allow one such pending
2334 * mgmt command at a time since otherwise we cannot easily track what
2335 * the current values are, will be, and based on that calculate if a new
2336 * HCI command needs to be sent and if yes with what value.
2338 static bool pending_eir_or_class(struct hci_dev *hdev)
2340 struct pending_cmd *cmd;
2342 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2343 switch (cmd->opcode) {
2344 case MGMT_OP_ADD_UUID:
2345 case MGMT_OP_REMOVE_UUID:
2346 case MGMT_OP_SET_DEV_CLASS:
2347 case MGMT_OP_SET_POWERED:
2355 static const u8 bluetooth_base_uuid[] = {
2356 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2357 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2360 static u8 get_uuid_size(const u8 *uuid)
2364 if (memcmp(uuid, bluetooth_base_uuid, 12))
2367 val = get_unaligned_le32(&uuid[12]);
2374 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2376 struct pending_cmd *cmd;
2380 cmd = mgmt_pending_find(mgmt_op, hdev);
2384 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2385 hdev->dev_class, 3);
2387 mgmt_pending_remove(cmd);
2390 hci_dev_unlock(hdev);
2393 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2395 BT_DBG("status 0x%02x", status);
2397 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2400 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2402 struct mgmt_cp_add_uuid *cp = data;
2403 struct pending_cmd *cmd;
2404 struct hci_request req;
2405 struct bt_uuid *uuid;
2408 BT_DBG("request for %s", hdev->name);
2412 if (pending_eir_or_class(hdev)) {
2413 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2418 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2424 memcpy(uuid->uuid, cp->uuid, 16);
2425 uuid->svc_hint = cp->svc_hint;
2426 uuid->size = get_uuid_size(cp->uuid);
2428 list_add_tail(&uuid->list, &hdev->uuids);
2430 hci_req_init(&req, hdev);
2435 err = hci_req_run(&req, add_uuid_complete);
2437 if (err != -ENODATA)
2440 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2441 hdev->dev_class, 3);
2445 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2454 hci_dev_unlock(hdev);
2458 static bool enable_service_cache(struct hci_dev *hdev)
2460 if (!hdev_is_powered(hdev))
2463 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2464 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2472 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2474 BT_DBG("status 0x%02x", status);
2476 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2479 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2482 struct mgmt_cp_remove_uuid *cp = data;
2483 struct pending_cmd *cmd;
2484 struct bt_uuid *match, *tmp;
2485 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2486 struct hci_request req;
2489 BT_DBG("request for %s", hdev->name);
2493 if (pending_eir_or_class(hdev)) {
2494 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2499 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2500 hci_uuids_clear(hdev);
2502 if (enable_service_cache(hdev)) {
2503 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2504 0, hdev->dev_class, 3);
2513 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2514 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2517 list_del(&match->list);
2523 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2524 MGMT_STATUS_INVALID_PARAMS);
2529 hci_req_init(&req, hdev);
2534 err = hci_req_run(&req, remove_uuid_complete);
2536 if (err != -ENODATA)
2539 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2540 hdev->dev_class, 3);
2544 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2553 hci_dev_unlock(hdev);
2557 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2559 BT_DBG("status 0x%02x", status);
2561 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2564 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2567 struct mgmt_cp_set_dev_class *cp = data;
2568 struct pending_cmd *cmd;
2569 struct hci_request req;
2572 BT_DBG("request for %s", hdev->name);
2574 if (!lmp_bredr_capable(hdev))
2575 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2576 MGMT_STATUS_NOT_SUPPORTED);
2580 if (pending_eir_or_class(hdev)) {
2581 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2586 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2587 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2588 MGMT_STATUS_INVALID_PARAMS);
2592 hdev->major_class = cp->major;
2593 hdev->minor_class = cp->minor;
2595 if (!hdev_is_powered(hdev)) {
2596 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2597 hdev->dev_class, 3);
2601 hci_req_init(&req, hdev);
2603 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2604 hci_dev_unlock(hdev);
2605 cancel_delayed_work_sync(&hdev->service_cache);
2612 err = hci_req_run(&req, set_class_complete);
2614 if (err != -ENODATA)
2617 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2618 hdev->dev_class, 3);
2622 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2631 hci_dev_unlock(hdev);
2635 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2638 struct mgmt_cp_load_link_keys *cp = data;
2639 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2640 sizeof(struct mgmt_link_key_info));
2641 u16 key_count, expected_len;
2645 BT_DBG("request for %s", hdev->name);
2647 if (!lmp_bredr_capable(hdev))
2648 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2649 MGMT_STATUS_NOT_SUPPORTED);
2651 key_count = __le16_to_cpu(cp->key_count);
2652 if (key_count > max_key_count) {
2653 BT_ERR("load_link_keys: too big key_count value %u",
2655 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2656 MGMT_STATUS_INVALID_PARAMS);
2659 expected_len = sizeof(*cp) + key_count *
2660 sizeof(struct mgmt_link_key_info);
2661 if (expected_len != len) {
2662 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2664 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2665 MGMT_STATUS_INVALID_PARAMS);
2668 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2669 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2670 MGMT_STATUS_INVALID_PARAMS);
2672 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2675 for (i = 0; i < key_count; i++) {
2676 struct mgmt_link_key_info *key = &cp->keys[i];
2678 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2679 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2680 MGMT_STATUS_INVALID_PARAMS);
2685 hci_link_keys_clear(hdev);
2688 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2691 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2695 new_settings(hdev, NULL);
2697 for (i = 0; i < key_count; i++) {
2698 struct mgmt_link_key_info *key = &cp->keys[i];
2700 /* Always ignore debug keys and require a new pairing if
2701 * the user wants to use them.
2703 if (key->type == HCI_LK_DEBUG_COMBINATION)
2706 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2707 key->type, key->pin_len, NULL);
2710 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2712 hci_dev_unlock(hdev);
2717 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2718 u8 addr_type, struct sock *skip_sk)
2720 struct mgmt_ev_device_unpaired ev;
2722 bacpy(&ev.addr.bdaddr, bdaddr);
2723 ev.addr.type = addr_type;
2725 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2729 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2732 struct mgmt_cp_unpair_device *cp = data;
2733 struct mgmt_rp_unpair_device rp;
2734 struct hci_cp_disconnect dc;
2735 struct pending_cmd *cmd;
2736 struct hci_conn *conn;
2739 memset(&rp, 0, sizeof(rp));
2740 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2741 rp.addr.type = cp->addr.type;
2743 if (!bdaddr_type_is_valid(cp->addr.type))
2744 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2745 MGMT_STATUS_INVALID_PARAMS,
2748 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2749 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2750 MGMT_STATUS_INVALID_PARAMS,
2755 if (!hdev_is_powered(hdev)) {
2756 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2757 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2761 if (cp->addr.type == BDADDR_BREDR) {
2762 /* If disconnection is requested, then look up the
2763 * connection. If the remote device is connected, it
2764 * will be later used to terminate the link.
2766 * Setting it to NULL explicitly will cause no
2767 * termination of the link.
2770 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2775 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2779 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2782 /* Defer clearing up the connection parameters
2783 * until closing to give a chance of keeping
2784 * them if a repairing happens.
2786 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2788 /* If disconnection is not requested, then
2789 * clear the connection variable so that the
2790 * link is not terminated.
2792 if (!cp->disconnect)
2796 if (cp->addr.type == BDADDR_LE_PUBLIC)
2797 addr_type = ADDR_LE_DEV_PUBLIC;
2799 addr_type = ADDR_LE_DEV_RANDOM;
2801 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2803 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2807 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2808 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2812 /* If the connection variable is set, then termination of the
2813 * link is requested.
2816 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2818 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2822 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2829 cmd->cmd_complete = addr_cmd_complete;
2831 dc.handle = cpu_to_le16(conn->handle);
2832 dc.reason = 0x13; /* Remote User Terminated Connection */
2833 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2835 mgmt_pending_remove(cmd);
2838 hci_dev_unlock(hdev);
2842 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2845 struct mgmt_cp_disconnect *cp = data;
2846 struct mgmt_rp_disconnect rp;
2847 struct pending_cmd *cmd;
2848 struct hci_conn *conn;
2853 memset(&rp, 0, sizeof(rp));
2854 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2855 rp.addr.type = cp->addr.type;
2857 if (!bdaddr_type_is_valid(cp->addr.type))
2858 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2859 MGMT_STATUS_INVALID_PARAMS,
2864 if (!test_bit(HCI_UP, &hdev->flags)) {
2865 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2866 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2870 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2871 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2872 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2876 if (cp->addr.type == BDADDR_BREDR)
2877 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2880 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2882 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2883 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2884 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2888 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2894 cmd->cmd_complete = generic_cmd_complete;
2896 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2898 mgmt_pending_remove(cmd);
2901 hci_dev_unlock(hdev);
2905 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2907 switch (link_type) {
2909 switch (addr_type) {
2910 case ADDR_LE_DEV_PUBLIC:
2911 return BDADDR_LE_PUBLIC;
2914 /* Fallback to LE Random address type */
2915 return BDADDR_LE_RANDOM;
2919 /* Fallback to BR/EDR type */
2920 return BDADDR_BREDR;
2924 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2927 struct mgmt_rp_get_connections *rp;
2937 if (!hdev_is_powered(hdev)) {
2938 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2939 MGMT_STATUS_NOT_POWERED);
2944 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2945 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2949 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2950 rp = kmalloc(rp_len, GFP_KERNEL);
2957 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2958 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2960 bacpy(&rp->addr[i].bdaddr, &c->dst);
2961 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2962 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2967 rp->conn_count = cpu_to_le16(i);
2969 /* Recalculate length in case of filtered SCO connections, etc */
2970 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2972 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2978 hci_dev_unlock(hdev);
2982 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2983 struct mgmt_cp_pin_code_neg_reply *cp)
2985 struct pending_cmd *cmd;
2988 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2993 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2994 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2996 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to the
 * controller for the ACL connection to cp->addr.bdaddr.  If the link
 * requires high security, anything shorter than a 16-byte PIN is rejected
 * and converted into a negative reply instead.
 */
3001 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3004 struct hci_conn *conn;
3005 struct mgmt_cp_pin_code_reply *cp = data;
3006 struct hci_cp_pin_code_reply reply;
3007 struct pending_cmd *cmd;
3014 if (!hdev_is_powered(hdev)) {
3015 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3016 MGMT_STATUS_NOT_POWERED);
/* A PIN reply only makes sense for an existing BR/EDR (ACL) link. */
3020 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3022 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3023 MGMT_STATUS_NOT_CONNECTED);
/* BT_SECURITY_HIGH mandates a full 16-digit PIN; shorter PINs are
 * NAKed towards the remote via a negative reply.
 */
3027 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3028 struct mgmt_cp_pin_code_neg_reply ncp;
3030 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3032 BT_ERR("PIN code is not 16 bytes long");
3034 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3036 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3037 MGMT_STATUS_INVALID_PARAMS);
3042 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3048 cmd->cmd_complete = addr_cmd_complete;
3050 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3051 reply.pin_len = cp->pin_len;
3052 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3054 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
/* HCI send failed: drop the pending entry (condition elided). */
3056 mgmt_pending_remove(cmd);
3059 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairing attempts.  Values beyond SMP_IO_KEYBOARD_DISPLAY are
 * rejected as invalid parameters.
 */
3063 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3066 struct mgmt_cp_set_io_capability *cp = data;
3070 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3071 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3072 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3076 hdev->io_capability = cp->io_capability;
3078 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3079 hdev->io_capability);
3081 hci_dev_unlock(hdev);
3083 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Look up the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at this connection; used to correlate pairing callbacks with the
 * originating mgmt request.  (Return statements elided in this view.)
 */
3087 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3089 struct hci_dev *hdev = conn->hdev;
3090 struct pending_cmd *cmd;
3092 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3093 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3096 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE request: send the mgmt reply, detach the pairing
 * callbacks from the connection and drop the reference taken when the
 * pairing started.
 */
3105 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3107 struct mgmt_rp_pair_device rp;
3108 struct hci_conn *conn = cmd->user_data;
3111 bacpy(&rp.addr.bdaddr, &conn->dst);
3112 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3114 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3117 /* So we don't get further callbacks for this connection */
3118 conn->connect_cfm_cb = NULL;
3119 conn->security_cfm_cb = NULL;
3120 conn->disconn_cfm_cb = NULL;
3122 hci_conn_drop(conn);
3124 /* The device is paired so there is no need to remove
3125 * its connection parameters anymore.
3127 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over LE finishes; completes any
 * pending PAIR_DEVICE mgmt command for this connection with success or
 * failure.  (NULL check on the lookup result elided in this view.)
 */
3134 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3136 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3137 struct pending_cmd *cmd;
3139 cmd = find_pairing(conn);
3141 cmd->cmd_complete(cmd, status);
3142 mgmt_pending_remove(cmd);
/* Connection callback (BR/EDR path): completes the pending PAIR_DEVICE
 * command with the HCI status translated to a mgmt status.
 */
3146 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3148 struct pending_cmd *cmd;
3150 BT_DBG("status %u", status);
3152 cmd = find_pairing(conn);
3154 BT_DBG("Unable to find a pending command");
3158 cmd->cmd_complete(cmd, mgmt_status(status));
3159 mgmt_pending_remove(cmd);
/* Connection callback (LE path).  For LE a successful connect is not
 * proof of pairing, so success is handled elsewhere (the filtering
 * condition on status is elided in this view); failures complete the
 * pending PAIR_DEVICE command.
 */
3162 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3164 struct pending_cmd *cmd;
3166 BT_DBG("status %u", status);
3171 cmd = find_pairing(conn);
3173 BT_DBG("Unable to find a pending command");
3177 cmd->cmd_complete(cmd, mgmt_status(status));
3178 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Creates the BR/EDR or LE connection (remembering LE connection
 * parameters for future reconnects), attaches pairing callbacks and
 * queues a pending command completed by the *_pairing_complete_cb paths.
 * NOTE(review): several error-path lines are elided from this view.
 */
3181 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3184 struct mgmt_cp_pair_device *cp = data;
3185 struct mgmt_rp_pair_device rp;
3186 struct pending_cmd *cmd;
3187 u8 sec_level, auth_type;
3188 struct hci_conn *conn;
3193 memset(&rp, 0, sizeof(rp));
3194 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3195 rp.addr.type = cp->addr.type;
/* Parameter validation before taking any locks. */
3197 if (!bdaddr_type_is_valid(cp->addr.type))
3198 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3199 MGMT_STATUS_INVALID_PARAMS,
3202 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3203 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3204 MGMT_STATUS_INVALID_PARAMS,
3209 if (!hdev_is_powered(hdev)) {
3210 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3211 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3215 sec_level = BT_SECURITY_MEDIUM;
3216 auth_type = HCI_AT_DEDICATED_BONDING;
3218 if (cp->addr.type == BDADDR_BREDR) {
3219 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3224 /* Convert from L2CAP channel address type to HCI address type
3226 if (cp->addr.type == BDADDR_LE_PUBLIC)
3227 addr_type = ADDR_LE_DEV_PUBLIC;
3229 addr_type = ADDR_LE_DEV_RANDOM;
3231 /* When pairing a new device, it is expected to remember
3232 * this device for future connections. Adding the connection
3233 * parameter information ahead of time allows tracking
3234 * of the slave preferred values and will speed up any
3235 * further connection establishment.
3237 * If connection parameters already exist, then they
3238 * will be kept and this function does nothing.
3240 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3242 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3243 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map connect-attempt errno onto the closest mgmt status code. */
3250 if (PTR_ERR(conn) == -EBUSY)
3251 status = MGMT_STATUS_BUSY;
3252 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3253 status = MGMT_STATUS_NOT_SUPPORTED;
3254 else if (PTR_ERR(conn) == -ECONNREFUSED)
3255 status = MGMT_STATUS_REJECTED;
3257 status = MGMT_STATUS_CONNECT_FAILED;
3259 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connection with callbacks already set is being paired elsewhere. */
3265 if (conn->connect_cfm_cb) {
3266 hci_conn_drop(conn);
3267 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3268 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3272 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3275 hci_conn_drop(conn);
3279 cmd->cmd_complete = pairing_complete;
3281 /* For LE, just connecting isn't a proof that the pairing finished */
3282 if (cp->addr.type == BDADDR_BREDR) {
3283 conn->connect_cfm_cb = pairing_complete_cb;
3284 conn->security_cfm_cb = pairing_complete_cb;
3285 conn->disconn_cfm_cb = pairing_complete_cb;
3287 conn->connect_cfm_cb = le_pairing_complete_cb;
3288 conn->security_cfm_cb = le_pairing_complete_cb;
3289 conn->disconn_cfm_cb = le_pairing_complete_cb;
3292 conn->io_capability = cp->io_cap;
/* Hold a reference for the pending command's user_data. */
3293 cmd->user_data = hci_conn_get(conn);
/* If already connected and security is in place, complete immediately. */
3295 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3296 hci_conn_security(conn, sec_level, auth_type, true)) {
3297 cmd->cmd_complete(cmd, 0);
3298 mgmt_pending_remove(cmd);
3304 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress pairing.
 * Valid only while a PAIR_DEVICE command is pending and the supplied
 * address matches the connection being paired.
 */
3308 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3311 struct mgmt_addr_info *addr = data;
3312 struct pending_cmd *cmd;
3313 struct hci_conn *conn;
3320 if (!hdev_is_powered(hdev)) {
3321 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3322 MGMT_STATUS_NOT_POWERED);
3326 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3328 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3329 MGMT_STATUS_INVALID_PARAMS);
3333 conn = cmd->user_data;
/* Address must refer to the device actually being paired. */
3335 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3336 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3337 MGMT_STATUS_INVALID_PARAMS);
3341 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3342 mgmt_pending_remove(cmd);
3344 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3345 addr, sizeof(*addr));
3347 hci_dev_unlock(hdev);
/* Common backend for all user pairing responses (PIN/confirm/passkey,
 * positive and negative).  For LE links the reply is handled by the SMP
 * layer; for BR/EDR it is forwarded to the controller as the given
 * hci_op, with the passkey attached when the op requires one.
 */
3351 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3352 struct mgmt_addr_info *addr, u16 mgmt_op,
3353 u16 hci_op, __le32 passkey)
3355 struct pending_cmd *cmd;
3356 struct hci_conn *conn;
3361 if (!hdev_is_powered(hdev)) {
3362 err = cmd_complete(sk, hdev->id, mgmt_op,
3363 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the link on the transport implied by the address type. */
3368 if (addr->type == BDADDR_BREDR)
3369 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3371 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3374 err = cmd_complete(sk, hdev->id, mgmt_op,
3375 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are consumed by SMP, not sent over HCI. */
3380 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3381 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3383 err = cmd_complete(sk, hdev->id, mgmt_op,
3384 MGMT_STATUS_SUCCESS, addr,
3387 err = cmd_complete(sk, hdev->id, mgmt_op,
3388 MGMT_STATUS_FAILED, addr,
3394 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3400 cmd->cmd_complete = addr_cmd_complete;
3402 /* Continue with pairing via HCI */
3403 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3404 struct hci_cp_user_passkey_reply cp;
3406 bacpy(&cp.bdaddr, &addr->bdaddr);
3407 cp.passkey = passkey;
3408 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3410 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3414 mgmt_pending_remove(cmd);
3417 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3421 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3422 void *data, u16 len)
3424 struct mgmt_cp_pin_code_neg_reply *cp = data;
3428 return user_pairing_resp(sk, hdev, &cp->addr,
3429 MGMT_OP_PIN_CODE_NEG_REPLY,
3430 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size payload, then
 * delegate to user_pairing_resp().
 */
3433 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3436 struct mgmt_cp_user_confirm_reply *cp = data;
3440 if (len != sizeof(*cp))
3441 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3442 MGMT_STATUS_INVALID_PARAMS);
3444 return user_pairing_resp(sk, hdev, &cp->addr,
3445 MGMT_OP_USER_CONFIRM_REPLY,
3446 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3449 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3450 void *data, u16 len)
3452 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3456 return user_pairing_resp(sk, hdev, &cp->addr,
3457 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3458 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forwards the user-entered passkey via
 * user_pairing_resp().
 */
3461 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3464 struct mgmt_cp_user_passkey_reply *cp = data;
3468 return user_pairing_resp(sk, hdev, &cp->addr,
3469 MGMT_OP_USER_PASSKEY_REPLY,
3470 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3473 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3474 void *data, u16 len)
3476 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3480 return user_pairing_resp(sk, hdev, &cp->addr,
3481 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3482 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (using hdev->dev_name) to the
 * given request.
 */
3485 static void update_name(struct hci_request *req)
3487 struct hci_dev *hdev = req->hdev;
3488 struct hci_cp_write_local_name cp;
3490 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3492 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion for SET_LOCAL_NAME: reply to the pending mgmt
 * command with the translated status (failure) or the new name (success).
 */
3495 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3497 struct mgmt_cp_set_local_name *cp;
3498 struct pending_cmd *cmd;
3500 BT_DBG("status 0x%02x", status);
3504 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3511 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3512 mgmt_status(status));
3514 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3517 mgmt_pending_remove(cmd);
3520 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device and short names.
 * Unchanged names return immediately; on an unpowered adapter only the
 * in-memory copies are updated and a LOCAL_NAME_CHANGED event is sent;
 * otherwise the change is pushed to the controller via an HCI request.
 */
3523 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3526 struct mgmt_cp_set_local_name *cp = data;
3527 struct pending_cmd *cmd;
3528 struct hci_request req;
3535 /* If the old values are the same as the new ones just return a
3536 * direct command complete event.
3538 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3539 !memcmp(hdev->short_name, cp->short_name,
3540 sizeof(hdev->short_name))) {
3541 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3546 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3548 if (!hdev_is_powered(hdev)) {
3549 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3551 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3556 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3562 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3568 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3570 hci_req_init(&req, hdev);
3572 if (lmp_bredr_capable(hdev)) {
3577 /* The name is stored in the scan response data and so
3578 * no need to update the advertising data here.
3580 if (lmp_le_capable(hdev))
3581 update_scan_rsp_data(&req);
3583 err = hci_req_run(&req, set_name_complete);
3585 mgmt_pending_remove(cmd);
3588 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request local OOB pairing data
 * from the controller.  Uses the extended variant when BR/EDR Secure
 * Connections is enabled so both P-192 and P-256 values are returned.
 */
3592 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3593 void *data, u16 data_len)
3595 struct pending_cmd *cmd;
3598 BT_DBG("%s", hdev->name);
3602 if (!hdev_is_powered(hdev)) {
3603 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3604 MGMT_STATUS_NOT_POWERED);
3608 if (!lmp_ssp_capable(hdev)) {
3609 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3610 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be in flight at a time. */
3614 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3615 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3620 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3626 if (bredr_sc_enabled(hdev))
3627 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3630 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3633 mgmt_pending_remove(cmd);
3636 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing data.
 * Accepts either the legacy payload (P-192 only, BR/EDR addresses only)
 * or the extended payload (P-192 + P-256); zeroed key/randomizer pairs
 * disable the corresponding OOB data set.
 */
3640 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3641 void *data, u16 len)
3643 struct mgmt_addr_info *addr = data;
3646 BT_DBG("%s ", hdev->name);
3648 if (!bdaddr_type_is_valid(addr->type))
3649 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3650 MGMT_STATUS_INVALID_PARAMS, addr,
/* Legacy (non-extended) payload: P-192 hash/randomizer only. */
3655 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3656 struct mgmt_cp_add_remote_oob_data *cp = data;
3659 if (cp->addr.type != BDADDR_BREDR) {
3660 err = cmd_complete(sk, hdev->id,
3661 MGMT_OP_ADD_REMOTE_OOB_DATA,
3662 MGMT_STATUS_INVALID_PARAMS,
3663 &cp->addr, sizeof(cp->addr));
3667 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3668 cp->addr.type, cp->hash,
3669 cp->rand, NULL, NULL);
3671 status = MGMT_STATUS_FAILED;
3673 status = MGMT_STATUS_SUCCESS;
3675 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3676 status, &cp->addr, sizeof(cp->addr));
3677 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3678 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3679 u8 *rand192, *hash192, *rand256, *hash256;
3682 if (bdaddr_type_is_le(cp->addr.type)) {
3683 /* Enforce zero-valued 192-bit parameters as
3684 * long as legacy SMP OOB isn't implemented.
3686 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3687 memcmp(cp->hash192, ZERO_KEY, 16)) {
3688 err = cmd_complete(sk, hdev->id,
3689 MGMT_OP_ADD_REMOTE_OOB_DATA,
3690 MGMT_STATUS_INVALID_PARAMS,
3691 addr, sizeof(*addr));
3698 /* In case one of the P-192 values is set to zero,
3699 * then just disable OOB data for P-192.
3701 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3702 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3706 rand192 = cp->rand192;
3707 hash192 = cp->hash192;
3711 /* In case one of the P-256 values is set to zero, then just
3712 * disable OOB data for P-256.
3714 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3715 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3719 rand256 = cp->rand256;
3720 hash256 = cp->hash256;
3723 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3724 cp->addr.type, hash192, rand192,
3727 status = MGMT_STATUS_FAILED;
3729 status = MGMT_STATUS_SUCCESS;
3731 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3732 status, &cp->addr, sizeof(cp->addr));
/* Neither recognized size: reject the payload. */
3734 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3735 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3736 MGMT_STATUS_INVALID_PARAMS);
3740 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB data
 * for one BR/EDR address, or clear all entries when BDADDR_ANY is given.
 */
3744 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3745 void *data, u16 len)
3747 struct mgmt_cp_remove_remote_oob_data *cp = data;
3751 BT_DBG("%s", hdev->name);
3753 if (cp->addr.type != BDADDR_BREDR)
3754 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3755 MGMT_STATUS_INVALID_PARAMS,
3756 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: wipe the whole OOB data store. */
3760 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3761 hci_remote_oob_data_clear(hdev);
3762 status = MGMT_STATUS_SUCCESS;
3766 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3768 status = MGMT_STATUS_INVALID_PARAMS;
3770 status = MGMT_STATUS_SUCCESS;
3773 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3774 status, &cp->addr, sizeof(cp->addr));
3776 hci_dev_unlock(hdev);
/* Populate @req with the HCI commands needed to start the currently
 * configured discovery type (BR/EDR inquiry, LE scan, or interleaved).
 * Returns false and fills *status with a mgmt error code when discovery
 * cannot be started.  (Return statements and some guards are elided in
 * this view.)
 */
3780 static bool trigger_discovery(struct hci_request *req, u8 *status)
3782 struct hci_dev *hdev = req->hdev;
3783 struct hci_cp_le_set_scan_param param_cp;
3784 struct hci_cp_le_set_scan_enable enable_cp;
3785 struct hci_cp_inquiry inq_cp;
3786 /* General inquiry access code (GIAC) */
3787 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3791 switch (hdev->discovery.type) {
3792 case DISCOV_TYPE_BREDR:
3793 *status = mgmt_bredr_support(hdev);
3797 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3798 *status = MGMT_STATUS_BUSY;
3802 hci_inquiry_cache_flush(hdev);
3804 memset(&inq_cp, 0, sizeof(inq_cp));
3805 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3806 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3807 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3810 case DISCOV_TYPE_LE:
3811 case DISCOV_TYPE_INTERLEAVED:
3812 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR enabled as well. */
3816 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3817 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3818 *status = MGMT_STATUS_NOT_SUPPORTED;
3822 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3823 /* Don't let discovery abort an outgoing
3824 * connection attempt that's using directed
3827 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3829 *status = MGMT_STATUS_REJECTED;
3833 disable_advertising(req);
3836 /* If controller is scanning, it means the background scanning
3837 * is running. Thus, we should temporarily stop it in order to
3838 * set the discovery scanning parameters.
3840 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3841 hci_req_add_le_scan_disable(req);
3843 memset(&param_cp, 0, sizeof(param_cp));
3845 /* All active scans will be done with either a resolvable
3846 * private address (when privacy feature has been enabled)
3847 * or non-resolvable private address.
3849 err = hci_update_random_address(req, true, &own_addr_type);
3851 *status = MGMT_STATUS_FAILED;
3855 param_cp.type = LE_SCAN_ACTIVE;
3856 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3857 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3858 param_cp.own_address_type = own_addr_type;
3859 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3862 memset(&enable_cp, 0, sizeof(enable_cp));
3863 enable_cp.enable = LE_SCAN_ENABLE;
3864 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3865 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type (default case elided above). */
3870 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion for START_DISCOVERY / START_SERVICE_DISCOVERY.
 * Replies to the pending command, updates the discovery state, and — for
 * scans with an LE component — schedules le_scan_disable to stop the
 * scan after the type-specific timeout.
 */
3877 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3880 struct pending_cmd *cmd;
3881 unsigned long timeout;
3883 BT_DBG("status %d", status);
/* Either opcode may own the pending discovery request. */
3887 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3889 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3892 cmd->cmd_complete(cmd, mgmt_status(status));
3893 mgmt_pending_remove(cmd);
3897 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3901 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3903 /* If the scan involves LE scan, pick proper timeout to schedule
3904 * hdev->le_scan_disable that will stop it.
3906 switch (hdev->discovery.type) {
3907 case DISCOV_TYPE_LE:
3908 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3910 case DISCOV_TYPE_INTERLEAVED:
3911 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3913 case DISCOV_TYPE_BREDR:
3917 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3923 /* When service discovery is used and the controller has
3924 * a strict duplicate filter, it is important to remember
3925 * the start and duration of the scan. This is required
3926 * for restarting scanning during the discovery phase.
3928 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
3930 (hdev->discovery.uuid_count > 0 ||
3931 hdev->discovery.rssi != HCI_RSSI_INVALID)) {
3932 hdev->discovery.scan_start = jiffies;
3933 hdev->discovery.scan_duration = timeout;
3936 queue_delayed_work(hdev->workqueue,
3937 &hdev->le_scan_disable, timeout);
3941 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validate adapter/discovery state,
 * clear any stale discovery filter, build the discovery HCI request via
 * trigger_discovery() and run it; completion is handled in
 * start_discovery_complete().
 */
3944 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3945 void *data, u16 len)
3947 struct mgmt_cp_start_discovery *cp = data;
3948 struct pending_cmd *cmd;
3949 struct hci_request req;
3953 BT_DBG("%s", hdev->name);
3957 if (!hdev_is_powered(hdev)) {
3958 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3959 MGMT_STATUS_NOT_POWERED,
3960 &cp->type, sizeof(cp->type));
/* Reject if discovery is already running or periodic inquiry is on. */
3964 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3965 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3966 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3967 MGMT_STATUS_BUSY, &cp->type,
3972 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3978 cmd->cmd_complete = generic_cmd_complete;
3980 /* Clear the discovery filter first to free any previously
3981 * allocated memory for the UUID list.
3983 hci_discovery_filter_clear(hdev);
3985 hdev->discovery.type = cp->type;
3986 hdev->discovery.report_invalid_rssi = false;
3988 hci_req_init(&req, hdev);
3990 if (!trigger_discovery(&req, &status)) {
3991 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3992 status, &cp->type, sizeof(cp->type));
3993 mgmt_pending_remove(cmd);
3997 err = hci_req_run(&req, start_discovery_complete);
3999 mgmt_pending_remove(cmd);
4003 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4006 hci_dev_unlock(hdev);
/* cmd_complete callback for START_SERVICE_DISCOVERY pending commands. */
4010 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
4012 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery() but
 * additionally installs an RSSI threshold and a UUID filter list that
 * scan results are matched against.  The variable-length payload is
 * validated against uuid_count before any state is touched.
 */
4016 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4017 void *data, u16 len)
4019 struct mgmt_cp_start_service_discovery *cp = data;
4020 struct pending_cmd *cmd;
4021 struct hci_request req;
/* Upper bound keeps uuid_count * 16 from overflowing the u16 length. */
4022 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4023 u16 uuid_count, expected_len;
4027 BT_DBG("%s", hdev->name);
4031 if (!hdev_is_powered(hdev)) {
4032 err = cmd_complete(sk, hdev->id,
4033 MGMT_OP_START_SERVICE_DISCOVERY,
4034 MGMT_STATUS_NOT_POWERED,
4035 &cp->type, sizeof(cp->type));
4039 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4040 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
4041 err = cmd_complete(sk, hdev->id,
4042 MGMT_OP_START_SERVICE_DISCOVERY,
4043 MGMT_STATUS_BUSY, &cp->type,
4048 uuid_count = __le16_to_cpu(cp->uuid_count);
4049 if (uuid_count > max_uuid_count) {
4050 BT_ERR("service_discovery: too big uuid_count value %u",
4052 err = cmd_complete(sk, hdev->id,
4053 MGMT_OP_START_SERVICE_DISCOVERY,
4054 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Payload length must exactly match header + 16 bytes per UUID. */
4059 expected_len = sizeof(*cp) + uuid_count * 16;
4060 if (expected_len != len) {
4061 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4063 err = cmd_complete(sk, hdev->id,
4064 MGMT_OP_START_SERVICE_DISCOVERY,
4065 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4070 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4077 cmd->cmd_complete = service_discovery_cmd_complete;
4079 /* Clear the discovery filter first to free any previously
4080 * allocated memory for the UUID list.
4082 hci_discovery_filter_clear(hdev);
4084 hdev->discovery.type = cp->type;
4085 hdev->discovery.rssi = cp->rssi;
4086 hdev->discovery.uuid_count = uuid_count;
4088 if (uuid_count > 0) {
4089 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4091 if (!hdev->discovery.uuids) {
4092 err = cmd_complete(sk, hdev->id,
4093 MGMT_OP_START_SERVICE_DISCOVERY,
4095 &cp->type, sizeof(cp->type));
4096 mgmt_pending_remove(cmd);
4101 hci_req_init(&req, hdev);
4103 if (!trigger_discovery(&req, &status)) {
4104 err = cmd_complete(sk, hdev->id,
4105 MGMT_OP_START_SERVICE_DISCOVERY,
4106 status, &cp->type, sizeof(cp->type));
4107 mgmt_pending_remove(cmd);
4111 err = hci_req_run(&req, start_discovery_complete);
4113 mgmt_pending_remove(cmd);
4117 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4120 hci_dev_unlock(hdev);
/* HCI request completion for STOP_DISCOVERY: answer the pending command
 * and mark discovery stopped.
 */
4124 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4126 struct pending_cmd *cmd;
4128 BT_DBG("status %d", status);
4132 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4134 cmd->cmd_complete(cmd, mgmt_status(status));
4135 mgmt_pending_remove(cmd);
4139 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4141 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: stop an active discovery session whose
 * type matches the request.  If stopping requires no HCI traffic
 * (hci_req_run returns -ENODATA) the command completes immediately.
 */
4144 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4147 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4148 struct pending_cmd *cmd;
4149 struct hci_request req;
4152 BT_DBG("%s", hdev->name);
4156 if (!hci_discovery_active(hdev)) {
4157 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4158 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4159 sizeof(mgmt_cp->type));
/* Requested type must match the discovery session in progress. */
4163 if (hdev->discovery.type != mgmt_cp->type) {
4164 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4165 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4166 sizeof(mgmt_cp->type));
4170 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4176 cmd->cmd_complete = generic_cmd_complete;
4178 hci_req_init(&req, hdev);
4180 hci_stop_discovery(&req);
4182 err = hci_req_run(&req, stop_discovery_complete);
4184 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4188 mgmt_pending_remove(cmd);
4190 /* If no HCI commands were sent we're done */
4191 if (err == -ENODATA) {
4192 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4193 &mgmt_cp->type, sizeof(mgmt_cp->type));
4194 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4198 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, userspace tells the
 * kernel whether a device's name is already known.  Unknown names are
 * marked NAME_NEEDED so name resolution will be performed.
 */
4202 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4205 struct mgmt_cp_confirm_name *cp = data;
4206 struct inquiry_entry *e;
4209 BT_DBG("%s", hdev->name);
4213 if (!hci_discovery_active(hdev)) {
4214 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4215 MGMT_STATUS_FAILED, &cp->addr,
4220 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4222 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4223 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4228 if (cp->name_known) {
4229 e->name_state = NAME_KNOWN;
4232 e->name_state = NAME_NEEDED;
4233 hci_inquiry_cache_update_resolve(hdev, e);
4236 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4240 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the blacklist and emit
 * a DEVICE_BLOCKED event to other mgmt sockets on success.
 */
4244 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4247 struct mgmt_cp_block_device *cp = data;
4251 BT_DBG("%s", hdev->name);
4253 if (!bdaddr_type_is_valid(cp->addr.type))
4254 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4255 MGMT_STATUS_INVALID_PARAMS,
4256 &cp->addr, sizeof(cp->addr));
4260 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4263 status = MGMT_STATUS_FAILED;
4267 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4269 status = MGMT_STATUS_SUCCESS;
4272 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4273 &cp->addr, sizeof(cp->addr));
4275 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the blacklist
 * and emit a DEVICE_UNBLOCKED event on success; a miss is reported as
 * invalid parameters.
 */
4280 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4283 struct mgmt_cp_unblock_device *cp = data;
4287 BT_DBG("%s", hdev->name);
4289 if (!bdaddr_type_is_valid(cp->addr.type))
4290 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4291 MGMT_STATUS_INVALID_PARAMS,
4292 &cp->addr, sizeof(cp->addr));
4296 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4299 status = MGMT_STATUS_INVALID_PARAMS;
4303 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4305 status = MGMT_STATUS_SUCCESS;
4308 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4309 &cp->addr, sizeof(cp->addr));
4311 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (DI) record fields.
 * Source 0x0000-0x0002 only (0x0001 = Bluetooth SIG, 0x0002 = USB-IF).
 * An HCI request is run afterwards — presumably to refresh EIR with the
 * new DI record (the request-building call is elided in this view).
 */
4316 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4319 struct mgmt_cp_set_device_id *cp = data;
4320 struct hci_request req;
4324 BT_DBG("%s", hdev->name);
4326 source = __le16_to_cpu(cp->source);
4328 if (source > 0x0002)
4329 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4330 MGMT_STATUS_INVALID_PARAMS);
4334 hdev->devid_source = source;
4335 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4336 hdev->devid_product = __le16_to_cpu(cp->product);
4337 hdev->devid_version = __le16_to_cpu(cp->version);
4339 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4341 hci_req_init(&req, hdev);
4343 hci_req_run(&req, NULL);
4345 hci_dev_unlock(hdev);
/* HCI request completion for SET_ADVERTISING: on failure, fail all
 * pending SET_ADVERTISING commands; on success, sync the HCI_ADVERTISING
 * setting flag with the controller's HCI_LE_ADV state and broadcast the
 * new settings.
 */
4350 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4353 struct cmd_lookup match = { NULL, hdev };
4358 u8 mgmt_err = mgmt_status(status);
4360 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4361 cmd_status_rsp, &mgmt_err);
4365 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4366 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4368 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4370 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4373 new_settings(hdev, match.sk);
4379 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: enable or disable LE advertising.
 * When no HCI traffic is needed (powered off, value unchanged, LE links
 * present, or an active LE scan running) only the setting flag is
 * toggled and a settings response is sent; otherwise the change goes
 * through an HCI request completed by set_advertising_complete().
 */
4382 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4385 struct mgmt_mode *cp = data;
4386 struct pending_cmd *cmd;
4387 struct hci_request req;
4388 u8 val, enabled, status;
4391 BT_DBG("request for %s", hdev->name);
4393 status = mgmt_le_support(hdev);
4395 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4398 if (cp->val != 0x00 && cp->val != 0x01)
4399 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4400 MGMT_STATUS_INVALID_PARAMS);
4405 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4407 /* The following conditions are ones which mean that we should
4408 * not do any HCI communication but directly send a mgmt
4409 * response to user space (after toggling the flag if
4412 if (!hdev_is_powered(hdev) || val == enabled ||
4413 hci_conn_num(hdev, LE_LINK) > 0 ||
4414 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4415 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4416 bool changed = false;
4418 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4419 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4423 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4428 err = new_settings(hdev, sk);
/* Serialize against other in-flight advertising/LE changes. */
4433 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4434 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4435 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4440 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4446 hci_req_init(&req, hdev);
4449 enable_advertising(&req);
4451 disable_advertising(&req);
4453 err = hci_req_run(&req, set_advertising_complete);
4455 mgmt_pending_remove(cmd);
4458 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address.  Only allowed while powered off; a non-ANY address must not
 * be BDADDR_NONE and must have the two most significant bits set, per
 * the static-address format in the Bluetooth Core Specification.
 */
4462 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4463 void *data, u16 len)
4465 struct mgmt_cp_set_static_address *cp = data;
4468 BT_DBG("%s", hdev->name);
4470 if (!lmp_le_capable(hdev))
4471 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4472 MGMT_STATUS_NOT_SUPPORTED);
4474 if (hdev_is_powered(hdev))
4475 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4476 MGMT_STATUS_REJECTED);
4478 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4479 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4480 return cmd_status(sk, hdev->id,
4481 MGMT_OP_SET_STATIC_ADDRESS,
4482 MGMT_STATUS_INVALID_PARAMS);
4484 /* Two most significant bits shall be set */
4485 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4486 return cmd_status(sk, hdev->id,
4487 MGMT_OP_SET_STATIC_ADDRESS,
4488 MGMT_STATUS_INVALID_PARAMS);
4493 bacpy(&hdev->static_addr, &cp->bdaddr);
4495 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4497 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval and window.
 * Both values must be within 0x0004-0x4000 and the window must not
 * exceed the interval.  A running background (passive) scan is restarted
 * so it picks up the new parameters.
 */
4502 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4503 void *data, u16 len)
4505 struct mgmt_cp_set_scan_params *cp = data;
4506 __u16 interval, window;
4509 BT_DBG("%s", hdev->name);
4511 if (!lmp_le_capable(hdev))
4512 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4513 MGMT_STATUS_NOT_SUPPORTED);
4515 interval = __le16_to_cpu(cp->interval);
4517 if (interval < 0x0004 || interval > 0x4000)
4518 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4519 MGMT_STATUS_INVALID_PARAMS);
4521 window = __le16_to_cpu(cp->window);
4523 if (window < 0x0004 || window > 0x4000)
4524 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4525 MGMT_STATUS_INVALID_PARAMS);
4527 if (window > interval)
4528 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4529 MGMT_STATUS_INVALID_PARAMS);
4533 hdev->le_scan_interval = interval;
4534 hdev->le_scan_window = window;
4536 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4538 /* If background scan is running, restart it so new parameters are
4541 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4542 hdev->discovery.state == DISCOVERY_STOPPED) {
4543 struct hci_request req;
4545 hci_req_init(&req, hdev);
4547 hci_req_add_le_scan_disable(&req);
4548 hci_req_add_le_passive_scan(&req);
4550 hci_req_run(&req, NULL);
4553 hci_dev_unlock(hdev);
/* HCI request completion handler for Set Fast Connectable.
 *
 * Looks up the pending MGMT_OP_SET_FAST_CONNECTABLE command; on HCI
 * failure forwards the translated status to the caller, otherwise
 * updates the HCI_FAST_CONNECTABLE flag from the requested mode and
 * sends the settings response plus a New Settings event.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * hci_dev_lock(), the if/else keywords around the status check and
 * cp->val test) — code kept verbatim.
 */
4558 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4561 struct pending_cmd *cmd;
4563 BT_DBG("status 0x%02x", status);
4567 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4572 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4573 mgmt_status(status));
4575 struct mgmt_mode *cp = cmd->param;
4578 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4580 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4582 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4583 new_settings(hdev, cmd->sk);
4586 mgmt_pending_remove(cmd);
4589 hci_dev_unlock(hdev);
/* Handle the MGMT Set Fast Connectable command.
 *
 * Only valid on BR/EDR-enabled controllers of Bluetooth 1.2 or later,
 * when powered and connectable. Rejects a duplicate pending command,
 * short-circuits when the requested state already matches
 * HCI_FAST_CONNECTABLE, and otherwise queues a page-scan parameter
 * update via write_fast_connectable(), completed asynchronously by
 * fast_connectable_complete().
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err;`, hci_dev_lock(), unlock labels) — code kept verbatim.
 */
4592 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4593 void *data, u16 len)
4595 struct mgmt_mode *cp = data;
4596 struct pending_cmd *cmd;
4597 struct hci_request req;
4600 BT_DBG("%s", hdev->name);
4602 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4603 hdev->hci_ver < BLUETOOTH_VER_1_2)
4604 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4605 MGMT_STATUS_NOT_SUPPORTED);
4607 if (cp->val != 0x00 && cp->val != 0x01)
4608 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4609 MGMT_STATUS_INVALID_PARAMS);
4611 if (!hdev_is_powered(hdev))
4612 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4613 MGMT_STATUS_NOT_POWERED);
4615 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4616 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4617 MGMT_STATUS_REJECTED);
4621 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4622 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4627 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4628 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4633 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4640 hci_req_init(&req, hdev);
4642 write_fast_connectable(&req, cp->val);
4644 err = hci_req_run(&req, fast_connectable_complete);
4646 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4647 MGMT_STATUS_FAILED);
4648 mgmt_pending_remove(cmd);
4652 hci_dev_unlock(hdev);
/* HCI request completion handler for Set BR/EDR.
 *
 * On HCI failure, restores HCI_BREDR_ENABLED (the flag was flipped
 * optimistically in set_bredr() before the request ran) and reports
 * the translated error; on success sends the settings response and a
 * New Settings event.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * hci_dev_lock(), if/else keywords) — code kept verbatim.
 */
4657 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4659 struct pending_cmd *cmd;
4661 BT_DBG("status 0x%02x", status);
4665 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4670 u8 mgmt_err = mgmt_status(status);
4672 /* We need to restore the flag if related HCI commands
4675 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4677 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4679 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4680 new_settings(hdev, cmd->sk);
4683 mgmt_pending_remove(cmd);
4686 hci_dev_unlock(hdev);
/* Handle the MGMT Set BR/EDR command (enable/disable BR/EDR on a
 * dual-mode controller).
 *
 * Requires both BR/EDR and LE support and LE currently enabled. When
 * the controller is powered off the flag change is applied directly
 * (clearing BR/EDR-dependent flags on disable). When powered,
 * disabling is rejected, and re-enabling is rejected if a static
 * address is configured or Secure Connections is enabled (see the
 * in-code comment). Otherwise the HCI_BREDR_ENABLED bit is set
 * optimistically and a request updating fast-connectable, page scan
 * and advertising data is queued, completed by set_bredr_complete().
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err;`, hci_dev_lock(), goto labels) — code kept verbatim.
 */
4689 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4691 struct mgmt_mode *cp = data;
4692 struct pending_cmd *cmd;
4693 struct hci_request req;
4696 BT_DBG("request for %s", hdev->name);
4698 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4699 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4700 MGMT_STATUS_NOT_SUPPORTED);
4702 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4703 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4704 MGMT_STATUS_REJECTED);
4706 if (cp->val != 0x00 && cp->val != 0x01)
4707 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4708 MGMT_STATUS_INVALID_PARAMS);
4712 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4713 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4717 if (!hdev_is_powered(hdev)) {
4719 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4720 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4721 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4722 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4723 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4726 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4728 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4732 err = new_settings(hdev, sk);
4736 /* Reject disabling when powered on */
4738 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4739 MGMT_STATUS_REJECTED);
4742 /* When configuring a dual-mode controller to operate
4743 * with LE only and using a static address, then switching
4744 * BR/EDR back on is not allowed.
4746 * Dual-mode controllers shall operate with the public
4747 * address as its identity address for BR/EDR and LE. So
4748 * reject the attempt to create an invalid configuration.
4750 * The same restrictions applies when secure connections
4751 * has been enabled. For BR/EDR this is a controller feature
4752 * while for LE it is a host stack feature. This means that
4753 * switching BR/EDR back on when secure connections has been
4754 * enabled is not a supported transaction.
4756 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4757 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4758 test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
4759 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4760 MGMT_STATUS_REJECTED);
4765 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4766 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4771 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4777 /* We need to flip the bit already here so that update_adv_data
4778 * generates the correct flags.
4780 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4782 hci_req_init(&req, hdev);
4784 write_fast_connectable(&req, false);
4785 __hci_update_page_scan(&req);
4787 /* Since only the advertising data flags will change, there
4788 * is no need to update the scan response data.
4790 update_adv_data(&req);
4792 err = hci_req_run(&req, set_bredr_complete);
4794 mgmt_pending_remove(cmd);
4797 hci_dev_unlock(hdev);
/* HCI request completion handler for Set Secure Connections.
 *
 * On failure reports the translated status; on success updates the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags according to the requested mode
 * (0x00 = off, 0x01 = SC enabled, 0x02 = SC-only) and sends the
 * settings response plus a New Settings event.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * hci_dev_lock(), the switch/case on cp->val) — code kept verbatim.
 */
4801 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4803 struct pending_cmd *cmd;
4804 struct mgmt_mode *cp;
4806 BT_DBG("%s status %u", hdev->name, status);
4810 cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4815 cmd_status(cmd->sk, cmd->index, cmd->opcode,
4816 mgmt_status(status));
4824 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4825 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4828 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4829 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4832 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4833 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4837 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4838 new_settings(hdev, cmd->sk);
4841 mgmt_pending_remove(cmd);
4843 hci_dev_unlock(hdev);
/* Handle the MGMT Set Secure Connections command.
 *
 * Accepts values 0x00 (off), 0x01 (enabled) and 0x02 (SC-only).
 * Requires controller SC support or LE enabled; with BR/EDR active
 * and a SC-capable controller, SSP must be enabled first. When the
 * device is unpowered, not SC capable, or BR/EDR is disabled, the
 * flags are changed directly; otherwise the HCI Write Secure
 * Connections Host Support command is queued, completed by
 * sc_enable_complete().
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `u8 val; int err;`, hci_dev_lock(), goto labels) — code kept
 * verbatim.
 */
4846 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4847 void *data, u16 len)
4849 struct mgmt_mode *cp = data;
4850 struct pending_cmd *cmd;
4851 struct hci_request req;
4855 BT_DBG("request for %s", hdev->name);
4857 if (!lmp_sc_capable(hdev) &&
4858 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4859 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4860 MGMT_STATUS_NOT_SUPPORTED);
4862 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4863 lmp_sc_capable(hdev) &&
4864 !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4865 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4866 MGMT_STATUS_REJECTED);
4868 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4869 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4870 MGMT_STATUS_INVALID_PARAMS);
4874 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4875 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4879 changed = !test_and_set_bit(HCI_SC_ENABLED,
4881 if (cp->val == 0x02)
4882 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4884 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4886 changed = test_and_clear_bit(HCI_SC_ENABLED,
4888 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4891 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4896 err = new_settings(hdev, sk);
4901 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4902 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4909 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4910 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4911 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4915 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4921 hci_req_init(&req, hdev);
4922 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4923 err = hci_req_run(&req, sc_enable_complete);
4925 mgmt_pending_remove(cmd);
4930 hci_dev_unlock(hdev);
/* Handle the MGMT Set Debug Keys command.
 *
 * 0x00 disables, 0x01 keeps debug keys, 0x02 additionally enables use
 * of debug keys (HCI_USE_DEBUG_KEYS). If the use-flag changed while
 * powered with SSP enabled, the HCI Write SSP Debug Mode command is
 * sent to the controller. A settings response and, when the kept-keys
 * flag changed, a New Settings event are issued.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err;`, hci_dev_lock(), the &hdev->dev_flags continuation
 * arguments of the test_and_* calls) — code kept verbatim.
 */
4934 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4935 void *data, u16 len)
4937 struct mgmt_mode *cp = data;
4938 bool changed, use_changed;
4941 BT_DBG("request for %s", hdev->name);
4943 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4944 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4945 MGMT_STATUS_INVALID_PARAMS);
4950 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4953 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4956 if (cp->val == 0x02)
4957 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4960 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4963 if (hdev_is_powered(hdev) && use_changed &&
4964 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4965 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4966 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4967 sizeof(mode), &mode);
4970 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4975 err = new_settings(hdev, sk);
4978 hci_dev_unlock(hdev);
/* Handle the MGMT Set Privacy command.
 *
 * Only accepted on LE-capable controllers while powered off.
 * Enabling stores the supplied IRK, sets HCI_PRIVACY and marks the
 * RPA as expired so a fresh one is generated; disabling clears the
 * IRK and both flags. HCI_RPA_RESOLVING is set unconditionally since
 * a userspace that issues this command is expected to handle IRKs.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err; bool changed;`, hci_dev_lock(), if/else keywords) —
 * code kept verbatim.
 */
4982 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4985 struct mgmt_cp_set_privacy *cp = cp_data;
4989 BT_DBG("request for %s", hdev->name);
4991 if (!lmp_le_capable(hdev))
4992 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4993 MGMT_STATUS_NOT_SUPPORTED);
4995 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4996 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4997 MGMT_STATUS_INVALID_PARAMS);
4999 if (hdev_is_powered(hdev))
5000 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5001 MGMT_STATUS_REJECTED);
5005 /* If user space supports this command it is also expected to
5006 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5008 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5011 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
5012 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5013 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5015 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
5016 memset(hdev->irk, 0, sizeof(hdev->irk));
5017 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5020 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5025 err = new_settings(hdev, sk);
5028 hci_dev_unlock(hdev);
/* Validate the address in a mgmt_irk_info entry: public LE addresses
 * are always accepted; random LE addresses must be in static format
 * (two most significant bits set).
 *
 * NOTE(review): extraction is missing the return statements and the
 * default case of this switch — code kept verbatim.
 */
5032 static bool irk_is_valid(struct mgmt_irk_info *irk)
5034 switch (irk->addr.type) {
5035 case BDADDR_LE_PUBLIC:
5038 case BDADDR_LE_RANDOM:
5039 /* Two most significant bits shall be set */
5040 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle the MGMT Load IRKs command.
 *
 * Validates the declared irk_count against the maximum that fits a
 * u16 payload and against the actual message length, checks each
 * entry with irk_is_valid(), then atomically replaces the IRK store:
 * clears existing IRKs and re-adds the supplied ones with the proper
 * LE address type. Finally marks the controller as doing RPA
 * resolution on behalf of userspace.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int i, err;`, hci_dev_lock(), final return) — code kept verbatim.
 */
5048 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5051 struct mgmt_cp_load_irks *cp = cp_data;
5052 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5053 sizeof(struct mgmt_irk_info));
5054 u16 irk_count, expected_len;
5057 BT_DBG("request for %s", hdev->name);
5059 if (!lmp_le_capable(hdev))
5060 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5061 MGMT_STATUS_NOT_SUPPORTED);
5063 irk_count = __le16_to_cpu(cp->irk_count);
5064 if (irk_count > max_irk_count) {
5065 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5066 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5067 MGMT_STATUS_INVALID_PARAMS);
5070 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5071 if (expected_len != len) {
5072 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5074 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5075 MGMT_STATUS_INVALID_PARAMS);
5078 BT_DBG("%s irk_count %u", hdev->name, irk_count);
5080 for (i = 0; i < irk_count; i++) {
5081 struct mgmt_irk_info *key = &cp->irks[i];
5083 if (!irk_is_valid(key))
5084 return cmd_status(sk, hdev->id,
5086 MGMT_STATUS_INVALID_PARAMS);
5091 hci_smp_irks_clear(hdev);
5093 for (i = 0; i < irk_count; i++) {
5094 struct mgmt_irk_info *irk = &cp->irks[i];
5097 if (irk->addr.type == BDADDR_LE_PUBLIC)
5098 addr_type = ADDR_LE_DEV_PUBLIC;
5100 addr_type = ADDR_LE_DEV_RANDOM;
5102 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5106 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5108 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5110 hci_dev_unlock(hdev);
/* Validate a mgmt_ltk_info entry: the master field must be 0 or 1,
 * public LE addresses are accepted, and random LE addresses must be
 * in static format (two most significant bits set).
 *
 * NOTE(review): extraction is missing the return statements and the
 * default case of this switch — code kept verbatim.
 */
5115 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5117 if (key->master != 0x00 && key->master != 0x01)
5120 switch (key->addr.type) {
5121 case BDADDR_LE_PUBLIC:
5124 case BDADDR_LE_RANDOM:
5125 /* Two most significant bits shall be set */
5126 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle the MGMT Load Long Term Keys command.
 *
 * Validates key_count against the maximum that fits a u16 payload and
 * against the actual message length, checks each entry with
 * ltk_is_valid(), then replaces the stored LTKs: clears the SMP LTK
 * list and re-adds each key, mapping the mgmt key type to the SMP
 * type/authentication level (P-192 master/slave, P-256 unauth/auth/
 * debug) before calling hci_add_ltk().
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int i, err;`, hci_dev_lock(), break statements and the default
 * case of the type switch) — code kept verbatim.
 */
5134 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5135 void *cp_data, u16 len)
5137 struct mgmt_cp_load_long_term_keys *cp = cp_data;
5138 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5139 sizeof(struct mgmt_ltk_info));
5140 u16 key_count, expected_len;
5143 BT_DBG("request for %s", hdev->name);
5145 if (!lmp_le_capable(hdev))
5146 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5147 MGMT_STATUS_NOT_SUPPORTED);
5149 key_count = __le16_to_cpu(cp->key_count);
5150 if (key_count > max_key_count) {
5151 BT_ERR("load_ltks: too big key_count value %u", key_count);
5152 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5153 MGMT_STATUS_INVALID_PARAMS);
5156 expected_len = sizeof(*cp) + key_count *
5157 sizeof(struct mgmt_ltk_info);
5158 if (expected_len != len) {
5159 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5161 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5162 MGMT_STATUS_INVALID_PARAMS);
5165 BT_DBG("%s key_count %u", hdev->name, key_count);
5167 for (i = 0; i < key_count; i++) {
5168 struct mgmt_ltk_info *key = &cp->keys[i];
5170 if (!ltk_is_valid(key))
5171 return cmd_status(sk, hdev->id,
5172 MGMT_OP_LOAD_LONG_TERM_KEYS,
5173 MGMT_STATUS_INVALID_PARAMS);
5178 hci_smp_ltks_clear(hdev);
5180 for (i = 0; i < key_count; i++) {
5181 struct mgmt_ltk_info *key = &cp->keys[i];
5182 u8 type, addr_type, authenticated;
5184 if (key->addr.type == BDADDR_LE_PUBLIC)
5185 addr_type = ADDR_LE_DEV_PUBLIC;
5187 addr_type = ADDR_LE_DEV_RANDOM;
5189 switch (key->type) {
5190 case MGMT_LTK_UNAUTHENTICATED:
5191 authenticated = 0x00;
5192 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5194 case MGMT_LTK_AUTHENTICATED:
5195 authenticated = 0x01;
5196 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5198 case MGMT_LTK_P256_UNAUTH:
5199 authenticated = 0x00;
5200 type = SMP_LTK_P256;
5202 case MGMT_LTK_P256_AUTH:
5203 authenticated = 0x01;
5204 type = SMP_LTK_P256;
5206 case MGMT_LTK_P256_DEBUG:
5207 authenticated = 0x00;
5208 type = SMP_LTK_P256_DEBUG;
5213 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5214 authenticated, key->val, key->enc_size, key->ediv,
5218 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5221 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Connection Information.
 *
 * Builds the reply from the cached rssi/tx_power/max_tx_power values
 * of the hci_conn stashed in cmd->user_data when the request
 * succeeded, or from the HCI "invalid" sentinel values otherwise,
 * then drops the connection reference taken in get_conn_info().
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err;`, hci_conn_put, final return) — code kept verbatim.
 */
5226 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5228 struct hci_conn *conn = cmd->user_data;
5229 struct mgmt_rp_get_conn_info rp;
5232 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5234 if (status == MGMT_STATUS_SUCCESS) {
5235 rp.rssi = conn->rssi;
5236 rp.tx_power = conn->tx_power;
5237 rp.max_tx_power = conn->max_tx_power;
5239 rp.rssi = HCI_RSSI_INVALID;
5240 rp.tx_power = HCI_TX_POWER_INVALID;
5241 rp.max_tx_power = HCI_TX_POWER_INVALID;
5244 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5247 hci_conn_drop(conn);
/* HCI request completion handler for the conn-info refresh request
 * built in get_conn_info().
 *
 * Recovers the connection handle from whichever command (Read RSSI or
 * Read TX Power) was sent last — both carry the handle first, so the
 * cast is safe (see in-code comment) — looks up the connection and
 * the pending mgmt command keyed on it, and completes it. A failure
 * of the trailing TX-power reads is overridden to success, because
 * validity of the cached TX power was already recorded.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `u16 handle; u8 status;`, hci_dev_lock(), goto labels) — code
 * kept verbatim.
 */
5253 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5256 struct hci_cp_read_rssi *cp;
5257 struct pending_cmd *cmd;
5258 struct hci_conn *conn;
5262 BT_DBG("status 0x%02x", hci_status);
5266 /* Commands sent in request are either Read RSSI or Read Transmit Power
5267 * Level so we check which one was last sent to retrieve connection
5268 * handle. Both commands have handle as first parameter so it's safe to
5269 * cast data on the same command struct.
5271 * First command sent is always Read RSSI and we fail only if it fails.
5272 * In other case we simply override error to indicate success as we
5273 * already remembered if TX power value is actually valid.
5275 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5277 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5278 status = MGMT_STATUS_SUCCESS;
5280 status = mgmt_status(hci_status);
5284 BT_ERR("invalid sent_cmd in conn_info response");
5288 handle = __le16_to_cpu(cp->handle);
5289 conn = hci_conn_hash_lookup_handle(hdev, handle);
5291 BT_ERR("unknown handle (%d) in conn_info response", handle);
5295 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5299 cmd->cmd_complete(cmd, status);
5300 mgmt_pending_remove(cmd);
5303 hci_dev_unlock(hdev);
/* Handle the MGMT Get Connection Information command.
 *
 * Validates the address type, requires a powered adapter and an
 * established (BT_CONNECTED) ACL or LE connection, and rejects a
 * duplicate query for the same connection. If the cached values are
 * older than a randomized age between hdev->conn_info_min_age and
 * conn_info_max_age (randomized so clients cannot learn the refresh
 * period — see in-code comment), a request of Read RSSI plus the
 * needed Read TX Power commands is queued and completed by
 * conn_info_refresh_complete(); otherwise the cached rssi/tx_power/
 * max_tx_power are returned immediately.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err;`, hci_dev_lock(), goto labels, error handling after
 * hci_req_run/mgmt_pending_add) — code kept verbatim.
 */
5306 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5309 struct mgmt_cp_get_conn_info *cp = data;
5310 struct mgmt_rp_get_conn_info rp;
5311 struct hci_conn *conn;
5312 unsigned long conn_info_age;
5315 BT_DBG("%s", hdev->name);
5317 memset(&rp, 0, sizeof(rp));
5318 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5319 rp.addr.type = cp->addr.type;
5321 if (!bdaddr_type_is_valid(cp->addr.type))
5322 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5323 MGMT_STATUS_INVALID_PARAMS,
5328 if (!hdev_is_powered(hdev)) {
5329 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5330 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5334 if (cp->addr.type == BDADDR_BREDR)
5335 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5338 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5340 if (!conn || conn->state != BT_CONNECTED) {
5341 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5342 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
5346 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5347 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5348 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5352 /* To avoid client trying to guess when to poll again for information we
5353 * calculate conn info age as random value between min/max set in hdev.
5355 conn_info_age = hdev->conn_info_min_age +
5356 prandom_u32_max(hdev->conn_info_max_age -
5357 hdev->conn_info_min_age);
5359 /* Query controller to refresh cached values if they are too old or were
5362 if (time_after(jiffies, conn->conn_info_timestamp +
5363 msecs_to_jiffies(conn_info_age)) ||
5364 !conn->conn_info_timestamp) {
5365 struct hci_request req;
5366 struct hci_cp_read_tx_power req_txp_cp;
5367 struct hci_cp_read_rssi req_rssi_cp;
5368 struct pending_cmd *cmd;
5370 hci_req_init(&req, hdev);
5371 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5372 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5375 /* For LE links TX power does not change thus we don't need to
5376 * query for it once value is known.
5378 if (!bdaddr_type_is_le(cp->addr.type) ||
5379 conn->tx_power == HCI_TX_POWER_INVALID) {
5380 req_txp_cp.handle = cpu_to_le16(conn->handle);
5381 req_txp_cp.type = 0x00;
5382 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5383 sizeof(req_txp_cp), &req_txp_cp);
5386 /* Max TX power needs to be read only once per connection */
5387 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5388 req_txp_cp.handle = cpu_to_le16(conn->handle);
5389 req_txp_cp.type = 0x01;
5390 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5391 sizeof(req_txp_cp), &req_txp_cp);
5394 err = hci_req_run(&req, conn_info_refresh_complete);
5398 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
5405 hci_conn_hold(conn);
5406 cmd->user_data = hci_conn_get(conn);
5407 cmd->cmd_complete = conn_info_cmd_complete;
5409 conn->conn_info_timestamp = jiffies;
5411 /* Cache is valid, just reply with values cached in hci_conn */
5412 rp.rssi = conn->rssi;
5413 rp.tx_power = conn->tx_power;
5414 rp.max_tx_power = conn->max_tx_power;
5416 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5417 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5421 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Information.
 *
 * Copies the requested address from the pending command, fills in the
 * local clock from the hdev looked up by index and, when a connection
 * was involved, the piconet clock and accuracy cached in hci_conn,
 * then sends the response and drops the held connection reference.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err;`, NULL checks around hdev/conn, hci_dev_put,
 * hci_conn_put, final return) — code kept verbatim.
 */
5425 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5427 struct hci_conn *conn = cmd->user_data;
5428 struct mgmt_rp_get_clock_info rp;
5429 struct hci_dev *hdev;
5432 memset(&rp, 0, sizeof(rp));
5433 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5438 hdev = hci_dev_get(cmd->index);
5440 rp.local_clock = cpu_to_le32(hdev->clock);
5445 rp.piconet_clock = cpu_to_le32(conn->clock);
5446 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5450 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5454 hci_conn_drop(conn);
/* HCI request completion handler for Get Clock Information.
 *
 * Retrieves the sent Read Clock command to learn whether a piconet
 * clock (which != 0) was requested; in that case resolves the
 * hci_conn from the handle, then finds the pending mgmt command keyed
 * on that connection (or NULL for local clock only) and completes it.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * hci_dev_lock(), the `conn = NULL` else branch, goto labels) —
 * code kept verbatim.
 */
5461 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5463 struct hci_cp_read_clock *hci_cp;
5464 struct pending_cmd *cmd;
5465 struct hci_conn *conn;
5467 BT_DBG("%s status %u", hdev->name, status);
5471 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5475 if (hci_cp->which) {
5476 u16 handle = __le16_to_cpu(hci_cp->handle);
5477 conn = hci_conn_hash_lookup_handle(hdev, handle);
5482 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5486 cmd->cmd_complete(cmd, mgmt_status(status));
5487 mgmt_pending_remove(cmd);
5490 hci_dev_unlock(hdev);
/* Handle the MGMT Get Clock Information command.
 *
 * BR/EDR only. Requires a powered adapter; when a non-ANY peer
 * address is given the corresponding ACL connection must be in
 * BT_CONNECTED state. Queues a Read Clock command for the local clock
 * (which = 0x00) and, when a connection is involved, a second Read
 * Clock for the piconet clock (which = 0x01), completed by
 * get_clock_info_complete() via clock_info_cmd_complete().
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err;`, hci_dev_lock(), `conn = NULL` assignment, goto
 * labels) — code kept verbatim.
 */
5493 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5496 struct mgmt_cp_get_clock_info *cp = data;
5497 struct mgmt_rp_get_clock_info rp;
5498 struct hci_cp_read_clock hci_cp;
5499 struct pending_cmd *cmd;
5500 struct hci_request req;
5501 struct hci_conn *conn;
5504 BT_DBG("%s", hdev->name);
5506 memset(&rp, 0, sizeof(rp));
5507 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5508 rp.addr.type = cp->addr.type;
5510 if (cp->addr.type != BDADDR_BREDR)
5511 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5512 MGMT_STATUS_INVALID_PARAMS,
5517 if (!hdev_is_powered(hdev)) {
5518 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5519 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5523 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5524 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5526 if (!conn || conn->state != BT_CONNECTED) {
5527 err = cmd_complete(sk, hdev->id,
5528 MGMT_OP_GET_CLOCK_INFO,
5529 MGMT_STATUS_NOT_CONNECTED,
5537 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5543 cmd->cmd_complete = clock_info_cmd_complete;
5545 hci_req_init(&req, hdev);
5547 memset(&hci_cp, 0, sizeof(hci_cp));
5548 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5551 hci_conn_hold(conn);
5552 cmd->user_data = hci_conn_get(conn);
5554 hci_cp.handle = cpu_to_le16(conn->handle);
5555 hci_cp.which = 0x01; /* Piconet clock */
5556 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5559 err = hci_req_run(&req, get_clock_info_complete);
5561 mgmt_pending_remove(cmd);
5564 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists
 * and is in BT_CONNECTED state.
 *
 * NOTE(review): extraction is missing the braces and the
 * true/false return statements — code kept verbatim.
 */
5568 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5570 struct hci_conn *conn;
5572 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5576 if (conn->dst_type != type)
5579 if (conn->state != BT_CONNECTED)
5585 /* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for an LE
 * connection-parameter entry and move it onto the matching action
 * list: pend_le_reports for REPORT, pend_le_conns for DIRECT/ALWAYS
 * when not already connected, or no list for DISABLED/LINK_LOSS.
 * Each case triggers a background-scan update.
 *
 * NOTE(review): extraction is missing interior lines (braces, the
 * -EIO/-EINVAL error returns, break statements, final return 0) —
 * code kept verbatim.
 */
5586 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5587 u8 addr_type, u8 auto_connect)
5589 struct hci_dev *hdev = req->hdev;
5590 struct hci_conn_params *params;
5592 params = hci_conn_params_add(hdev, addr, addr_type);
5596 if (params->auto_connect == auto_connect)
5599 list_del_init(&params->action);
5601 switch (auto_connect) {
5602 case HCI_AUTO_CONN_DISABLED:
5603 case HCI_AUTO_CONN_LINK_LOSS:
5604 __hci_update_background_scan(req);
5606 case HCI_AUTO_CONN_REPORT:
5607 list_add(&params->action, &hdev->pend_le_reports);
5608 __hci_update_background_scan(req);
5610 case HCI_AUTO_CONN_DIRECT:
5611 case HCI_AUTO_CONN_ALWAYS:
5612 if (!is_connected(hdev, addr, addr_type)) {
5613 list_add(&params->action, &hdev->pend_le_conns);
5614 __hci_update_background_scan(req);
5619 params->auto_connect = auto_connect;
5621 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit a Device Added management event for the given address, type
 * and action to all mgmt sockets except the originating one.
 *
 * NOTE(review): extraction is missing the braces and the
 * `ev.action = action;` assignment — code kept verbatim.
 */
5627 static void device_added(struct sock *sk, struct hci_dev *hdev,
5628 bdaddr_t *bdaddr, u8 type, u8 action)
5630 struct mgmt_ev_device_added ev;
5632 bacpy(&ev.addr.bdaddr, bdaddr);
5633 ev.addr.type = type;
5636 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion handler for Add Device: finds the pending
 * MGMT_OP_ADD_DEVICE command and completes it with the translated
 * HCI status.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * hci_dev_lock(), NULL check on cmd) — code kept verbatim.
 */
5639 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5641 struct pending_cmd *cmd;
5643 BT_DBG("status 0x%02x", status);
5647 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5651 cmd->cmd_complete(cmd, mgmt_status(status));
5652 mgmt_pending_remove(cmd);
5655 hci_dev_unlock(hdev);
/* Handle the MGMT Add Device command.
 *
 * Validates the address type and a non-ANY address, and that the
 * action is 0x00 (background scan / report), 0x01 (allow incoming /
 * direct connect) or 0x02 (auto-connect). BR/EDR addresses only
 * support action 0x01 and go on the whitelist with a page-scan
 * update; LE addresses are mapped to an auto-connect policy and
 * stored via hci_conn_params_set(). A Device Added event is emitted
 * and the queued request completes via add_device_complete();
 * -ENODATA from hci_req_run means no HCI commands were needed and
 * the command is completed immediately.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err;`, hci_dev_lock(), goto labels/unlock paths) — code
 * kept verbatim.
 */
5658 static int add_device(struct sock *sk, struct hci_dev *hdev,
5659 void *data, u16 len)
5661 struct mgmt_cp_add_device *cp = data;
5662 struct pending_cmd *cmd;
5663 struct hci_request req;
5664 u8 auto_conn, addr_type;
5667 BT_DBG("%s", hdev->name);
5669 if (!bdaddr_type_is_valid(cp->addr.type) ||
5670 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5671 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5672 MGMT_STATUS_INVALID_PARAMS,
5673 &cp->addr, sizeof(cp->addr));
5675 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5676 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5677 MGMT_STATUS_INVALID_PARAMS,
5678 &cp->addr, sizeof(cp->addr));
5680 hci_req_init(&req, hdev);
5684 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5690 cmd->cmd_complete = addr_cmd_complete;
5692 if (cp->addr.type == BDADDR_BREDR) {
5693 /* Only incoming connections action is supported for now */
5694 if (cp->action != 0x01) {
5695 err = cmd->cmd_complete(cmd,
5696 MGMT_STATUS_INVALID_PARAMS);
5697 mgmt_pending_remove(cmd);
5701 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5706 __hci_update_page_scan(&req);
5711 if (cp->addr.type == BDADDR_LE_PUBLIC)
5712 addr_type = ADDR_LE_DEV_PUBLIC;
5714 addr_type = ADDR_LE_DEV_RANDOM;
5716 if (cp->action == 0x02)
5717 auto_conn = HCI_AUTO_CONN_ALWAYS;
5718 else if (cp->action == 0x01)
5719 auto_conn = HCI_AUTO_CONN_DIRECT;
5721 auto_conn = HCI_AUTO_CONN_REPORT;
5723 /* If the connection parameters don't exist for this device,
5724 * they will be created and configured with defaults.
5726 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5728 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5729 mgmt_pending_remove(cmd);
5734 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5736 err = hci_req_run(&req, add_device_complete);
5738 /* ENODATA means no HCI commands were needed (e.g. if
5739 * the adapter is powered off).
5741 if (err == -ENODATA)
5742 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5743 mgmt_pending_remove(cmd);
5747 hci_dev_unlock(hdev);
/* Emit a Device Removed management event for the given address and
 * type to all mgmt sockets except the originating one.
 *
 * NOTE(review): extraction is missing the braces — code kept
 * verbatim.
 */
5751 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5752 bdaddr_t *bdaddr, u8 type)
5754 struct mgmt_ev_device_removed ev;
5756 bacpy(&ev.addr.bdaddr, bdaddr);
5757 ev.addr.type = type;
5759 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion handler for Remove Device: finds the pending
 * MGMT_OP_REMOVE_DEVICE command and completes it with the translated
 * HCI status.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * hci_dev_lock(), NULL check on cmd) — code kept verbatim.
 */
5762 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5764 struct pending_cmd *cmd;
5766 BT_DBG("status 0x%02x", status);
5770 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5774 cmd->cmd_complete(cmd, mgmt_status(status));
5775 mgmt_pending_remove(cmd);
5778 hci_dev_unlock(hdev);
/* Handle the MGMT Remove Device command.
 *
 * With a specific address: BR/EDR entries are deleted from the
 * whitelist (with a page-scan update), LE entries have their
 * connection parameters removed from the action and parameter lists
 * (rejecting entries whose auto_connect is DISABLED) followed by a
 * background-scan update; a Device Removed event is emitted either
 * way. With BDADDR_ANY (wildcard, address type must be 0): every
 * whitelist entry and every non-DISABLED LE connection-parameter
 * entry is removed, emitting Device Removed for each. Completion is
 * via remove_device_complete(); -ENODATA from hci_req_run means no
 * HCI commands were needed and the command completes immediately.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err; u8 addr_type;`, hci_dev_lock(), list_del/kfree in the
 * wildcard loops, goto labels) — code kept verbatim.
 */
5781 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5782 void *data, u16 len)
5784 struct mgmt_cp_remove_device *cp = data;
5785 struct pending_cmd *cmd;
5786 struct hci_request req;
5789 BT_DBG("%s", hdev->name);
5791 hci_req_init(&req, hdev);
5795 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5801 cmd->cmd_complete = addr_cmd_complete;
5803 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5804 struct hci_conn_params *params;
5807 if (!bdaddr_type_is_valid(cp->addr.type)) {
5808 err = cmd->cmd_complete(cmd,
5809 MGMT_STATUS_INVALID_PARAMS);
5810 mgmt_pending_remove(cmd);
5814 if (cp->addr.type == BDADDR_BREDR) {
5815 err = hci_bdaddr_list_del(&hdev->whitelist,
5819 err = cmd->cmd_complete(cmd,
5820 MGMT_STATUS_INVALID_PARAMS);
5821 mgmt_pending_remove(cmd);
5825 __hci_update_page_scan(&req);
5827 device_removed(sk, hdev, &cp->addr.bdaddr,
5832 if (cp->addr.type == BDADDR_LE_PUBLIC)
5833 addr_type = ADDR_LE_DEV_PUBLIC;
5835 addr_type = ADDR_LE_DEV_RANDOM;
5837 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5840 err = cmd->cmd_complete(cmd,
5841 MGMT_STATUS_INVALID_PARAMS);
5842 mgmt_pending_remove(cmd);
5846 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5847 err = cmd->cmd_complete(cmd,
5848 MGMT_STATUS_INVALID_PARAMS);
5849 mgmt_pending_remove(cmd);
5853 list_del(&params->action);
5854 list_del(&params->list);
5856 __hci_update_background_scan(&req);
5858 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5860 struct hci_conn_params *p, *tmp;
5861 struct bdaddr_list *b, *btmp;
5863 if (cp->addr.type) {
5864 err = cmd->cmd_complete(cmd,
5865 MGMT_STATUS_INVALID_PARAMS);
5866 mgmt_pending_remove(cmd);
5870 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5871 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5876 __hci_update_page_scan(&req);
5878 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5879 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5881 device_removed(sk, hdev, &p->addr, p->addr_type);
5882 list_del(&p->action);
5887 BT_DBG("All LE connection parameters were removed");
5889 __hci_update_background_scan(&req);
5893 err = hci_req_run(&req, remove_device_complete);
5895 /* ENODATA means no HCI commands were needed (e.g. if
5896 * the adapter is powered off).
5898 if (err == -ENODATA)
5899 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5900 mgmt_pending_remove(cmd);
5904 hci_dev_unlock(hdev);
/* Handle the MGMT Load Connection Parameters command.
 *
 * Validates param_count against the maximum that fits a u16 payload
 * and against the actual message length, clears all disabled stored
 * parameters, then adds each supplied entry. Invalid entries (bad
 * address type, out-of-range values per hci_check_conn_params, or a
 * failed add) are logged and skipped rather than failing the whole
 * command. Valid entries update min/max interval, latency and
 * supervision timeout.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int i;`, hci_dev_lock(), `u8 addr_type;`, continue statements)
 * — code kept verbatim.
 */
5908 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5911 struct mgmt_cp_load_conn_param *cp = data;
5912 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5913 sizeof(struct mgmt_conn_param));
5914 u16 param_count, expected_len;
5917 if (!lmp_le_capable(hdev))
5918 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5919 MGMT_STATUS_NOT_SUPPORTED);
5921 param_count = __le16_to_cpu(cp->param_count);
5922 if (param_count > max_param_count) {
5923 BT_ERR("load_conn_param: too big param_count value %u",
5925 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5926 MGMT_STATUS_INVALID_PARAMS);
5929 expected_len = sizeof(*cp) + param_count *
5930 sizeof(struct mgmt_conn_param);
5931 if (expected_len != len) {
5932 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5934 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5935 MGMT_STATUS_INVALID_PARAMS);
5938 BT_DBG("%s param_count %u", hdev->name, param_count);
5942 hci_conn_params_clear_disabled(hdev);
5944 for (i = 0; i < param_count; i++) {
5945 struct mgmt_conn_param *param = &cp->params[i];
5946 struct hci_conn_params *hci_param;
5947 u16 min, max, latency, timeout;
5950 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5953 if (param->addr.type == BDADDR_LE_PUBLIC) {
5954 addr_type = ADDR_LE_DEV_PUBLIC;
5955 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5956 addr_type = ADDR_LE_DEV_RANDOM;
5958 BT_ERR("Ignoring invalid connection parameters");
5962 min = le16_to_cpu(param->min_interval);
5963 max = le16_to_cpu(param->max_interval);
5964 latency = le16_to_cpu(param->latency);
5965 timeout = le16_to_cpu(param->timeout);
5967 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5968 min, max, latency, timeout);
5970 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5971 BT_ERR("Ignoring invalid connection parameters");
5975 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5978 BT_ERR("Failed to add connection parameters");
5982 hci_param->conn_min_interval = min;
5983 hci_param->conn_max_interval = max;
5984 hci_param->conn_latency = latency;
5985 hci_param->supervision_timeout = timeout;
5988 hci_dev_unlock(hdev);
5990 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Handle the MGMT Set External Configuration command.
 *
 * Only permitted while powered off and when the controller has the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. Toggles HCI_EXT_CONFIGURED,
 * responds with the current options and, when the flag changed,
 * emits New Options. If the configured state now disagrees with the
 * HCI_UNCONFIGURED flag, the mgmt index is removed and re-added
 * (transitioning through HCI_CONFIG/HCI_AUTO_OFF with a power-on, or
 * raw mode) so userspace sees the index move between the configured
 * and unconfigured lists.
 *
 * NOTE(review): extraction is missing interior lines (braces,
 * `int err; bool changed;`, hci_dev_lock(), else keywords, goto
 * labels) — code kept verbatim.
 */
5993 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5994 void *data, u16 len)
5996 struct mgmt_cp_set_external_config *cp = data;
6000 BT_DBG("%s", hdev->name);
6002 if (hdev_is_powered(hdev))
6003 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6004 MGMT_STATUS_REJECTED);
6006 if (cp->config != 0x00 && cp->config != 0x01)
6007 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6008 MGMT_STATUS_INVALID_PARAMS);
6010 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6011 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6012 MGMT_STATUS_NOT_SUPPORTED);
6017 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
6020 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
6023 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6030 err = new_options(hdev, sk);
6032 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
6033 mgmt_index_removed(hdev);
6035 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6036 set_bit(HCI_CONFIG, &hdev->dev_flags);
6037 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6039 queue_work(hdev->req_workqueue, &hdev->power_on);
6041 set_bit(HCI_RAW, &hdev->flags);
6042 mgmt_index_added(hdev);
6047 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: store a new public BD_ADDR for an
 * unpowered controller that provides a set_bdaddr driver hook. If the
 * device becomes fully configured as a result, it is re-announced as a
 * configured index and powered on.
 * NOTE(review): extraction dropped lines (locking, error paths) —
 * verify against upstream mgmt.c.
 */
6051 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6052 void *data, u16 len)
6054 struct mgmt_cp_set_public_address *cp = data;
6058 BT_DBG("%s", hdev->name);
/* Address may only be set while powered off. */
6060 if (hdev_is_powered(hdev))
6061 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6062 MGMT_STATUS_REJECTED);
/* BDADDR_ANY (all zeroes) is not a valid public address. */
6064 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6065 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6066 MGMT_STATUS_INVALID_PARAMS);
/* Requires driver support for programming the address. */
6068 if (!hdev->set_bdaddr)
6069 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6070 MGMT_STATUS_NOT_SUPPORTED);
6074 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6075 bacpy(&hdev->public_addr, &cp->bdaddr);
6077 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6084 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6085 err = new_options(hdev, sk);
/* Setting the address may have completed configuration: switch the
 * index from unconfigured to configured and power it on. */
6087 if (is_configured(hdev)) {
6088 mgmt_index_removed(hdev);
6090 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
6092 set_bit(HCI_CONFIG, &hdev->dev_flags);
6093 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6095 queue_work(hdev->req_workqueue, &hdev->power_on);
6099 hci_dev_unlock(hdev);
/* Opcode-indexed dispatch table for MGMT commands. Each entry pairs a
 * handler with a variable-length flag and an expected (or minimum)
 * parameter size; mgmt_control() indexes this array by opcode.
 * NOTE(review): the struct's field declarations (var_len/data_len) were
 * dropped by extraction — see upstream mgmt.c for the full definition.
 */
6103 static const struct mgmt_handler {
6104 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
6108 } mgmt_handlers[] = {
6109 { NULL }, /* 0x0000 (no command) */
6110 { read_version, false, MGMT_READ_VERSION_SIZE },
6111 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
6112 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
6113 { read_controller_info, false, MGMT_READ_INFO_SIZE },
6114 { set_powered, false, MGMT_SETTING_SIZE },
6115 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
6116 { set_connectable, false, MGMT_SETTING_SIZE },
6117 { set_fast_connectable, false, MGMT_SETTING_SIZE },
6118 { set_bondable, false, MGMT_SETTING_SIZE },
6119 { set_link_security, false, MGMT_SETTING_SIZE },
6120 { set_ssp, false, MGMT_SETTING_SIZE },
6121 { set_hs, false, MGMT_SETTING_SIZE },
6122 { set_le, false, MGMT_SETTING_SIZE },
6123 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
6124 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
6125 { add_uuid, false, MGMT_ADD_UUID_SIZE },
6126 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* "true" entries are variable-length commands: the size is a minimum. */
6127 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
6128 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
6129 { disconnect, false, MGMT_DISCONNECT_SIZE },
6130 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
6131 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
6132 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6133 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
6134 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
6135 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6136 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
6137 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
6138 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6139 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
6140 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6141 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6142 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
6143 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6144 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
6145 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
6146 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
6147 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
6148 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
6149 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
6150 { set_advertising, false, MGMT_SETTING_SIZE },
6151 { set_bredr, false, MGMT_SETTING_SIZE },
6152 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
6153 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
6154 { set_secure_conn, false, MGMT_SETTING_SIZE },
6155 { set_debug_keys, false, MGMT_SETTING_SIZE },
6156 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
6157 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
6158 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
6159 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
6160 { add_device, false, MGMT_ADD_DEVICE_SIZE },
6161 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
6162 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
6163 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
6164 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
6165 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
6166 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
6167 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for raw MGMT commands arriving on a control socket.
 * Copies the message into a kernel buffer, validates the header
 * (opcode/index/length), resolves and gates the target hci_dev, then
 * dispatches to the handler table entry for the opcode.
 * NOTE(review): extraction dropped lines (error cleanup, hci_dev_put,
 * return paths) — verify full control flow against upstream mgmt.c.
 */
6170 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6174 struct mgmt_hdr *hdr;
6175 u16 opcode, index, len;
6176 struct hci_dev *hdev = NULL;
6177 const struct mgmt_handler *handler;
6180 BT_DBG("got %zu bytes", msglen);
/* Message must at least contain a full mgmt_hdr. */
6182 if (msglen < sizeof(*hdr))
6185 buf = kmalloc(msglen, GFP_KERNEL);
6189 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields are little-endian on the wire. */
6195 opcode = __le16_to_cpu(hdr->opcode);
6196 index = __le16_to_cpu(hdr->index);
6197 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must match what was actually received. */
6199 if (len != msglen - sizeof(*hdr)) {
6204 if (index != MGMT_INDEX_NONE) {
6205 hdev = hci_dev_get(index);
6207 err = cmd_status(sk, index, opcode,
6208 MGMT_STATUS_INVALID_INDEX);
/* Devices in setup/config or claimed by a user channel are not
 * addressable through the management interface. */
6212 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6213 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6214 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6215 err = cmd_status(sk, index, opcode,
6216 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers only accept the configuration opcodes. */
6220 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6221 opcode != MGMT_OP_READ_CONFIG_INFO &&
6222 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
6223 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
6224 err = cmd_status(sk, index, opcode,
6225 MGMT_STATUS_INVALID_INDEX);
6230 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
6231 mgmt_handlers[opcode].func == NULL) {
6232 BT_DBG("Unknown op %u", opcode);
6233 err = cmd_status(sk, index, opcode,
6234 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global (index-less) commands must not carry a device index... */
6238 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6239 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6240 err = cmd_status(sk, index, opcode,
6241 MGMT_STATUS_INVALID_INDEX);
/* ...and per-device commands must carry one. */
6245 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
6246 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6247 err = cmd_status(sk, index, opcode,
6248 MGMT_STATUS_INVALID_INDEX);
6252 handler = &mgmt_handlers[opcode];
/* Fixed-size commands need an exact length; variable-length ones
 * only a minimum. */
6254 if ((handler->var_len && len < handler->data_len) ||
6255 (!handler->var_len && len != handler->data_len)) {
6256 err = cmd_status(sk, index, opcode,
6257 MGMT_STATUS_INVALID_PARAMS);
6262 mgmt_init_hdev(sk, hdev);
/* Parameters follow the header in the copied buffer. */
6264 cp = buf + sizeof(*hdr);
6266 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller index, as either a normal or
 * an unconfigured index event. BR/EDR-capable, non-raw devices only. */
6280 void mgmt_index_added(struct hci_dev *hdev)
6282 if (hdev->dev_type != HCI_BREDR)
6285 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6288 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6289 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6291 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller index. All pending commands for the
 * device are completed with INVALID_INDEX first (opcode 0 == match any). */
6294 void mgmt_index_removed(struct hci_dev *hdev)
6296 u8 status = MGMT_STATUS_INVALID_INDEX;
6298 if (hdev->dev_type != HCI_BREDR)
6301 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6304 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6306 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6307 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6309 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6312 /* This function requires the caller holds hdev->lock */
/* Re-queue every stored LE connection parameter entry onto the proper
 * action list (pending connections or reports) according to its
 * auto_connect policy, then kick the background scan. Used after
 * power-on so AUTO_OFF "soft off" state is recovered correctly. */
6313 static void restart_le_actions(struct hci_request *req)
6315 struct hci_dev *hdev = req->hdev;
6316 struct hci_conn_params *p;
6318 list_for_each_entry(p, &hdev->le_conn_params, list) {
6319 /* Needed for AUTO_OFF case where might not "really"
6320 * have been powered off.
/* Detach from any stale action list before re-adding below. */
6322 list_del_init(&p->action);
6324 switch (p->auto_connect) {
6325 case HCI_AUTO_CONN_DIRECT:
6326 case HCI_AUTO_CONN_ALWAYS:
6327 list_add(&p->action, &hdev->pend_le_conns);
6329 case HCI_AUTO_CONN_REPORT:
6330 list_add(&p->action, &hdev->pend_le_reports);
6337 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence: answer all
 * pending SET_POWERED commands and broadcast the new settings.
 * NOTE(review): extraction dropped lines (SMP channel registration
 * mentioned by the comment, locking) — verify against upstream. */
6340 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6342 struct cmd_lookup match = { NULL, hdev };
6344 BT_DBG("status 0x%02x", status);
6347 /* Register the available SMP channels (BR/EDR and LE) only
6348 * when successfully powering on the controller. This late
6349 * registration is required so that LE SMP can clearly
6350 * decide if the public address or static address is used.
6357 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6359 new_settings(hdev, match.sk);
6361 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state with
 * the mgmt-level flags right after power-on: SSP/SC host support, LE
 * host support, advertising data, LE actions, auth enable and page
 * scan. Returns the result of hci_req_run() with powered_complete as
 * the completion callback.
 * NOTE(review): extraction dropped lines (mode/support/link_sec
 * declarations, cp setup) — verify against upstream mgmt.c. */
6367 static int powered_update_hci(struct hci_dev *hdev)
6369 struct hci_request req;
6372 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host
 * feature bit is not yet set. */
6374 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6375 !lmp_host_ssp_capable(hdev)) {
6378 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6380 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6383 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6384 sizeof(support), &support);
6388 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6389 lmp_bredr_capable(hdev)) {
6390 struct hci_cp_write_le_host_supported cp;
6395 /* Check first if we already have the right
6396 * host state (host features set)
6398 if (cp.le != lmp_host_le_capable(hdev) ||
6399 cp.simul != lmp_host_le_br_capable(hdev))
6400 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6404 if (lmp_le_capable(hdev)) {
6405 /* Make sure the controller has a good default for
6406 * advertising data. This also applies to the case
6407 * where BR/EDR was toggled during the AUTO_OFF phase.
6409 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6410 update_adv_data(&req);
6411 update_scan_rsp_data(&req);
6414 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6415 enable_advertising(&req);
/* Requeue stored LE auto-connect actions (see restart_le_actions). */
6417 restart_le_actions(&req);
/* Sync link-level authentication with the mgmt link-security flag. */
6420 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6421 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6422 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6423 sizeof(link_sec), &link_sec);
6425 if (lmp_bredr_capable(hdev)) {
6426 write_fast_connectable(&req, false);
6427 __hci_update_page_scan(&req);
6433 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a controller power state change. On power-on, run the
 * powered_update_hci() request; on power-off, fail all pending commands
 * with an appropriate status, clear the advertised class of device and
 * broadcast new settings.
 * NOTE(review): extraction dropped lines (lock/unlock, return paths) —
 * verify against upstream mgmt.c. */
6436 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6438 struct cmd_lookup match = { NULL, hdev };
6439 u8 status, zero_cod[] = { 0, 0, 0 };
6442 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6446 if (powered_update_hci(hdev) == 0)
6449 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6454 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6456 /* If the power off is because of hdev unregistration let
6457 * use the appropriate INVALID_INDEX status. Otherwise use
6458 * NOT_POWERED. We cover both scenarios here since later in
6459 * mgmt_index_removed() any hci_conn callbacks will have already
6460 * been triggered, potentially causing misleading DISCONNECTED
6463 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6464 status = MGMT_STATUS_INVALID_INDEX;
6466 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0: complete every remaining pending command. */
6468 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Advertise an all-zero class of device while powered off. */
6470 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6471 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6472 zero_cod, sizeof(zero_cod), NULL);
6475 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command, mapping -ERFKILL to the dedicated
 * RFKILLED status and everything else to FAILED. */
6483 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6485 struct pending_cmd *cmd;
6488 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6492 if (err == -ERFKILL)
6493 status = MGMT_STATUS_RFKILLED;
6495 status = MGMT_STATUS_FAILED;
6497 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6499 mgmt_pending_remove(cmd);
/* Discoverable-mode timeout handler: clear both discoverable flags,
 * drop inquiry scan (keeping page scan if BR/EDR is enabled), refresh
 * advertising data, and broadcast the resulting settings. */
6502 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6504 struct hci_request req;
6508 /* When discoverable timeout triggers, then just make sure
6509 * the limited discoverable flag is cleared. Even in the case
6510 * of a timeout triggered from general discoverable, it is
6511 * safe to unconditionally clear the flag.
6513 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6514 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6516 hci_req_init(&req, hdev);
6517 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE only: stay connectable but no longer discoverable. */
6518 u8 scan = SCAN_PAGE;
6519 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6520 sizeof(scan), &scan);
6523 update_adv_data(&req);
6524 hci_req_run(&req, NULL);
6526 hdev->discov_timeout = 0;
6528 new_settings(hdev, NULL);
6530 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key,
 * passing the persistence decision through as the store hint. */
6533 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6536 struct mgmt_ev_new_link_key ev;
6538 memset(&ev, 0, sizeof(ev));
6540 ev.store_hint = persistent;
6541 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6542 ev.key.addr.type = BDADDR_BREDR;
6543 ev.key.type = key->type;
6544 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6545 ev.key.pin_len = key->pin_len;
6547 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term key's type/authentication state to the
 * corresponding MGMT LTK type constant. Falls back to UNAUTHENTICATED
 * for unrecognized types.
 * NOTE(review): the case labels for the first two groups were dropped
 * by extraction — verify which SMP_LTK* values they cover upstream. */
6550 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6552 switch (ltk->type) {
6555 if (ltk->authenticated)
6556 return MGMT_LTK_AUTHENTICATED;
6557 return MGMT_LTK_UNAUTHENTICATED;
6559 if (ltk->authenticated)
6560 return MGMT_LTK_P256_AUTH;
6561 return MGMT_LTK_P256_UNAUTH;
6562 case SMP_LTK_P256_DEBUG:
6563 return MGMT_LTK_P256_DEBUG;
6566 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long-term key. Keys for
 * non-identity (resolvable/non-resolvable random) addresses are marked
 * "do not store" since the address will not be stable. */
6569 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6571 struct mgmt_ev_new_long_term_key ev;
6573 memset(&ev, 0, sizeof(ev));
6575 /* Devices using resolvable or non-resolvable random addresses
6576 * without providing an identity resolving key don't require
6577 * to store long term keys. Their addresses will change the
6580 * Only when a remote device provides an identity address
6581 * make sure the long term key is stored. If the remote
6582 * identity is known, the long term keys are internally
6583 * mapped to the identity address. So allow static random
6584 * and public addresses here.
/* Top two bits 11 == static random; anything else random is not an
 * identity address. */
6586 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6587 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6588 ev.store_hint = 0x00;
6590 ev.store_hint = persistent;
6592 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6593 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6594 ev.key.type = mgmt_ltk_type(key);
6595 ev.key.enc_size = key->enc_size;
6596 ev.key.ediv = key->ediv;
6597 ev.key.rand = key->rand;
6599 if (key->type == SMP_LTK)
6602 memcpy(ev.key.val, key->val, sizeof(key->val));
6604 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key. Only ask user
 * space to store the IRK when it was actually used to resolve an RPA. */
6607 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6609 struct mgmt_ev_new_irk ev;
6611 memset(&ev, 0, sizeof(ev));
6613 /* For identity resolving keys from devices that are already
6614 * using a public address or static random address, do not
6615 * ask for storing this key. The identity resolving key really
6616 * is only mandatory for devices using resolvable random
6619 * Storing all identity resolving keys has the downside that
6620 * they will be also loaded on next boot of they system. More
6621 * identity resolving keys, means more time during scanning is
6622 * needed to actually resolve these addresses.
6624 if (bacmp(&irk->rpa, BDADDR_ANY))
6625 ev.store_hint = 0x01;
6627 ev.store_hint = 0x00;
6629 bacpy(&ev.rpa, &irk->rpa);
6630 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6631 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6632 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6634 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key. Same
 * identity-address rule as mgmt_new_ltk(): keys for non-identity random
 * addresses get store_hint 0. */
6637 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6640 struct mgmt_ev_new_csrk ev;
6642 memset(&ev, 0, sizeof(ev));
6644 /* Devices using resolvable or non-resolvable random addresses
6645 * without providing an identity resolving key don't require
6646 * to store signature resolving keys. Their addresses will change
6647 * the next time around.
6649 * Only when a remote device provides an identity address
6650 * make sure the signature resolving key is stored. So allow
6651 * static random and public addresses here.
6653 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6654 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6655 ev.store_hint = 0x00;
6657 ev.store_hint = persistent;
6659 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6660 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6661 ev.key.master = csrk->master;
6662 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6664 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM announcing updated LE connection
 * parameters for a device; silently ignored for non-identity
 * addresses. All u16 fields are converted to little-endian. */
6667 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6668 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6669 u16 max_interval, u16 latency, u16 timeout)
6671 struct mgmt_ev_new_conn_param ev;
6673 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6676 memset(&ev, 0, sizeof(ev));
6677 bacpy(&ev.addr.bdaddr, bdaddr);
6678 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6679 ev.store_hint = store_hint;
6680 ev.min_interval = cpu_to_le16(min_interval);
6681 ev.max_interval = cpu_to_le16(max_interval);
6682 ev.latency = cpu_to_le16(latency);
6683 ev.timeout = cpu_to_le16(timeout);
6685 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) at offset
 * eir_len in the eir buffer; returns the new length. Caller must ensure
 * the buffer has room for data_len + 2 bytes. */
6688 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6691 eir[eir_len++] = sizeof(type) + data_len;
6692 eir[eir_len++] = type;
6693 memcpy(&eir[eir_len], data, data_len);
6694 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED, attaching either the LE advertising
 * data (for LE connections that reported some) or the remote name and
 * class of device as EIR fields. */
6699 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6700 u32 flags, u8 *name, u8 name_len)
6703 struct mgmt_ev_device_connected *ev = (void *) buf;
6706 bacpy(&ev->addr.bdaddr, &conn->dst);
6707 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6709 ev->flags = __cpu_to_le32(flags);
6711 /* We must ensure that the EIR Data fields are ordered and
6712 * unique. Keep it simple for now and avoid the problem by not
6713 * adding any BR/EDR data to the LE adv.
6715 if (conn->le_adv_data_len > 0) {
6716 memcpy(&ev->eir[eir_len],
6717 conn->le_adv_data, conn->le_adv_data_len);
6718 eir_len = conn->le_adv_data_len;
6721 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Non-zero class of device gets its own EIR field. */
6724 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6725 eir_len = eir_append_data(ev->eir, eir_len,
6727 conn->dev_class, 3);
6730 ev->eir_len = cpu_to_le16(eir_len);
6732 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6733 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command
 * and hand its socket back to the caller through *data. */
6736 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6738 struct sock **sk = data;
6740 cmd->cmd_complete(cmd, 0);
6745 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: signal the unpaired device to user
 * space and complete the pending UNPAIR_DEVICE command. */
6748 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6750 struct hci_dev *hdev = data;
6751 struct mgmt_cp_unpair_device *cp = cmd->param;
6753 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6755 cmd->cmd_complete(cmd, 0);
6756 mgmt_pending_remove(cmd);
/* Return whether a SET_POWERED(off) command is currently pending,
 * i.e. the controller is in the process of powering down.
 * NOTE(review): the body that inspects cmd->param was dropped by
 * extraction — verify the exact check against upstream mgmt.c. */
6759 bool mgmt_powering_down(struct hci_dev *hdev)
6761 struct pending_cmd *cmd;
6762 struct mgmt_mode *cp;
6764 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED for a closed link, fast-forwarding a
 * pending power-off when this was the last open connection, and
 * completing any pending DISCONNECT/UNPAIR commands.
 * NOTE(review): extraction dropped lines (ev.reason assignment, the
 * unlock label) — verify against upstream mgmt.c. */
6775 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6776 u8 link_type, u8 addr_type, u8 reason,
6777 bool mgmt_connected)
6779 struct mgmt_ev_device_disconnected ev;
6780 struct sock *sk = NULL;
6782 /* The connection is still in hci_conn_hash so test for 1
6783 * instead of 0 to know if this is the last one.
6785 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6786 cancel_delayed_work(&hdev->power_off);
6787 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6790 if (!mgmt_connected)
6793 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Resolve the socket of the command that requested the disconnect,
 * so the event can be sent to everyone except the initiator. */
6796 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6798 bacpy(&ev.addr.bdaddr, bdaddr);
6799 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6802 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6807 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending DISCONNECT command with a failure status, but only
 * if its target address and type match the failed disconnect. Pending
 * UNPAIR commands are completed regardless. */
6811 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6812 u8 link_type, u8 addr_type, u8 status)
6814 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6815 struct mgmt_cp_disconnect *cp;
6816 struct pending_cmd *cmd;
6818 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6821 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only complete the command if it targeted this exact device. */
6827 if (bacmp(bdaddr, &cp->addr.bdaddr))
6830 if (cp->addr.type != bdaddr_type)
6833 cmd->cmd_complete(cmd, mgmt_status(status));
6834 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED for an outgoing connection attempt that
 * failed; also fast-forwards a pending power-off if this was the last
 * remaining entry in the connection hash. */
6837 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6838 u8 addr_type, u8 status)
6840 struct mgmt_ev_connect_failed ev;
6842 /* The connection is still in hci_conn_hash so test for 1
6843 * instead of 0 to know if this is the last one.
6845 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6846 cancel_delayed_work(&hdev->power_off);
6847 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6850 bacpy(&ev.addr.bdaddr, bdaddr);
6851 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6852 ev.status = mgmt_status(status);
6854 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST asking user space for a PIN (BR/EDR
 * only; ev.secure assignment was dropped by extraction). */
6857 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6859 struct mgmt_ev_pin_code_request ev;
6861 bacpy(&ev.addr.bdaddr, bdaddr);
6862 ev.addr.type = BDADDR_BREDR;
6865 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete the pending PIN_CODE_REPLY command with the HCI status
 * mapped to a mgmt status code. */
6868 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6871 struct pending_cmd *cmd;
6873 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6877 cmd->cmd_complete(cmd, mgmt_status(status));
6878 mgmt_pending_remove(cmd);
/* Complete the pending PIN_CODE_NEG_REPLY command with the HCI status
 * mapped to a mgmt status code. */
6881 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6884 struct pending_cmd *cmd;
6886 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6890 cmd->cmd_complete(cmd, mgmt_status(status));
6891 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking user space to confirm a
 * numeric comparison value during pairing. */
6894 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6895 u8 link_type, u8 addr_type, u32 value,
6898 struct mgmt_ev_user_confirm_request ev;
6900 BT_DBG("%s", hdev->name);
6902 bacpy(&ev.addr.bdaddr, bdaddr);
6903 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6904 ev.confirm_hint = confirm_hint;
6905 ev.value = cpu_to_le32(value);
6907 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking user space to enter a
 * passkey during pairing. */
6911 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6912 u8 link_type, u8 addr_type)
6914 struct mgmt_ev_user_passkey_request ev;
6916 BT_DBG("%s", hdev->name);
6918 bacpy(&ev.addr.bdaddr, bdaddr);
6919 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6921 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * commands: find the pending command by opcode and finish it with the
 * mapped mgmt status. */
6925 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6926 u8 link_type, u8 addr_type, u8 status,
6929 struct pending_cmd *cmd;
6931 cmd = mgmt_pending_find(opcode, hdev);
6935 cmd->cmd_complete(cmd, mgmt_status(status));
6936 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
6941 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6942 u8 link_type, u8 addr_type, u8 status)
6944 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6945 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
6948 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6949 u8 link_type, u8 addr_type, u8 status)
6951 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6953 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
6956 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6957 u8 link_type, u8 addr_type, u8 status)
6959 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6960 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
6963 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6964 u8 link_type, u8 addr_type, u8 status)
6966 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6968 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY showing the passkey (and entry progress)
 * to user space during pairing. */
6971 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6972 u8 link_type, u8 addr_type, u32 passkey,
6975 struct mgmt_ev_passkey_notify ev;
6977 BT_DBG("%s", hdev->name);
6979 bacpy(&ev.addr.bdaddr, bdaddr);
6980 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6981 ev.passkey = __cpu_to_le32(passkey);
6982 ev.entered = entered;
6984 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_AUTH_FAILED for a failed authentication; if a pairing
 * command is pending for this connection, skip its socket in the
 * broadcast and complete it with the mapped status. */
6987 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6989 struct mgmt_ev_auth_failed ev;
6990 struct pending_cmd *cmd;
6991 u8 status = mgmt_status(hci_status);
6993 bacpy(&ev.addr.bdaddr, &conn->dst);
6994 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6997 cmd = find_pairing(conn);
6999 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7000 cmd ? cmd->sk : NULL);
7003 cmd->cmd_complete(cmd, status);
7004 mgmt_pending_remove(cmd);
/* Completion for HCI_OP_WRITE_AUTH_ENABLE: on error, fail the pending
 * SET_LINK_SECURITY commands; on success, sync the HCI_LINK_SECURITY
 * flag with the controller's HCI_AUTH state and, if it changed,
 * broadcast new settings. */
7008 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7010 struct cmd_lookup match = { NULL, hdev };
7014 u8 mgmt_err = mgmt_status(status);
7015 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7016 cmd_status_rsp, &mgmt_err);
7020 if (test_bit(HCI_AUTH, &hdev->flags))
7021 changed = !test_and_set_bit(HCI_LINK_SECURITY,
7024 changed = test_and_clear_bit(HCI_LINK_SECURITY,
7027 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7031 new_settings(hdev, match.sk);
/* Queue a WRITE_EIR command that zeroes the extended inquiry response,
 * clearing the cached copy in hdev->eir as well. No-op when the
 * controller lacks extended inquiry support. */
7037 static void clear_eir(struct hci_request *req)
7039 struct hci_dev *hdev = req->hdev;
7040 struct hci_cp_write_eir cp;
7042 if (!lmp_ext_inq_capable(hdev))
7045 memset(hdev->eir, 0, sizeof(hdev->eir));
7047 memset(&cp, 0, sizeof(cp));
7049 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion for SSP mode changes: on error, roll back the enabled
 * flags and fail pending SET_SSP commands; on success, update the
 * SSP/HS flags, answer pending commands, broadcast settings if needed,
 * and re-apply debug-key mode (or clear EIR — dropped line).
 * NOTE(review): extraction dropped lines (else branches, unlock) —
 * verify flow against upstream mgmt.c. */
7052 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7054 struct cmd_lookup match = { NULL, hdev };
7055 struct hci_request req;
7056 bool changed = false;
7059 u8 mgmt_err = mgmt_status(status);
/* Failed enable: undo the optimistic flag set and notify. */
7061 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
7062 &hdev->dev_flags)) {
7063 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
7064 new_settings(hdev, NULL);
7067 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7073 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
7075 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Disabling SSP also forces high-speed off. */
7077 changed = test_and_clear_bit(HCI_HS_ENABLED,
7080 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
7083 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7086 new_settings(hdev, match.sk);
7091 hci_req_init(&req, hdev);
7093 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
7094 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
7095 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7096 sizeof(enable), &enable);
7102 hci_req_run(&req, NULL);
/* mgmt_pending_foreach callback: capture (and hold a reference to) the
 * first pending command's socket into the cmd_lookup result. */
7105 static void sk_lookup(struct pending_cmd *cmd, void *data)
7107 struct cmd_lookup *match = data;
7109 if (match->sk == NULL) {
7110 match->sk = cmd->sk;
7111 sock_hold(match->sk);
/* Completion for class-of-device updates: find the socket that
 * triggered the change (dev-class/UUID commands) and, on success,
 * broadcast the new 3-byte class of device to everyone else. */
7115 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7118 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7120 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7121 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7122 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7125 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion for a local name change: update the cached name when the
 * change did not come through mgmt, and emit LOCAL_NAME_CHANGED unless
 * the write was part of the power-on sequence. */
7132 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7134 struct mgmt_cp_set_local_name ev;
7135 struct pending_cmd *cmd;
7140 memset(&ev, 0, sizeof(ev));
7141 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7142 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7144 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7146 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7148 /* If this is a HCI command related to powering on the
7149 * HCI dev don't send any mgmt signals.
7151 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7155 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7156 cmd ? cmd->sk : NULL);
/* Completion for READ_LOCAL_OOB_DATA: return the P-192 hash/randomizer
 * and, when BR/EDR secure connections provided them, the P-256 pair as
 * well (otherwise the reply is shortened to exclude those fields). */
7159 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7160 u8 *rand192, u8 *hash256, u8 *rand256,
7163 struct pending_cmd *cmd;
7165 BT_DBG("%s status %u", hdev->name, status);
7167 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7172 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7173 mgmt_status(status));
7175 struct mgmt_rp_read_local_oob_data rp;
7176 size_t rp_size = sizeof(rp);
7178 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7179 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7181 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7182 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7183 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
/* No P-256 data: trim the reply down to the P-192 fields. */
7185 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7188 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7192 mgmt_pending_remove(cmd);
/* Return whether the 16-byte uuid appears in the uuids[uuid_count]
 * filter list. */
7195 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7199 for (i = 0; i < uuid_count; i++) {
7200 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data and report whether any advertised
 * 16/32/128-bit UUID matches the service-discovery filter list.
 * 16- and 32-bit UUIDs are expanded into the Bluetooth base UUID
 * before comparison. */
7207 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7211 while (parsed < eir_len) {
7212 u8 field_len = eir[0];
/* Malformed field running past the buffer: stop parsing. */
7219 if (eir_len - parsed < field_len + 1)
7223 case EIR_UUID16_ALL:
7224 case EIR_UUID16_SOME:
7225 for (i = 0; i + 3 <= field_len; i += 2) {
7226 memcpy(uuid, bluetooth_base_uuid, 16);
/* 16-bit UUID occupies bytes 12-13 of the 128-bit form. */
7227 uuid[13] = eir[i + 3];
7228 uuid[12] = eir[i + 2];
7229 if (has_uuid(uuid, uuid_count, uuids))
7233 case EIR_UUID32_ALL:
7234 case EIR_UUID32_SOME:
7235 for (i = 0; i + 5 <= field_len; i += 4) {
7236 memcpy(uuid, bluetooth_base_uuid, 16);
/* 32-bit UUID occupies bytes 12-15 of the 128-bit form. */
7237 uuid[15] = eir[i + 5];
7238 uuid[14] = eir[i + 4];
7239 uuid[13] = eir[i + 3];
7240 uuid[12] = eir[i + 2];
7241 if (has_uuid(uuid, uuid_count, uuids))
7245 case EIR_UUID128_ALL:
7246 case EIR_UUID128_SOME:
7247 for (i = 0; i + 17 <= field_len; i += 16) {
7248 memcpy(uuid, eir + i + 2, 16);
7249 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + payload). */
7255 parsed += field_len + 1;
7256 eir += field_len + 1;
/* Schedule an LE scan restart (after DISCOV_LE_RESTART_DELAY) so that
 * duplicate filtering picks up changed advertising data — but only if
 * a scan is active and enough of the scan window remains. */
7262 static void restart_le_scan(struct hci_dev *hdev)
7264 /* If controller is not scanning we are done. */
7265 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
/* Not worth restarting when the scan is about to finish anyway. */
7268 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7269 hdev->discovery.scan_start +
7270 hdev->discovery.scan_duration))
7273 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7274 DISCOV_LE_RESTART_DELAY);
/* mgmt_device_found - emit a MGMT_EV_DEVICE_FOUND event for a discovered
 * remote device, applying the active discovery filters (RSSI threshold and
 * service-UUID list) before the event is sent to management sockets.
 *
 * @hdev:         controller that produced the report
 * @bdaddr:       remote device address
 * @link_type:    ACL_LINK (BR/EDR) or LE_LINK
 * @addr_type:    address type, translated via link_to_bdaddr()
 * @dev_class:    optional Class of Device (appended as an EIR field)
 * @rssi:         reported RSSI, HCI_RSSI_INVALID when unavailable
 * @flags:        MGMT device-found flags, sent little-endian
 * @eir/@eir_len: EIR or advertising data
 * @scan_rsp/@scan_rsp_len: LE scan response data, appended after @eir
 *
 * NOTE(review): this listing has lines elided throughout (the opening
 * brace, local declarations, the `return` bodies of the filter checks,
 * several assignments such as the RSSI store, and intermediate braces are
 * not visible). Comments below only describe what the visible lines show.
 */
7277 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7278 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7279 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
/* The event is assembled in a stack buffer and cast to the wire struct. */
7282 struct mgmt_ev_device_found *ev = (void *) buf;
7286 /* Don't send events for a non-kernel initiated discovery. With
7287 * LE one exception is if we have pend_le_reports > 0 in which
7288 * case we're doing passive scanning and want these events.
7290 if (!hci_discovery_active(hdev)) {
7291 if (link_type == ACL_LINK)
7293 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7297 /* When using service discovery with a RSSI threshold, then check
7298 * if such a RSSI threshold is specified. If a RSSI threshold has
7299 * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
7300 * then all results with a RSSI smaller than the RSSI threshold will be
7301 * dropped. If the quirk is set, let it through for further processing,
7302 * as we might need to restart the scan.
7304 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7305 * the results are also dropped.
7307 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7308 (rssi == HCI_RSSI_INVALID ||
7309 (rssi < hdev->discovery.rssi &&
7310 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7313 /* Make sure that the buffer is big enough. The 5 extra bytes
7314 * are for the potential CoD field.
7316 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7319 memset(buf, 0, sizeof(buf));
7321 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7322 * RSSI value was reported as 0 when not available. This behavior
7323 * is kept when using device discovery. This is required for full
7324 * backwards compatibility with the API.
7326 * However when using service discovery, the value 127 will be
7327 * returned when the RSSI is not available.
7329 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7330 link_type == ACL_LINK)
7333 bacpy(&ev->addr.bdaddr, bdaddr);
7334 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7336 ev->flags = cpu_to_le32(flags);
7339 /* When using service discovery and a list of UUID is
7340 * provided, results with no matching UUID should be
7341 * dropped. In case there is a match the result is
7342 * kept and checking possible scan response data
7345 if (hdev->discovery.uuid_count > 0) {
7346 match = eir_has_uuids(eir, eir_len,
7347 hdev->discovery.uuid_count,
7348 hdev->discovery.uuids);
7349 /* If duplicate filtering does not report RSSI changes,
7350 * then restart scanning to ensure updated result with
7351 * updated RSSI values.
7353 if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
7355 restart_le_scan(hdev);
/* No UUID match in the advertising data and no scan response to
 * check later: the result is dropped here. */
7360 if (!match && !scan_rsp_len)
7363 /* Copy EIR or advertising data into event */
7364 memcpy(ev->eir, eir, eir_len);
7366 /* When using service discovery and a list of UUID is
7367 * provided, results with empty EIR or advertising data
7368 * should be dropped since they do not match any UUID.
7370 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
/* Append the Class of Device as an EIR field if the data did not
 * already carry one (the 5 spare bytes reserved above). */
7376 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7377 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7380 if (scan_rsp_len > 0) {
7381 /* When using service discovery and a list of UUID is
7382 * provided, results with no matching UUID should be
7383 * dropped if there is no previous match from the
7386 if (hdev->discovery.uuid_count > 0) {
7387 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7388 hdev->discovery.uuid_count,
7389 hdev->discovery.uuids))
7392 /* If duplicate filtering does not report RSSI changes,
7393 * then restart scanning to ensure updated result with
7394 * updated RSSI values.
7396 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
7398 restart_le_scan(hdev);
7401 /* Append scan response data to event */
7402 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7404 /* When using service discovery and a list of UUID is
7405 * provided, results with empty scan response and no
7406 * previous matched advertising data should be dropped.
7408 if (hdev->discovery.uuid_count > 0 && !match)
7412 /* Validate the reported RSSI value against the RSSI threshold once more
7413 * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
7416 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7417 rssi < hdev->discovery.rssi)
/* eir_len on the wire covers both the EIR data and the appended
 * scan response. */
7420 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7421 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7423 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7426 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7427 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7429 struct mgmt_ev_device_found *ev;
7430 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7433 ev = (struct mgmt_ev_device_found *) buf;
7435 memset(buf, 0, sizeof(buf));
7437 bacpy(&ev->addr.bdaddr, bdaddr);
7438 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7441 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7444 ev->eir_len = cpu_to_le16(eir_len);
7446 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7449 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7451 struct mgmt_ev_discovering ev;
7453 BT_DBG("%s discovering %u", hdev->name, discovering);
7455 memset(&ev, 0, sizeof(ev));
7456 ev.type = hdev->discovery.type;
7457 ev.discovering = discovering;
7459 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7462 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7464 BT_DBG("%s status %u", hdev->name, status);
/* mgmt_reenable_advertising - turn LE advertising back on if the
 * HCI_ADVERTISING setting is enabled (e.g. after an event that implicitly
 * stopped advertising on the controller).
 *
 * NOTE(review): the opening brace, an early `return;` under the flag test
 * and the closing brace of this function appear to have been elided from
 * this listing (the definition runs past the visible excerpt).
 */
7467 void mgmt_reenable_advertising(struct hci_dev *hdev)
7469 struct hci_request req;
/* Nothing to do unless advertising is supposed to be on. */
7471 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
/* Build and submit an HCI request that re-enables advertising;
 * adv_enable_complete() just logs the resulting status. */
7474 hci_req_init(&req, hdev);
7475 enable_advertising(&req);
7476 hci_req_run(&req, adv_enable_complete);