2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
105 static const u16 mgmt_events[] = {
106 MGMT_EV_CONTROLLER_ERROR,
108 MGMT_EV_INDEX_REMOVED,
109 MGMT_EV_NEW_SETTINGS,
110 MGMT_EV_CLASS_OF_DEV_CHANGED,
111 MGMT_EV_LOCAL_NAME_CHANGED,
112 MGMT_EV_NEW_LINK_KEY,
113 MGMT_EV_NEW_LONG_TERM_KEY,
114 MGMT_EV_DEVICE_CONNECTED,
115 MGMT_EV_DEVICE_DISCONNECTED,
116 MGMT_EV_CONNECT_FAILED,
117 MGMT_EV_PIN_CODE_REQUEST,
118 MGMT_EV_USER_CONFIRM_REQUEST,
119 MGMT_EV_USER_PASSKEY_REQUEST,
121 MGMT_EV_DEVICE_FOUND,
123 MGMT_EV_DEVICE_BLOCKED,
124 MGMT_EV_DEVICE_UNBLOCKED,
125 MGMT_EV_DEVICE_UNPAIRED,
126 MGMT_EV_PASSKEY_NOTIFY,
129 MGMT_EV_DEVICE_ADDED,
130 MGMT_EV_DEVICE_REMOVED,
131 MGMT_EV_NEW_CONN_PARAM,
132 MGMT_EV_UNCONF_INDEX_ADDED,
133 MGMT_EV_UNCONF_INDEX_REMOVED,
134 MGMT_EV_NEW_CONFIG_OPTIONS,
135 MGMT_EV_EXT_INDEX_ADDED,
136 MGMT_EV_EXT_INDEX_REMOVED,
137 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
142 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
143 "\x00\x00\x00\x00\x00\x00\x00\x00"
145 /* HCI to MGMT error code conversion table */
146 static u8 mgmt_status_table[] = {
148 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
149 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
150 MGMT_STATUS_FAILED, /* Hardware Failure */
151 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
152 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
153 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
154 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
155 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
157 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
158 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
159 MGMT_STATUS_BUSY, /* Command Disallowed */
160 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
161 MGMT_STATUS_REJECTED, /* Rejected Security */
162 MGMT_STATUS_REJECTED, /* Rejected Personal */
163 MGMT_STATUS_TIMEOUT, /* Host Timeout */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
165 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
166 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
167 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
168 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
169 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
170 MGMT_STATUS_BUSY, /* Repeated Attempts */
171 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
172 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
173 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
174 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
175 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
176 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
177 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
178 MGMT_STATUS_FAILED, /* Unspecified Error */
179 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
180 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
181 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
182 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
183 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
184 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
185 MGMT_STATUS_FAILED, /* Unit Link Key Used */
186 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
187 MGMT_STATUS_TIMEOUT, /* Instant Passed */
188 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
189 MGMT_STATUS_FAILED, /* Transaction Collision */
190 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
191 MGMT_STATUS_REJECTED, /* QoS Rejected */
192 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
193 MGMT_STATUS_REJECTED, /* Insufficient Security */
194 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
195 MGMT_STATUS_BUSY, /* Role Switch Pending */
196 MGMT_STATUS_FAILED, /* Slot Violation */
197 MGMT_STATUS_FAILED, /* Role Switch Failed */
198 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
199 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
200 MGMT_STATUS_BUSY, /* Host Busy Pairing */
201 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
202 MGMT_STATUS_BUSY, /* Controller Busy */
203 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
204 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
205 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
206 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
207 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
210 static u8 mgmt_status(u8 hci_status)
212 if (hci_status < ARRAY_SIZE(mgmt_status_table))
213 return mgmt_status_table[hci_status];
215 return MGMT_STATUS_FAILED;
218 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
221 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
225 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
226 u16 len, int flag, struct sock *skip_sk)
228 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
232 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
233 u16 len, struct sock *skip_sk)
235 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
236 HCI_MGMT_GENERIC_EVENTS, skip_sk);
239 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
240 struct sock *skip_sk)
242 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
243 HCI_SOCK_TRUSTED, skip_sk);
246 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
249 struct mgmt_rp_read_version rp;
251 BT_DBG("sock %p", sk);
253 rp.version = MGMT_VERSION;
254 rp.revision = cpu_to_le16(MGMT_REVISION);
256 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
260 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
263 struct mgmt_rp_read_commands *rp;
264 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
265 const u16 num_events = ARRAY_SIZE(mgmt_events);
270 BT_DBG("sock %p", sk);
272 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
274 rp = kmalloc(rp_size, GFP_KERNEL);
278 rp->num_commands = cpu_to_le16(num_commands);
279 rp->num_events = cpu_to_le16(num_events);
281 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
282 put_unaligned_le16(mgmt_commands[i], opcode);
284 for (i = 0; i < num_events; i++, opcode++)
285 put_unaligned_le16(mgmt_events[i], opcode);
287 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
294 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
297 struct mgmt_rp_read_index_list *rp;
303 BT_DBG("sock %p", sk);
305 read_lock(&hci_dev_list_lock);
308 list_for_each_entry(d, &hci_dev_list, list) {
309 if (d->dev_type == HCI_BREDR &&
310 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
314 rp_len = sizeof(*rp) + (2 * count);
315 rp = kmalloc(rp_len, GFP_ATOMIC);
317 read_unlock(&hci_dev_list_lock);
322 list_for_each_entry(d, &hci_dev_list, list) {
323 if (hci_dev_test_flag(d, HCI_SETUP) ||
324 hci_dev_test_flag(d, HCI_CONFIG) ||
325 hci_dev_test_flag(d, HCI_USER_CHANNEL))
328 /* Devices marked as raw-only are neither configured
329 * nor unconfigured controllers.
331 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
334 if (d->dev_type == HCI_BREDR &&
335 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
336 rp->index[count++] = cpu_to_le16(d->id);
337 BT_DBG("Added hci%u", d->id);
341 rp->num_controllers = cpu_to_le16(count);
342 rp_len = sizeof(*rp) + (2 * count);
344 read_unlock(&hci_dev_list_lock);
346 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
354 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
355 void *data, u16 data_len)
357 struct mgmt_rp_read_unconf_index_list *rp;
363 BT_DBG("sock %p", sk);
365 read_lock(&hci_dev_list_lock);
368 list_for_each_entry(d, &hci_dev_list, list) {
369 if (d->dev_type == HCI_BREDR &&
370 hci_dev_test_flag(d, HCI_UNCONFIGURED))
374 rp_len = sizeof(*rp) + (2 * count);
375 rp = kmalloc(rp_len, GFP_ATOMIC);
377 read_unlock(&hci_dev_list_lock);
382 list_for_each_entry(d, &hci_dev_list, list) {
383 if (hci_dev_test_flag(d, HCI_SETUP) ||
384 hci_dev_test_flag(d, HCI_CONFIG) ||
385 hci_dev_test_flag(d, HCI_USER_CHANNEL))
388 /* Devices marked as raw-only are neither configured
389 * nor unconfigured controllers.
391 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
394 if (d->dev_type == HCI_BREDR &&
395 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
396 rp->index[count++] = cpu_to_le16(d->id);
397 BT_DBG("Added hci%u", d->id);
401 rp->num_controllers = cpu_to_le16(count);
402 rp_len = sizeof(*rp) + (2 * count);
404 read_unlock(&hci_dev_list_lock);
406 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
407 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
414 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
415 void *data, u16 data_len)
417 struct mgmt_rp_read_ext_index_list *rp;
423 BT_DBG("sock %p", sk);
425 read_lock(&hci_dev_list_lock);
428 list_for_each_entry(d, &hci_dev_list, list) {
429 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
433 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
434 rp = kmalloc(rp_len, GFP_ATOMIC);
436 read_unlock(&hci_dev_list_lock);
441 list_for_each_entry(d, &hci_dev_list, list) {
442 if (hci_dev_test_flag(d, HCI_SETUP) ||
443 hci_dev_test_flag(d, HCI_CONFIG) ||
444 hci_dev_test_flag(d, HCI_USER_CHANNEL))
447 /* Devices marked as raw-only are neither configured
448 * nor unconfigured controllers.
450 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
453 if (d->dev_type == HCI_BREDR) {
454 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
455 rp->entry[count].type = 0x01;
457 rp->entry[count].type = 0x00;
458 } else if (d->dev_type == HCI_AMP) {
459 rp->entry[count].type = 0x02;
464 rp->entry[count].bus = d->bus;
465 rp->entry[count++].index = cpu_to_le16(d->id);
466 BT_DBG("Added hci%u", d->id);
469 rp->num_controllers = cpu_to_le16(count);
470 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
472 read_unlock(&hci_dev_list_lock);
474 /* If this command is called at least once, then all the
475 * default index and unconfigured index events are disabled
476 * and from now on only extended index events are used.
478 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
479 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
480 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
482 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
483 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
490 static bool is_configured(struct hci_dev *hdev)
492 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
493 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
496 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
497 !bacmp(&hdev->public_addr, BDADDR_ANY))
503 static __le32 get_missing_options(struct hci_dev *hdev)
507 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
508 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
509 options |= MGMT_OPTION_EXTERNAL_CONFIG;
511 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
512 !bacmp(&hdev->public_addr, BDADDR_ANY))
513 options |= MGMT_OPTION_PUBLIC_ADDRESS;
515 return cpu_to_le32(options);
518 static int new_options(struct hci_dev *hdev, struct sock *skip)
520 __le32 options = get_missing_options(hdev);
522 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
523 sizeof(options), skip);
526 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
528 __le32 options = get_missing_options(hdev);
530 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
534 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
535 void *data, u16 data_len)
537 struct mgmt_rp_read_config_info rp;
540 BT_DBG("sock %p %s", sk, hdev->name);
544 memset(&rp, 0, sizeof(rp));
545 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
547 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
548 options |= MGMT_OPTION_EXTERNAL_CONFIG;
550 if (hdev->set_bdaddr)
551 options |= MGMT_OPTION_PUBLIC_ADDRESS;
553 rp.supported_options = cpu_to_le32(options);
554 rp.missing_options = get_missing_options(hdev);
556 hci_dev_unlock(hdev);
558 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
562 static u32 get_supported_settings(struct hci_dev *hdev)
566 settings |= MGMT_SETTING_POWERED;
567 settings |= MGMT_SETTING_BONDABLE;
568 settings |= MGMT_SETTING_DEBUG_KEYS;
569 settings |= MGMT_SETTING_CONNECTABLE;
570 settings |= MGMT_SETTING_DISCOVERABLE;
572 if (lmp_bredr_capable(hdev)) {
573 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
574 settings |= MGMT_SETTING_FAST_CONNECTABLE;
575 settings |= MGMT_SETTING_BREDR;
576 settings |= MGMT_SETTING_LINK_SECURITY;
578 if (lmp_ssp_capable(hdev)) {
579 settings |= MGMT_SETTING_SSP;
580 settings |= MGMT_SETTING_HS;
583 if (lmp_sc_capable(hdev))
584 settings |= MGMT_SETTING_SECURE_CONN;
587 if (lmp_le_capable(hdev)) {
588 settings |= MGMT_SETTING_LE;
589 settings |= MGMT_SETTING_ADVERTISING;
590 settings |= MGMT_SETTING_SECURE_CONN;
591 settings |= MGMT_SETTING_PRIVACY;
592 settings |= MGMT_SETTING_STATIC_ADDRESS;
595 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
597 settings |= MGMT_SETTING_CONFIGURATION;
602 static u32 get_current_settings(struct hci_dev *hdev)
606 if (hdev_is_powered(hdev))
607 settings |= MGMT_SETTING_POWERED;
609 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
610 settings |= MGMT_SETTING_CONNECTABLE;
612 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
613 settings |= MGMT_SETTING_FAST_CONNECTABLE;
615 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
616 settings |= MGMT_SETTING_DISCOVERABLE;
618 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
619 settings |= MGMT_SETTING_BONDABLE;
621 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
622 settings |= MGMT_SETTING_BREDR;
624 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
625 settings |= MGMT_SETTING_LE;
627 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
628 settings |= MGMT_SETTING_LINK_SECURITY;
630 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
631 settings |= MGMT_SETTING_SSP;
633 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
634 settings |= MGMT_SETTING_HS;
636 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
637 settings |= MGMT_SETTING_ADVERTISING;
639 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
640 settings |= MGMT_SETTING_SECURE_CONN;
642 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
643 settings |= MGMT_SETTING_DEBUG_KEYS;
645 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
646 settings |= MGMT_SETTING_PRIVACY;
648 /* The current setting for static address has two purposes. The
649 * first is to indicate if the static address will be used and
650 * the second is to indicate if it is actually set.
652 * This means if the static address is not configured, this flag
653 * will never bet set. If the address is configured, then if the
654 * address is actually used decides if the flag is set or not.
656 * For single mode LE only controllers and dual-mode controllers
657 * with BR/EDR disabled, the existence of the static address will
660 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
661 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
662 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
663 if (bacmp(&hdev->static_addr, BDADDR_ANY))
664 settings |= MGMT_SETTING_STATIC_ADDRESS;
670 #define PNP_INFO_SVCLASS_ID 0x1200
672 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
674 u8 *ptr = data, *uuids_start = NULL;
675 struct bt_uuid *uuid;
680 list_for_each_entry(uuid, &hdev->uuids, list) {
683 if (uuid->size != 16)
686 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
690 if (uuid16 == PNP_INFO_SVCLASS_ID)
696 uuids_start[1] = EIR_UUID16_ALL;
700 /* Stop if not enough space to put next UUID */
701 if ((ptr - data) + sizeof(u16) > len) {
702 uuids_start[1] = EIR_UUID16_SOME;
706 *ptr++ = (uuid16 & 0x00ff);
707 *ptr++ = (uuid16 & 0xff00) >> 8;
708 uuids_start[0] += sizeof(uuid16);
714 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
716 u8 *ptr = data, *uuids_start = NULL;
717 struct bt_uuid *uuid;
722 list_for_each_entry(uuid, &hdev->uuids, list) {
723 if (uuid->size != 32)
729 uuids_start[1] = EIR_UUID32_ALL;
733 /* Stop if not enough space to put next UUID */
734 if ((ptr - data) + sizeof(u32) > len) {
735 uuids_start[1] = EIR_UUID32_SOME;
739 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
741 uuids_start[0] += sizeof(u32);
747 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
749 u8 *ptr = data, *uuids_start = NULL;
750 struct bt_uuid *uuid;
755 list_for_each_entry(uuid, &hdev->uuids, list) {
756 if (uuid->size != 128)
762 uuids_start[1] = EIR_UUID128_ALL;
766 /* Stop if not enough space to put next UUID */
767 if ((ptr - data) + 16 > len) {
768 uuids_start[1] = EIR_UUID128_SOME;
772 memcpy(ptr, uuid->uuid, 16);
774 uuids_start[0] += 16;
780 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
782 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
785 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
786 struct hci_dev *hdev,
789 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
792 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
797 name_len = strlen(hdev->dev_name);
799 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
801 if (name_len > max_len) {
803 ptr[1] = EIR_NAME_SHORT;
805 ptr[1] = EIR_NAME_COMPLETE;
807 ptr[0] = name_len + 1;
809 memcpy(ptr + 2, hdev->dev_name, name_len);
811 ad_len += (name_len + 2);
812 ptr += (name_len + 2);
818 static void update_scan_rsp_data(struct hci_request *req)
820 struct hci_dev *hdev = req->hdev;
821 struct hci_cp_le_set_scan_rsp_data cp;
824 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
827 memset(&cp, 0, sizeof(cp));
829 len = create_scan_rsp_data(hdev, cp.data);
831 if (hdev->scan_rsp_data_len == len &&
832 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
835 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
836 hdev->scan_rsp_data_len = len;
840 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
843 static u8 get_adv_discov_flags(struct hci_dev *hdev)
845 struct mgmt_pending_cmd *cmd;
847 /* If there's a pending mgmt command the flags will not yet have
848 * their final values, so check for this first.
850 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
852 struct mgmt_mode *cp = cmd->param;
854 return LE_AD_GENERAL;
855 else if (cp->val == 0x02)
856 return LE_AD_LIMITED;
858 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
859 return LE_AD_LIMITED;
860 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
861 return LE_AD_GENERAL;
867 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
869 u8 ad_len = 0, flags = 0;
871 flags |= get_adv_discov_flags(hdev);
873 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
874 flags |= LE_AD_NO_BREDR;
877 BT_DBG("adv flags 0x%02x", flags);
887 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
889 ptr[1] = EIR_TX_POWER;
890 ptr[2] = (u8) hdev->adv_tx_power;
899 static void update_adv_data(struct hci_request *req)
901 struct hci_dev *hdev = req->hdev;
902 struct hci_cp_le_set_adv_data cp;
905 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
908 memset(&cp, 0, sizeof(cp));
910 len = create_adv_data(hdev, cp.data);
912 if (hdev->adv_data_len == len &&
913 memcmp(cp.data, hdev->adv_data, len) == 0)
916 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
917 hdev->adv_data_len = len;
921 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
924 int mgmt_update_adv_data(struct hci_dev *hdev)
926 struct hci_request req;
928 hci_req_init(&req, hdev);
929 update_adv_data(&req);
931 return hci_req_run(&req, NULL);
934 static void create_eir(struct hci_dev *hdev, u8 *data)
939 name_len = strlen(hdev->dev_name);
945 ptr[1] = EIR_NAME_SHORT;
947 ptr[1] = EIR_NAME_COMPLETE;
949 /* EIR Data length */
950 ptr[0] = name_len + 1;
952 memcpy(ptr + 2, hdev->dev_name, name_len);
954 ptr += (name_len + 2);
957 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
959 ptr[1] = EIR_TX_POWER;
960 ptr[2] = (u8) hdev->inq_tx_power;
965 if (hdev->devid_source > 0) {
967 ptr[1] = EIR_DEVICE_ID;
969 put_unaligned_le16(hdev->devid_source, ptr + 2);
970 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
971 put_unaligned_le16(hdev->devid_product, ptr + 6);
972 put_unaligned_le16(hdev->devid_version, ptr + 8);
977 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
978 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
979 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
982 static void update_eir(struct hci_request *req)
984 struct hci_dev *hdev = req->hdev;
985 struct hci_cp_write_eir cp;
987 if (!hdev_is_powered(hdev))
990 if (!lmp_ext_inq_capable(hdev))
993 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
996 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
999 memset(&cp, 0, sizeof(cp));
1001 create_eir(hdev, cp.data);
1003 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1006 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1008 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1011 static u8 get_service_classes(struct hci_dev *hdev)
1013 struct bt_uuid *uuid;
1016 list_for_each_entry(uuid, &hdev->uuids, list)
1017 val |= uuid->svc_hint;
1022 static void update_class(struct hci_request *req)
1024 struct hci_dev *hdev = req->hdev;
1027 BT_DBG("%s", hdev->name);
1029 if (!hdev_is_powered(hdev))
1032 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1035 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1038 cod[0] = hdev->minor_class;
1039 cod[1] = hdev->major_class;
1040 cod[2] = get_service_classes(hdev);
1042 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1045 if (memcmp(cod, hdev->dev_class, 3) == 0)
1048 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1051 static bool get_connectable(struct hci_dev *hdev)
1053 struct mgmt_pending_cmd *cmd;
1055 /* If there's a pending mgmt command the flag will not yet have
1056 * it's final value, so check for this first.
1058 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1060 struct mgmt_mode *cp = cmd->param;
1064 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1067 static void disable_advertising(struct hci_request *req)
1071 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1074 static void enable_advertising(struct hci_request *req)
1076 struct hci_dev *hdev = req->hdev;
1077 struct hci_cp_le_set_adv_param cp;
1078 u8 own_addr_type, enable = 0x01;
1081 if (hci_conn_num(hdev, LE_LINK) > 0)
1084 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1085 disable_advertising(req);
1087 /* Clear the HCI_LE_ADV bit temporarily so that the
1088 * hci_update_random_address knows that it's safe to go ahead
1089 * and write a new random address. The flag will be set back on
1090 * as soon as the SET_ADV_ENABLE HCI command completes.
1092 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1094 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1097 connectable = get_connectable(hdev);
1099 /* Set require_privacy to true only when non-connectable
1100 * advertising is used. In that case it is fine to use a
1101 * non-resolvable private address.
1103 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1106 memset(&cp, 0, sizeof(cp));
1107 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1108 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1109 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1110 cp.own_address_type = own_addr_type;
1111 cp.channel_map = hdev->le_adv_channel_map;
1113 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1115 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1118 static void service_cache_off(struct work_struct *work)
1120 struct hci_dev *hdev = container_of(work, struct hci_dev,
1121 service_cache.work);
1122 struct hci_request req;
1124 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1127 hci_req_init(&req, hdev);
1134 hci_dev_unlock(hdev);
1136 hci_req_run(&req, NULL);
1139 static void rpa_expired(struct work_struct *work)
1141 struct hci_dev *hdev = container_of(work, struct hci_dev,
1143 struct hci_request req;
1147 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1149 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1152 /* The generation of a new RPA and programming it into the
1153 * controller happens in the enable_advertising() function.
1155 hci_req_init(&req, hdev);
1156 enable_advertising(&req);
1157 hci_req_run(&req, NULL);
1160 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1162 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1165 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1166 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1168 /* Non-mgmt controlled devices get this bit set
1169 * implicitly so that pairing works for them, however
1170 * for mgmt we require user-space to explicitly enable
1173 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1176 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1177 void *data, u16 data_len)
1179 struct mgmt_rp_read_info rp;
1181 BT_DBG("sock %p %s", sk, hdev->name);
1185 memset(&rp, 0, sizeof(rp));
1187 bacpy(&rp.bdaddr, &hdev->bdaddr);
1189 rp.version = hdev->hci_ver;
1190 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1192 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1193 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1195 memcpy(rp.dev_class, hdev->dev_class, 3);
1197 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1198 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1200 hci_dev_unlock(hdev);
1202 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1206 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1208 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1210 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1214 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1216 BT_DBG("%s status 0x%02x", hdev->name, status);
1218 if (hci_conn_count(hdev) == 0) {
1219 cancel_delayed_work(&hdev->power_off);
1220 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1224 static bool hci_stop_discovery(struct hci_request *req)
1226 struct hci_dev *hdev = req->hdev;
1227 struct hci_cp_remote_name_req_cancel cp;
1228 struct inquiry_entry *e;
1230 switch (hdev->discovery.state) {
1231 case DISCOVERY_FINDING:
1232 if (test_bit(HCI_INQUIRY, &hdev->flags))
1233 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1235 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1236 cancel_delayed_work(&hdev->le_scan_disable);
1237 hci_req_add_le_scan_disable(req);
1242 case DISCOVERY_RESOLVING:
1243 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1248 bacpy(&cp.bdaddr, &e->data.bdaddr);
1249 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1255 /* Passive scanning */
1256 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1257 hci_req_add_le_scan_disable(req);
1267 static int clean_up_hci_state(struct hci_dev *hdev)
1269 struct hci_request req;
1270 struct hci_conn *conn;
1271 bool discov_stopped;
1274 hci_req_init(&req, hdev);
1276 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1277 test_bit(HCI_PSCAN, &hdev->flags)) {
1279 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1282 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1283 disable_advertising(&req);
1285 discov_stopped = hci_stop_discovery(&req);
1287 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1288 struct hci_cp_disconnect dc;
1289 struct hci_cp_reject_conn_req rej;
1291 switch (conn->state) {
1294 dc.handle = cpu_to_le16(conn->handle);
1295 dc.reason = 0x15; /* Terminated due to Power Off */
1296 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1299 if (conn->type == LE_LINK)
1300 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1302 else if (conn->type == ACL_LINK)
1303 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1307 bacpy(&rej.bdaddr, &conn->dst);
1308 rej.reason = 0x15; /* Terminated due to Power Off */
1309 if (conn->type == ACL_LINK)
1310 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1312 else if (conn->type == SCO_LINK)
1313 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1319 err = hci_req_run(&req, clean_up_hci_complete);
1320 if (!err && discov_stopped)
1321 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* Handler for the MGMT Set Powered command: power the controller on
 * (cp->val == 0x01) or off (cp->val == 0x00).  Any other value is
 * rejected with MGMT_STATUS_INVALID_PARAMS, and only one Set Powered
 * command may be pending per controller at a time.
 */
1326 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1329 struct mgmt_mode *cp = data;
1330 struct mgmt_pending_cmd *cmd;
1333 BT_DBG("request for %s", hdev->name);
1335 if (cp->val != 0x00 && cp->val != 0x01)
1336 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1337 MGMT_STATUS_INVALID_PARAMS);
/* Refuse a second Set Powered while one is already in flight. */
1341 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1342 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* The controller was only up because of the auto-power-off grace
 * period: cancel the scheduled power-off and report it as powered.
 */
1347 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1348 cancel_delayed_work(&hdev->power_off);
1351 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1353 err = mgmt_powered(hdev, 1);
/* Requested state already matches the current one: just respond. */
1358 if (!!cp->val == hdev_is_powered(hdev)) {
1359 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1363 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
/* Power on is done asynchronously on the request workqueue. */
1370 queue_work(hdev->req_workqueue, &hdev->power_on);
1373 /* Disconnect connections, stop scans, etc */
1374 err = clean_up_hci_state(hdev);
/* Force power-off after a timeout even if clean-up never finishes. */
1376 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1377 HCI_POWER_OFF_TIMEOUT);
1379 /* ENODATA means there were no HCI commands queued */
1380 if (err == -ENODATA) {
1381 cancel_delayed_work(&hdev->power_off);
1382 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1388 hci_dev_unlock(hdev);
/* Broadcast a New Settings event carrying the current settings bitmask;
 * @skip is presumably the socket to exclude from the broadcast (the
 * continuation line passing it to mgmt_generic_event() is elided).
 */
1392 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1394 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1396 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
/* Public wrapper: emit New Settings to every management socket
 * (NULL means no socket is skipped).
 */
1400 int mgmt_new_settings(struct hci_dev *hdev)
1402 return new_settings(hdev, NULL);
/* NOTE(review): field of a small aggregation struct whose opening line
 * is not visible in this listing; settings_rsp() below accesses it as
 * struct cmd_lookup (match->hdev / match->sk).
 */
1407 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer one pending command with the
 * current settings and detach it.  @data is a struct cmd_lookup; the
 * first responding socket is stashed (with a held reference) in
 * match->sk so the caller can later skip it when broadcasting.
 */
1411 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1413 struct cmd_lookup *match = data;
1415 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1417 list_del(&cmd->list);
1419 if (match->sk == NULL) {
1420 match->sk = cmd->sk;
1421 sock_hold(match->sk);
/* Already unlinked above, so free rather than remove the command. */
1424 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail one pending command with the
 * status byte @data points at (the local cast of @data to a u8 pointer
 * is elided from this listing), then remove the command.
 */
1427 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1431 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1432 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: when the command installed its own
 * cmd_complete handler, invoke it with the status from @data; otherwise
 * fall back to a plain command-status response via cmd_status_rsp().
 */
1435 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1437 if (cmd->cmd_complete) {
1440 cmd->cmd_complete(cmd, *status);
1441 mgmt_pending_remove(cmd);
1446 cmd_status_rsp(cmd, data);
/* Default cmd_complete handler: echo the original request parameters
 * back in the Command Complete response.
 */
1449 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1451 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1452 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: respond with only
 * the leading mgmt_addr_info portion of the stored request parameters.
 */
1455 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1457 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1458 cmd->param, sizeof(struct mgmt_addr_info));
1461 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1463 if (!lmp_bredr_capable(hdev))
1464 return MGMT_STATUS_NOT_SUPPORTED;
1465 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1466 return MGMT_STATUS_REJECTED;
1468 return MGMT_STATUS_SUCCESS;
1471 static u8 mgmt_le_support(struct hci_dev *hdev)
1473 if (!lmp_le_capable(hdev))
1474 return MGMT_STATUS_NOT_SUPPORTED;
1475 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1476 return MGMT_STATUS_REJECTED;
1478 return MGMT_STATUS_SUCCESS;
/* hci_request completion callback for Set Discoverable.  On HCI failure
 * the pending command is answered with the mapped error and the limited
 * discoverable flag is cleared; on success HCI_DISCOVERABLE is updated,
 * the discoverable timeout (if any) is armed, and listeners are
 * notified of the settings change.
 */
1481 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1484 struct mgmt_pending_cmd *cmd;
1485 struct mgmt_mode *cp;
1486 struct hci_request req;
1489 BT_DBG("status 0x%02x", status);
1493 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1498 u8 mgmt_err = mgmt_status(status);
1499 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1500 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1506 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
/* Arm the delayed discoverable-off work when a timeout is configured. */
1508 if (hdev->discov_timeout > 0) {
1509 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1510 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1514 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1517 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1520 new_settings(hdev, cmd->sk);
1522 /* When the discoverable mode gets changed, make sure
1523 * that class of device has the limited discoverable
1524 * bit correctly set. Also update page scan based on whitelist
1527 hci_req_init(&req, hdev);
1528 __hci_update_page_scan(&req);
1530 hci_req_run(&req, NULL);
1533 mgmt_pending_remove(cmd);
1536 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Discoverable command.  cp->val selects the
 * mode: 0x00 off, 0x01 general discoverable, 0x02 limited discoverable;
 * cp->timeout (little-endian, seconds) bounds how long the mode stays
 * active.  Validation, the powered-off shortcut, the timeout-only
 * update, and the full HCI request path are all handled here; the
 * timeout itself is armed in set_discoverable_complete().
 */
1539 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1542 struct mgmt_cp_set_discoverable *cp = data;
1543 struct mgmt_pending_cmd *cmd;
1544 struct hci_request req;
1549 BT_DBG("request for %s", hdev->name);
/* At least one of BR/EDR or LE must be enabled for discoverability. */
1551 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1552 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1553 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1554 MGMT_STATUS_REJECTED);
1556 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1557 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1558 MGMT_STATUS_INVALID_PARAMS);
1560 timeout = __le16_to_cpu(cp->timeout);
1562 /* Disabling discoverable requires that no timeout is set,
1563 * and enabling limited discoverable requires a timeout.
1565 if ((cp->val == 0x00 && timeout > 0) ||
1566 (cp->val == 0x02 && timeout == 0))
1567 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1568 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while the controller is powered off. */
1572 if (!hdev_is_powered(hdev) && timeout > 0) {
1573 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1574 MGMT_STATUS_NOT_POWERED);
/* Discoverable and connectable changes must not race each other. */
1578 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1579 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1580 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Being discoverable requires being connectable first. */
1585 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1586 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 MGMT_STATUS_REJECTED);
/* Powered-off path: flip the flag and respond, no HCI traffic. */
1591 if (!hdev_is_powered(hdev)) {
1592 bool changed = false;
1594 /* Setting limited discoverable when powered off is
1595 * not a valid operation since it requires a timeout
1596 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1598 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1599 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1603 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1608 err = new_settings(hdev, sk);
1613 /* If the current mode is the same, then just update the timeout
1614 * value with the new value. And if only the timeout gets updated,
1615 * then no need for any HCI transactions.
1617 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1618 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1619 HCI_LIMITED_DISCOVERABLE)) {
1620 cancel_delayed_work(&hdev->discov_off);
1621 hdev->discov_timeout = timeout;
1623 if (cp->val && hdev->discov_timeout > 0) {
1624 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1625 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1629 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1633 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1639 /* Cancel any potential discoverable timeout that might be
1640 * still active and store new timeout value. The arming of
1641 * the timeout happens in the complete handler.
1643 cancel_delayed_work(&hdev->discov_off);
1644 hdev->discov_timeout = timeout;
1646 /* Limited discoverable mode */
1647 if (cp->val == 0x02)
1648 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1650 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1652 hci_req_init(&req, hdev);
1654 /* The procedure for LE-only controllers is much simpler - just
1655 * update the advertising data.
1657 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1663 struct hci_cp_write_current_iac_lap hci_cp;
1665 if (cp->val == 0x02) {
1666 /* Limited discoverable mode */
/* IAC LAPs below are the standard LIAC (0x9e8b00) and GIAC
 * (0x9e8b33) inquiry access codes, little-endian.
 */
1667 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1668 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1669 hci_cp.iac_lap[1] = 0x8b;
1670 hci_cp.iac_lap[2] = 0x9e;
1671 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1672 hci_cp.iac_lap[4] = 0x8b;
1673 hci_cp.iac_lap[5] = 0x9e;
1675 /* General discoverable mode */
1677 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1678 hci_cp.iac_lap[1] = 0x8b;
1679 hci_cp.iac_lap[2] = 0x9e;
1682 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1683 (hci_cp.num_iac * 3) + 1, &hci_cp);
1685 scan |= SCAN_INQUIRY;
1687 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1690 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1693 update_adv_data(&req);
1695 err = hci_req_run(&req, set_discoverable_complete);
1697 mgmt_pending_remove(cmd);
1700 hci_dev_unlock(hdev);
/* Queue HCI page-scan commands switching between fast (interlaced scan,
 * 160 ms interval) and standard (1.28 s interval) connectable modes.
 * Commands are only added when the requested values actually differ
 * from the controller's current page-scan settings.  No-op unless
 * BR/EDR is enabled and the controller is at least Bluetooth 1.2.
 */
1704 static void write_fast_connectable(struct hci_request *req, bool enable)
1706 struct hci_dev *hdev = req->hdev;
1707 struct hci_cp_write_page_scan_activity acp;
1710 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1713 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1717 type = PAGE_SCAN_TYPE_INTERLACED;
1719 /* 160 msec page scan interval */
1720 acp.interval = cpu_to_le16(0x0100);
1722 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1724 /* default 1.28 sec page scan */
1725 acp.interval = cpu_to_le16(0x0800);
/* 11.25 ms page scan window (0x0012 slots) used in both modes. */
1728 acp.window = cpu_to_le16(0x0012);
/* Only emit HCI commands for values that actually change. */
1730 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1731 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1732 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1735 if (hdev->page_scan_type != type)
1736 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* hci_request completion callback for Set Connectable.  On failure the
 * pending command gets the mapped error status; on success the
 * HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE) flags are
 * updated, the caller is answered, and — if anything changed — New
 * Settings is broadcast and scanning/advertising state refreshed.
 */
1739 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1742 struct mgmt_pending_cmd *cmd;
1743 struct mgmt_mode *cp;
1744 bool conn_changed, discov_changed;
1746 BT_DBG("status 0x%02x", status);
1750 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1755 u8 mgmt_err = mgmt_status(status);
1756 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1762 conn_changed = !hci_dev_test_and_set_flag(hdev,
1764 discov_changed = false;
/* Disabling connectable implicitly drops discoverable as well. */
1766 conn_changed = hci_dev_test_and_clear_flag(hdev,
1768 discov_changed = hci_dev_test_and_clear_flag(hdev,
1772 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1774 if (conn_changed || discov_changed) {
1775 new_settings(hdev, cmd->sk);
1776 hci_update_page_scan(hdev);
1778 mgmt_update_adv_data(hdev);
1779 hci_update_background_scan(hdev);
1783 mgmt_pending_remove(cmd);
1786 hci_dev_unlock(hdev);
/* Flag-only path of Set Connectable (used when no HCI traffic is
 * needed, e.g. powered off): toggle HCI_CONNECTABLE directly, clearing
 * HCI_DISCOVERABLE too when going non-connectable, answer @sk, and on
 * an actual change refresh scanning state and broadcast New Settings.
 */
1789 static int set_connectable_update_settings(struct hci_dev *hdev,
1790 struct sock *sk, u8 val)
1792 bool changed = false;
1795 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1799 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1801 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1802 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1805 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1810 hci_update_page_scan(hdev);
1811 hci_update_background_scan(hdev);
1812 return new_settings(hdev, sk);
/* Handler for the MGMT Set Connectable command (cp->val 0x00/0x01).
 * Powered-off requests take the flag-only shortcut; otherwise an HCI
 * request is built that updates scan enable (BR/EDR), advertising data
 * (LE-only), and advertising parameters, finishing asynchronously in
 * set_connectable_complete().
 */
1818 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1821 struct mgmt_mode *cp = data;
1822 struct mgmt_pending_cmd *cmd;
1823 struct hci_request req;
1827 BT_DBG("request for %s", hdev->name);
1829 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1830 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1831 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1832 MGMT_STATUS_REJECTED);
1834 if (cp->val != 0x00 && cp->val != 0x01)
1835 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1836 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI needed, update the stored settings only. */
1840 if (!hdev_is_powered(hdev)) {
1841 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against in-flight discoverable/connectable changes. */
1845 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1846 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1847 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1852 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1858 hci_req_init(&req, hdev);
1860 /* If BR/EDR is not enabled and we disable advertising as a
1861 * by-product of disabling connectable, we need to update the
1862 * advertising flags.
1864 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1866 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1867 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1869 update_adv_data(&req);
1870 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1874 /* If we don't have any whitelist entries just
1875 * disable all scanning. If there are entries
1876 * and we had both page and inquiry scanning
1877 * enabled then fall back to only page scanning.
1878 * Otherwise no changes are needed.
1880 if (list_empty(&hdev->whitelist))
1881 scan = SCAN_DISABLED;
1882 else if (test_bit(HCI_ISCAN, &hdev->flags))
1885 goto no_scan_update;
/* Dropping inquiry scan makes a running discoverable timeout moot. */
1887 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1888 hdev->discov_timeout > 0)
1889 cancel_delayed_work(&hdev->discov_off);
1892 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1896 /* Update the advertising parameters if necessary */
1897 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
1898 enable_advertising(&req);
1900 err = hci_req_run(&req, set_connectable_complete);
1902 mgmt_pending_remove(cmd);
/* Nothing to send over HCI: fall back to the settings-only path. */
1903 if (err == -ENODATA)
1904 err = set_connectable_update_settings(hdev, sk,
1910 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Bondable command: toggle the HCI_BONDABLE
 * flag (a pure host-side setting — no HCI traffic is generated),
 * answer the caller, and broadcast New Settings on an actual change.
 */
1914 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1917 struct mgmt_mode *cp = data;
1921 BT_DBG("request for %s", hdev->name);
1923 if (cp->val != 0x00 && cp->val != 0x01)
1924 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1925 MGMT_STATUS_INVALID_PARAMS);
1930 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1932 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1934 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1939 err = new_settings(hdev, sk);
1942 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Link Security command (BR/EDR only).
 * Powered off: toggle the HCI_LINK_SECURITY flag locally.  Powered on:
 * send HCI Write Authentication Enable, unless the controller's auth
 * state already matches the request.
 */
1946 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1949 struct mgmt_mode *cp = data;
1950 struct mgmt_pending_cmd *cmd;
1954 BT_DBG("request for %s", hdev->name);
1956 status = mgmt_bredr_support(hdev);
1958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1961 if (cp->val != 0x00 && cp->val != 0x01)
1962 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1963 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off path: flag change only, no HCI command. */
1967 if (!hdev_is_powered(hdev)) {
1968 bool changed = false;
1970 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1971 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1975 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1980 err = new_settings(hdev, sk);
1985 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1986 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches: just respond. */
1993 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1994 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1998 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2004 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2006 mgmt_pending_remove(cmd);
2011 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Secure Simple Pairing command.  Requires a
 * BR/EDR-enabled, SSP-capable controller.  Powered off: flag updates
 * only (disabling SSP also drops HS).  Powered on: send HCI Write
 * Simple Pairing Mode, first turning off SSP debug mode if it was on.
 */
2015 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2017 struct mgmt_mode *cp = data;
2018 struct mgmt_pending_cmd *cmd;
2022 BT_DBG("request for %s", hdev->name);
2024 status = mgmt_bredr_support(hdev);
2026 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2028 if (!lmp_ssp_capable(hdev))
2029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2030 MGMT_STATUS_NOT_SUPPORTED);
2032 if (cp->val != 0x00 && cp->val != 0x01)
2033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2034 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off path: adjust flags without touching the controller. */
2038 if (!hdev_is_powered(hdev)) {
2042 changed = !hci_dev_test_and_set_flag(hdev,
2045 changed = hci_dev_test_and_clear_flag(hdev,
2048 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so disabling SSP disables HS too. */
2051 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2054 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2059 err = new_settings(hdev, sk);
2064 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2065 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2070 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2071 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2075 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off while debug keys are in use: disable debug mode
 * first (cp->val is 0x00 here, reused as the command parameter).
 */
2081 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2082 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2083 sizeof(cp->val), &cp->val);
2085 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2087 mgmt_pending_remove(cmd);
2092 hci_dev_unlock(hdev);
/* Handler for the MGMT Set High Speed command.  HS is a host-side
 * flag layered on SSP: it requires BR/EDR support, an SSP-capable
 * controller, and SSP currently enabled.  Disabling HS is refused
 * while the controller is powered (only the flag path is visible
 * here; surrounding elided lines hold the branch structure).
 */
2096 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2098 struct mgmt_mode *cp = data;
2103 BT_DBG("request for %s", hdev->name);
2105 status = mgmt_bredr_support(hdev);
2107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2109 if (!lmp_ssp_capable(hdev))
2110 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2111 MGMT_STATUS_NOT_SUPPORTED);
2113 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2115 MGMT_STATUS_REJECTED);
2117 if (cp->val != 0x00 && cp->val != 0x01)
2118 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2119 MGMT_STATUS_INVALID_PARAMS);
/* Don't race with a Set SSP that may be changing the prerequisite. */
2123 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2124 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2130 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2132 if (hdev_is_powered(hdev)) {
2133 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2134 MGMT_STATUS_REJECTED);
2138 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2141 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2146 err = new_settings(hdev, sk);
2149 hci_dev_unlock(hdev);
/* HCI completion callback for the Set LE request.  Failure: fail every
 * pending Set LE command with the mapped status.  Success: answer all
 * pending commands, broadcast New Settings, and — if LE ended up
 * enabled — refresh advertising data, scan response data and the
 * background scan so the controller has sane LE defaults.
 */
2153 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2155 struct cmd_lookup match = { NULL, hdev };
2160 u8 mgmt_err = mgmt_status(status);
2162 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2167 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
/* match.sk was populated by settings_rsp(); skip that socket. */
2169 new_settings(hdev, match.sk);
2174 /* Make sure the controller has a good default for
2175 * advertising data. Restrict the update to when LE
2176 * has actually been enabled. During power on, the
2177 * update in powered_update_hci will take care of it.
2179 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2180 struct hci_request req;
2182 hci_req_init(&req, hdev);
2183 update_adv_data(&req);
2184 update_scan_rsp_data(&req);
2185 __hci_update_background_scan(&req);
2186 hci_req_run(&req, NULL);
2190 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Low Energy command.  LE-only controllers
 * (BR/EDR not enabled) may not toggle LE.  When powered off, or when
 * the host-LE state already matches, only the HCI_LE_ENABLED flag is
 * changed; otherwise HCI Write LE Host Supported is queued (disabling
 * advertising first when LE is being switched off).
 */
2193 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2195 struct mgmt_mode *cp = data;
2196 struct hci_cp_write_le_host_supported hci_cp;
2197 struct mgmt_pending_cmd *cmd;
2198 struct hci_request req;
2202 BT_DBG("request for %s", hdev->name);
2204 if (!lmp_le_capable(hdev))
2205 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2206 MGMT_STATUS_NOT_SUPPORTED);
2208 if (cp->val != 0x00 && cp->val != 0x01)
2209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2210 MGMT_STATUS_INVALID_PARAMS);
2212 /* LE-only devices do not allow toggling LE on/off */
2213 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2214 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2215 MGMT_STATUS_REJECTED);
2220 enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or controller state already matches. */
2222 if (!hdev_is_powered(hdev) || val == enabled) {
2223 bool changed = false;
2225 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2226 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Turning LE off implies advertising must be off too. */
2230 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2231 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2235 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2240 err = new_settings(hdev, sk);
2245 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2246 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2247 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2252 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2258 hci_req_init(&req, hdev);
2260 memset(&hci_cp, 0, sizeof(hci_cp));
2264 hci_cp.simul = 0x00;
/* Stop any running LE advertising before disabling host LE support. */
2266 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2267 disable_advertising(&req);
2270 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2273 err = hci_req_run(&req, le_enable_complete);
2275 mgmt_pending_remove(cmd);
2278 hci_dev_unlock(hdev);
2282 /* This is a helper function to test for pending mgmt commands that can
2283 * cause CoD or EIR HCI commands. We can only allow one such pending
2284 * mgmt command at a time since otherwise we cannot easily track what
2285 * the current values are, will be, and based on that calculate if a new
2286 * HCI command needs to be sent and if yes with what value.
2288 static bool pending_eir_or_class(struct hci_dev *hdev)
2290 struct mgmt_pending_cmd *cmd;
/* Scan the device's pending-command list for any opcode that may
 * update the Class of Device or EIR data (the matching return
 * statements are elided from this listing).
 */
2292 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2293 switch (cmd->opcode) {
2294 case MGMT_OP_ADD_UUID:
2295 case MGMT_OP_REMOVE_UUID:
2296 case MGMT_OP_SET_DEV_CLASS:
2297 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; 16-/32-bit UUIDs differ from it only in
 * the top four bytes (see get_uuid_size()).
 */
2305 static const u8 bluetooth_base_uuid[] = {
2306 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2307 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Return the shortest representation of a 128-bit UUID in bits: if the
 * low 12 bytes match the Bluetooth Base UUID, the UUID is really a
 * shortened (16- or 32-bit) one, classified from the remaining 32-bit
 * value (the classification branches are elided from this listing).
 */
2310 static u8 get_uuid_size(const u8 *uuid)
2314 if (memcmp(uuid, bluetooth_base_uuid, 12))
2317 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the class/EIR related commands
 * (Add/Remove UUID, Set Device Class): respond to the pending @mgmt_op
 * command with the mapped status and the current 3-byte device class.
 */
2324 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2326 struct mgmt_pending_cmd *cmd;
2330 cmd = pending_find(mgmt_op, hdev);
2334 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2335 mgmt_status(status), hdev->dev_class, 3);
2337 mgmt_pending_remove(cmd);
2340 hci_dev_unlock(hdev);
/* hci_request completion callback for Add UUID. */
2343 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2345 BT_DBG("status 0x%02x", status);
2347 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for the MGMT Add UUID command: record the 128-bit UUID (with
 * its service hint and shortest size) on hdev->uuids and run an HCI
 * request to refresh class/EIR.  -ENODATA from hci_req_run() means no
 * HCI update was needed, so the command completes immediately with the
 * current device class.
 */
2350 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2352 struct mgmt_cp_add_uuid *cp = data;
2353 struct mgmt_pending_cmd *cmd;
2354 struct hci_request req;
2355 struct bt_uuid *uuid;
2358 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR-affecting command may be pending at a time. */
2362 if (pending_eir_or_class(hdev)) {
2363 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2368 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2374 memcpy(uuid->uuid, cp->uuid, 16);
2375 uuid->svc_hint = cp->svc_hint;
2376 uuid->size = get_uuid_size(cp->uuid);
2378 list_add_tail(&uuid->list, &hdev->uuids);
2380 hci_req_init(&req, hdev);
2385 err = hci_req_run(&req, add_uuid_complete);
2387 if (err != -ENODATA)
/* No HCI traffic was required: reply right away. */
2390 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2391 hdev->dev_class, 3);
2395 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2404 hci_dev_unlock(hdev);
/* Arm the service-cache machinery: when the device is powered and the
 * HCI_SERVICE_CACHE flag was not already set, schedule the delayed
 * service_cache work.  The elided return statements report whether the
 * caller may rely on the cache (i.e. defer the class/EIR update).
 */
2408 static bool enable_service_cache(struct hci_dev *hdev)
2410 if (!hdev_is_powered(hdev))
2413 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2414 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_request completion callback for Remove UUID. */
2422 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2424 BT_DBG("status 0x%02x", status);
2426 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for the MGMT Remove UUID command.  An all-zero UUID clears
 * the whole list (optionally deferring the class/EIR refresh to the
 * service cache); otherwise every matching entry is unlinked.  As with
 * add_uuid(), -ENODATA from hci_req_run() means no HCI update was
 * needed and the command completes immediately.
 */
2429 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2432 struct mgmt_cp_remove_uuid *cp = data;
2433 struct mgmt_pending_cmd *cmd;
2434 struct bt_uuid *match, *tmp;
/* Wildcard: all-zero UUID means "remove everything". */
2435 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2436 struct hci_request req;
2439 BT_DBG("request for %s", hdev->name);
2443 if (pending_eir_or_class(hdev)) {
2444 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2449 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2450 hci_uuids_clear(hdev);
/* Service cache armed: defer the update and reply immediately. */
2452 if (enable_service_cache(hdev)) {
2453 err = mgmt_cmd_complete(sk, hdev->id,
2454 MGMT_OP_REMOVE_UUID,
2455 0, hdev->dev_class, 3);
2464 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2465 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2468 list_del(&match->list);
/* Nothing matched (elided counter check): reject the request. */
2474 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2475 MGMT_STATUS_INVALID_PARAMS);
2480 hci_req_init(&req, hdev);
2485 err = hci_req_run(&req, remove_uuid_complete);
2487 if (err != -ENODATA)
2490 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2491 hdev->dev_class, 3);
2495 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2504 hci_dev_unlock(hdev);
/* hci_request completion callback for Set Device Class. */
2508 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2510 BT_DBG("status 0x%02x", status);
2512 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for the MGMT Set Device Class command (BR/EDR only).  The
 * minor class must have its two low bits clear and the major class its
 * three high bits clear (those bits are reserved/derived).  Powered
 * off, only the stored values change; powered on, an HCI request
 * refreshes class/EIR, flushing the service cache first if armed.
 */
2515 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2518 struct mgmt_cp_set_dev_class *cp = data;
2519 struct mgmt_pending_cmd *cmd;
2520 struct hci_request req;
2523 BT_DBG("request for %s", hdev->name);
2525 if (!lmp_bredr_capable(hdev))
2526 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2527 MGMT_STATUS_NOT_SUPPORTED);
2531 if (pending_eir_or_class(hdev)) {
2532 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2537 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2538 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2539 MGMT_STATUS_INVALID_PARAMS);
2543 hdev->major_class = cp->major;
2544 hdev->minor_class = cp->minor;
2546 if (!hdev_is_powered(hdev)) {
2547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2548 hdev->dev_class, 3);
2552 hci_req_init(&req, hdev);
/* Flush a pending service-cache update synchronously; the lock is
 * dropped because the work item itself takes hci_dev_lock.
 */
2554 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2555 hci_dev_unlock(hdev);
2556 cancel_delayed_work_sync(&hdev->service_cache);
2563 err = hci_req_run(&req, set_class_complete);
2565 if (err != -ENODATA)
2568 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2569 hdev->dev_class, 3);
2573 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2582 hci_dev_unlock(hdev);
/* Handler for the MGMT Load Link Keys command (BR/EDR only): replace
 * the device's stored link keys with the supplied list.  The key count
 * is bounded so the total size fits in a u16 payload, the payload
 * length must match the count exactly, every key must be a BR/EDR
 * address with a valid key type, and debug combination keys are always
 * skipped.  Also updates the keep-debug-keys setting from
 * cp->debug_keys.
 */
2586 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2589 struct mgmt_cp_load_link_keys *cp = data;
/* Largest key count whose total payload still fits in a u16 length. */
2590 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2591 sizeof(struct mgmt_link_key_info));
2592 u16 key_count, expected_len;
2596 BT_DBG("request for %s", hdev->name);
2598 if (!lmp_bredr_capable(hdev))
2599 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2600 MGMT_STATUS_NOT_SUPPORTED);
2602 key_count = __le16_to_cpu(cp->key_count);
2603 if (key_count > max_key_count) {
2604 BT_ERR("load_link_keys: too big key_count value %u",
2606 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2607 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly. */
2610 expected_len = sizeof(*cp) + key_count *
2611 sizeof(struct mgmt_link_key_info);
2612 if (expected_len != len) {
2613 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2615 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2616 MGMT_STATUS_INVALID_PARAMS);
2619 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2620 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2621 MGMT_STATUS_INVALID_PARAMS);
2623 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating any state. */
2626 for (i = 0; i < key_count; i++) {
2627 struct mgmt_link_key_info *key = &cp->keys[i];
2629 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2630 return mgmt_cmd_status(sk, hdev->id,
2631 MGMT_OP_LOAD_LINK_KEYS,
2632 MGMT_STATUS_INVALID_PARAMS);
2637 hci_link_keys_clear(hdev);
2640 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2642 changed = hci_dev_test_and_clear_flag(hdev,
2643 HCI_KEEP_DEBUG_KEYS);
2646 new_settings(hdev, NULL);
2648 for (i = 0; i < key_count; i++) {
2649 struct mgmt_link_key_info *key = &cp->keys[i];
2651 /* Always ignore debug keys and require a new pairing if
2652 * the user wants to use them.
2654 if (key->type == HCI_LK_DEBUG_COMBINATION)
2657 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2658 key->type, key->pin_len, NULL);
2661 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2663 hci_dev_unlock(hdev);
/* Emit a Device Unpaired event for @bdaddr/@addr_type; @skip_sk is the
 * socket to exclude from the broadcast (the continuation line passing
 * it to mgmt_event() is elided from this listing).
 */
2668 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2669 u8 addr_type, struct sock *skip_sk)
2671 struct mgmt_ev_device_unpaired ev;
2673 bacpy(&ev.addr.bdaddr, bdaddr);
2674 ev.addr.type = addr_type;
2676 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for the MGMT Unpair Device command: delete the pairing keys
 * for one remote device (link key for BR/EDR; IRK and LTK for LE) and,
 * when cp->disconnect is set and a live connection exists, terminate
 * that link before completing the command.
 */
2680 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2683 struct mgmt_cp_unpair_device *cp = data;
2684 struct mgmt_rp_unpair_device rp;
2685 struct hci_cp_disconnect dc;
2686 struct mgmt_pending_cmd *cmd;
2687 struct hci_conn *conn;
/* Response always echoes the target address, even on error. */
2690 memset(&rp, 0, sizeof(rp));
2691 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2692 rp.addr.type = cp->addr.type;
2694 if (!bdaddr_type_is_valid(cp->addr.type))
2695 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2696 MGMT_STATUS_INVALID_PARAMS,
2699 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2700 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2701 MGMT_STATUS_INVALID_PARAMS,
2706 if (!hdev_is_powered(hdev)) {
2707 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2708 MGMT_STATUS_NOT_POWERED, &rp,
2713 if (cp->addr.type == BDADDR_BREDR) {
2714 /* If disconnection is requested, then look up the
2715 * connection. If the remote device is connected, it
2716 * will be later used to terminate the link.
2718 * Setting it to NULL explicitly will cause no
2719 * termination of the link.
2722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2727 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE path: find the connection and remove IRK + LTK. */
2731 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2734 /* Defer clearing up the connection parameters
2735 * until closing to give a chance of keeping
2736 * them if a repairing happens.
2738 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2740 /* If disconnection is not requested, then
2741 * clear the connection variable so that the
2742 * link is not terminated.
2744 if (!cp->disconnect)
2748 if (cp->addr.type == BDADDR_LE_PUBLIC)
2749 addr_type = ADDR_LE_DEV_PUBLIC;
2751 addr_type = ADDR_LE_DEV_RANDOM;
2753 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2755 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* Key removal failed: the device was never paired. */
2759 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2760 MGMT_STATUS_NOT_PAIRED, &rp,
2765 /* If the connection variable is set, then termination of the
2766 * link is requested.
2769 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2771 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2775 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2782 cmd->cmd_complete = addr_cmd_complete;
2784 dc.handle = cpu_to_le16(conn->handle);
2785 dc.reason = 0x13; /* Remote User Terminated Connection */
2786 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2788 mgmt_pending_remove(cmd);
2791 hci_dev_unlock(hdev);
/* Handler for the MGMT Disconnect command: look up the live BR/EDR or
 * LE connection for the given address and terminate it with reason
 * "remote user terminated".  Completion is reported asynchronously via
 * the pending command's generic cmd_complete handler.
 */
2795 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2798 struct mgmt_cp_disconnect *cp = data;
2799 struct mgmt_rp_disconnect rp;
2800 struct mgmt_pending_cmd *cmd;
2801 struct hci_conn *conn;
2806 memset(&rp, 0, sizeof(rp));
2807 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2808 rp.addr.type = cp->addr.type;
2810 if (!bdaddr_type_is_valid(cp->addr.type))
2811 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2812 MGMT_STATUS_INVALID_PARAMS,
2817 if (!test_bit(HCI_UP, &hdev->flags)) {
2818 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2819 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect may be outstanding per controller. */
2824 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2825 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2826 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2830 if (cp->addr.type == BDADDR_BREDR)
2831 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2834 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2836 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2837 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2838 MGMT_STATUS_NOT_CONNECTED, &rp,
2843 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2849 cmd->cmd_complete = generic_cmd_complete;
2851 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2853 mgmt_pending_remove(cmd);
2856 hci_dev_unlock(hdev);
/* Translate an HCI link type plus HCI address type into the MGMT
 * BDADDR_* address type.  The case labels for the outer switch are
 * elided from this listing; the visible inner switch handles LE
 * address types, with BR/EDR as the overall fallback.
 */
2860 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2862 switch (link_type) {
2864 switch (addr_type) {
2865 case ADDR_LE_DEV_PUBLIC:
2866 return BDADDR_LE_PUBLIC;
2869 /* Fallback to LE Random address type */
2870 return BDADDR_LE_RANDOM;
2874 /* Fallback to BR/EDR type */
2875 return BDADDR_BREDR;
/* Handler for the MGMT Get Connections command: walk the connection
 * hash and return the address of every MGMT-visible connection,
 * filtering out SCO/eSCO links (they are transient audio channels tied
 * to an existing ACL link).
 */
2879 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2882 struct mgmt_rp_get_connections *rp;
2892 if (!hdev_is_powered(hdev)) {
2893 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2894 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the response buffer. */
2899 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2900 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2904 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2905 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the addresses, skipping SCO/eSCO links. */
2912 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2913 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2915 bacpy(&rp->addr[i].bdaddr, &c->dst);
2916 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2917 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2922 rp->conn_count = cpu_to_le16(i);
2924 /* Recalculate length in case of filtered SCO connections, etc */
2925 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2927 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2933 hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply for the given address, tracking
 * it as a pending PIN Code Neg Reply MGMT command; the pending entry is
 * dropped again if sending the HCI command fails.
 */
2937 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2938 struct mgmt_cp_pin_code_neg_reply *cp)
2940 struct mgmt_pending_cmd *cmd;
2943 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2948 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2949 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2951 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: validate state (powered, ACL
 * connection exists), reject a short PIN when high security is pending,
 * then forward the PIN to the controller via HCI_OP_PIN_CODE_REPLY.
 */
2956 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2959 	struct hci_conn *conn;
2960 	struct mgmt_cp_pin_code_reply *cp = data;
2961 	struct hci_cp_pin_code_reply reply;
2962 	struct mgmt_pending_cmd *cmd;
2969 	if (!hdev_is_powered(hdev)) {
2970 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2971 				      MGMT_STATUS_NOT_POWERED);
2975 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2977 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2978 				      MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise turn the reply
 * into a negative reply toward the controller and report invalid
 * parameters to the mgmt caller.
 */
2982 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2983 		struct mgmt_cp_pin_code_neg_reply ncp;
2985 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2987 		BT_ERR("PIN code is not 16 bytes long");
2989 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2991 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2992 					      MGMT_STATUS_INVALID_PARAMS);
2997 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3003 	cmd->cmd_complete = addr_cmd_complete;
3005 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3006 	reply.pin_len = cp->pin_len;
3007 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3009 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3011 		mgmt_pending_remove(cmd);
3014 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the requested IO capability
 * on the controller after validating it against the SMP maximum.
 */
3018 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3021 	struct mgmt_cp_set_io_capability *cp = data;
3025 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3026 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3027 					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3031 	hdev->io_capability = cp->io_capability;
3033 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3034 	       hdev->io_capability);
3036 	hci_dev_unlock(hdev);
3038 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command (if any) whose user_data
 * points at the given connection.
 */
3042 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3044 	struct hci_dev *hdev = conn->hdev;
3045 	struct mgmt_pending_cmd *cmd;
3047 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3048 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3051 		if (cmd->user_data != conn)
/* Complete a pending MGMT_OP_PAIR_DEVICE command with the given mgmt
 * status, detach all pairing callbacks from the connection, drop the
 * connection reference and keep its stored connection parameters.
 */
3060 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3062 	struct mgmt_rp_pair_device rp;
3063 	struct hci_conn *conn = cmd->user_data;
3066 	bacpy(&rp.addr.bdaddr, &conn->dst);
3067 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3069 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3070 				status, &rp, sizeof(rp));
3072 	/* So we don't get further callbacks for this connection */
3073 	conn->connect_cfm_cb = NULL;
3074 	conn->security_cfm_cb = NULL;
3075 	conn->disconn_cfm_cb = NULL;
3077 	hci_conn_drop(conn);
3079 	/* The device is paired so there is no need to remove
3080 	 * its connection parameters anymore.
3082 	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called from SMP when pairing finishes: complete any pending
 * MGMT_OP_PAIR_DEVICE command for this connection with SUCCESS or
 * FAILED depending on @complete.
 */
3089 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3091 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3092 	struct mgmt_pending_cmd *cmd;
3094 	cmd = find_pairing(conn);
3096 		cmd->cmd_complete(cmd, status);
3097 		mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback used during pairing:
 * translate the HCI status and complete the pending pair command.
 */
3101 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3103 	struct mgmt_pending_cmd *cmd;
3105 	BT_DBG("status %u", status);
3107 	cmd = find_pairing(conn);
3109 		BT_DBG("Unable to find a pending command");
3113 	cmd->cmd_complete(cmd, mgmt_status(status));
3114 	mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb; the elided lines presumably
 * filter out success callbacks since an LE connection alone does not
 * mean pairing finished (see comment at pair_device) — confirm.
 */
3117 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3119 	struct mgmt_pending_cmd *cmd;
3121 	BT_DBG("status %u", status);
3126 	cmd = find_pairing(conn);
3128 		BT_DBG("Unable to find a pending command");
3132 	cmd->cmd_complete(cmd, mgmt_status(status));
3133 	mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the request, create the BR/EDR
 * or LE connection, attach pairing callbacks and queue a pending
 * command that is completed once the pairing procedure finishes.
 */
3136 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3139 	struct mgmt_cp_pair_device *cp = data;
3140 	struct mgmt_rp_pair_device rp;
3141 	struct mgmt_pending_cmd *cmd;
3142 	u8 sec_level, auth_type;
3143 	struct hci_conn *conn;
3148 	memset(&rp, 0, sizeof(rp));
3149 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3150 	rp.addr.type = cp->addr.type;
/* Parameter validation: address type and IO capability range. */
3152 	if (!bdaddr_type_is_valid(cp->addr.type))
3153 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3154 					 MGMT_STATUS_INVALID_PARAMS,
3157 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3158 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3159 					 MGMT_STATUS_INVALID_PARAMS,
3164 	if (!hdev_is_powered(hdev)) {
3165 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3166 					 MGMT_STATUS_NOT_POWERED, &rp,
3171 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3172 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3173 					 MGMT_STATUS_ALREADY_PAIRED, &rp,
3178 	sec_level = BT_SECURITY_MEDIUM;
3179 	auth_type = HCI_AT_DEDICATED_BONDING;
/* Establish the transport: ACL for BR/EDR, otherwise LE. */
3181 	if (cp->addr.type == BDADDR_BREDR) {
3182 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3187 		/* Convert from L2CAP channel address type to HCI address type
3189 		if (cp->addr.type == BDADDR_LE_PUBLIC)
3190 			addr_type = ADDR_LE_DEV_PUBLIC;
3192 			addr_type = ADDR_LE_DEV_RANDOM;
3194 		/* When pairing a new device, it is expected to remember
3195 		 * this device for future connections. Adding the connection
3196 		 * parameter information ahead of time allows tracking
3197 		 * of the slave preferred values and will speed up any
3198 		 * further connection establishment.
3200 		 * If connection parameters already exist, then they
3201 		 * will be kept and this function does nothing.
3203 		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3205 		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3206 				      sec_level, HCI_LE_CONN_TIMEOUT,
/* Map connection-creation errors to mgmt status codes. */
3213 		if (PTR_ERR(conn) == -EBUSY)
3214 			status = MGMT_STATUS_BUSY;
3215 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3216 			status = MGMT_STATUS_NOT_SUPPORTED;
3217 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3218 			status = MGMT_STATUS_REJECTED;
3220 			status = MGMT_STATUS_CONNECT_FAILED;
3222 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3223 					 status, &rp, sizeof(rp));
/* A connect callback already installed means another pairing is in
 * progress on this connection.
 */
3227 	if (conn->connect_cfm_cb) {
3228 		hci_conn_drop(conn);
3229 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3230 					 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3234 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3237 		hci_conn_drop(conn);
3241 	cmd->cmd_complete = pairing_complete;
3243 	/* For LE, just connecting isn't a proof that the pairing finished */
3244 	if (cp->addr.type == BDADDR_BREDR) {
3245 		conn->connect_cfm_cb = pairing_complete_cb;
3246 		conn->security_cfm_cb = pairing_complete_cb;
3247 		conn->disconn_cfm_cb = pairing_complete_cb;
3249 		conn->connect_cfm_cb = le_pairing_complete_cb;
3250 		conn->security_cfm_cb = le_pairing_complete_cb;
3251 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3254 	conn->io_capability = cp->io_cap;
3255 	cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure enough, complete immediately. */
3257 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3258 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3259 		cmd->cmd_complete(cmd, 0);
3260 		mgmt_pending_remove(cmd);
3266 	hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: find the pending pair command,
 * verify it targets the requested address, and complete it with
 * MGMT_STATUS_CANCELLED.
 */
3270 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3273 	struct mgmt_addr_info *addr = data;
3274 	struct mgmt_pending_cmd *cmd;
3275 	struct hci_conn *conn;
3282 	if (!hdev_is_powered(hdev)) {
3283 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3284 				      MGMT_STATUS_NOT_POWERED);
3288 	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3290 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3291 				      MGMT_STATUS_INVALID_PARAMS);
3295 	conn = cmd->user_data;
/* The cancel must name the same device the pending pairing targets. */
3297 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3298 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3299 				      MGMT_STATUS_INVALID_PARAMS);
3303 	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3304 	mgmt_pending_remove(cmd);
3306 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3307 				addr, sizeof(*addr));
3309 	hci_dev_unlock(hdev);
/* Common helper for all user confirmation/passkey (neg-)replies.
 * For LE addresses the answer is routed to SMP; for BR/EDR a pending
 * command is queued and the reply is sent to the controller via the
 * given HCI opcode (@passkey is only used for passkey replies).
 */
3313 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3314 			     struct mgmt_addr_info *addr, u16 mgmt_op,
3315 			     u16 hci_op, __le32 passkey)
3317 	struct mgmt_pending_cmd *cmd;
3318 	struct hci_conn *conn;
3323 	if (!hdev_is_powered(hdev)) {
3324 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3325 					 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport matching the address type. */
3330 	if (addr->type == BDADDR_BREDR)
3331 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3333 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3336 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3337 					 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing answers go to SMP and are completed synchronously. */
3342 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3343 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3345 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3346 						 MGMT_STATUS_SUCCESS, addr,
3349 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3350 						 MGMT_STATUS_FAILED, addr,
3356 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3362 	cmd->cmd_complete = addr_cmd_complete;
3364 	/* Continue with pairing via HCI */
3365 	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3366 		struct hci_cp_user_passkey_reply cp;
3368 		bacpy(&cp.bdaddr, &addr->bdaddr);
3369 		cp.passkey = passkey;
3370 		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3372 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3376 		mgmt_pending_remove(cmd);
3379 	hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() with the PIN-code negative-reply opcodes.
 */
3383 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3384 			      void *data, u16 len)
3386 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3390 	return user_pairing_resp(sk, hdev, &cp->addr,
3391 				 MGMT_OP_PIN_CODE_NEG_REPLY,
3392 				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: length-check the request, then
 * delegate to user_pairing_resp() with the confirm-reply opcodes.
 */
3395 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3398 	struct mgmt_cp_user_confirm_reply *cp = data;
3402 	if (len != sizeof(*cp))
3403 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3404 				       MGMT_STATUS_INVALID_PARAMS);
3406 	return user_pairing_resp(sk, hdev, &cp->addr,
3407 				 MGMT_OP_USER_CONFIRM_REPLY,
3408 				 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the confirm negative-reply opcodes.
 */
3411 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3412 				  void *data, u16 len)
3414 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3418 	return user_pairing_resp(sk, hdev, &cp->addr,
3419 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3420 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: delegate to user_pairing_resp(),
 * forwarding the user-supplied passkey.
 */
3423 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3426 	struct mgmt_cp_user_passkey_reply *cp = data;
3430 	return user_pairing_resp(sk, hdev, &cp->addr,
3431 				 MGMT_OP_USER_PASSKEY_REPLY,
3432 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the passkey negative-reply opcodes.
 */
3435 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3436 				  void *data, u16 len)
3438 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3442 	return user_pairing_resp(sk, hdev, &cp->addr,
3443 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3444 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (carrying hdev->dev_name) to
 * the given request.
 */
3447 static void update_name(struct hci_request *req)
3449 	struct hci_dev *hdev = req->hdev;
3450 	struct hci_cp_write_local_name cp;
3452 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3454 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for set_local_name(): report status
 * (or success with the resulting name) to the pending mgmt command and
 * remove it.
 */
3457 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3459 	struct mgmt_cp_set_local_name *cp;
3460 	struct mgmt_pending_cmd *cmd;
3462 	BT_DBG("status 0x%02x", status);
3466 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3473 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3474 				mgmt_status(status));
3476 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3479 	mgmt_pending_remove(cmd);
3482 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: short-circuit when nothing changed or
 * the device is powered off; otherwise queue a pending command and run
 * an HCI request to write the name (and scan response data on LE).
 */
3485 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3488 	struct mgmt_cp_set_local_name *cp = data;
3489 	struct mgmt_pending_cmd *cmd;
3490 	struct hci_request req;
3497 	/* If the old values are the same as the new ones just return a
3498 	 * direct command complete event.
3500 	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3501 	    !memcmp(hdev->short_name, cp->short_name,
3502 		    sizeof(hdev->short_name))) {
3503 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3508 	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* While powered off only the stored name is updated and the change is
 * broadcast; no HCI traffic is needed.
 */
3510 	if (!hdev_is_powered(hdev)) {
3511 		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3513 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3518 		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3524 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3530 	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3532 	hci_req_init(&req, hdev);
3534 	if (lmp_bredr_capable(hdev)) {
3539 	/* The name is stored in the scan response data and so
3540 	 * no need to udpate the advertising data here.
3542 	if (lmp_le_capable(hdev))
3543 		update_scan_rsp_data(&req);
3545 	err = hci_req_run(&req, set_name_complete);
3547 		mgmt_pending_remove(cmd);
3550 	hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: validate power/SSP support and
 * that no identical request is pending, then ask the controller for
 * local OOB data (extended variant when BR/EDR Secure Connections is
 * enabled).
 */
3554 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3555 			       void *data, u16 data_len)
3557 	struct mgmt_pending_cmd *cmd;
3560 	BT_DBG("%s", hdev->name);
3564 	if (!hdev_is_powered(hdev)) {
3565 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3566 				      MGMT_STATUS_NOT_POWERED);
3570 	if (!lmp_ssp_capable(hdev)) {
3571 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3572 				      MGMT_STATUS_NOT_SUPPORTED);
3576 	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3577 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3582 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3588 	if (bredr_sc_enabled(hdev))
3589 		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3592 		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3595 		mgmt_pending_remove(cmd);
3598 	hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote out-of-band pairing
 * data.  Two wire formats are accepted by total length: the legacy
 * P-192-only form and the extended form carrying both P-192 and P-256
 * values; any other length is rejected.
 */
3602 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3603 			       void *data, u16 len)
3605 	struct mgmt_addr_info *addr = data;
3608 	BT_DBG("%s ", hdev->name);
3610 	if (!bdaddr_type_is_valid(addr->type))
3611 		return mgmt_cmd_complete(sk, hdev->id,
3612 					 MGMT_OP_ADD_REMOTE_OOB_DATA,
3613 					 MGMT_STATUS_INVALID_PARAMS,
3614 					 addr, sizeof(*addr));
/* Legacy format: P-192 hash/rand only, BR/EDR addresses only. */
3618 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3619 		struct mgmt_cp_add_remote_oob_data *cp = data;
3622 		if (cp->addr.type != BDADDR_BREDR) {
3623 			err = mgmt_cmd_complete(sk, hdev->id,
3624 						 MGMT_OP_ADD_REMOTE_OOB_DATA,
3625 						 MGMT_STATUS_INVALID_PARAMS,
3626 						 &cp->addr, sizeof(cp->addr));
3630 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3631 					      cp->addr.type, cp->hash,
3632 					      cp->rand, NULL, NULL);
3634 			status = MGMT_STATUS_FAILED;
3636 			status = MGMT_STATUS_SUCCESS;
3638 		err = mgmt_cmd_complete(sk, hdev->id,
3639 					 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3640 					 &cp->addr, sizeof(cp->addr));
/* Extended format: both P-192 and P-256 hash/rand pairs. */
3641 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3642 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3643 		u8 *rand192, *hash192, *rand256, *hash256;
3646 		if (bdaddr_type_is_le(cp->addr.type)) {
3647 			/* Enforce zero-valued 192-bit parameters as
3648 			 * long as legacy SMP OOB isn't implemented.
3650 			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3651 			    memcmp(cp->hash192, ZERO_KEY, 16)) {
3652 				err = mgmt_cmd_complete(sk, hdev->id,
3653 							MGMT_OP_ADD_REMOTE_OOB_DATA,
3654 							MGMT_STATUS_INVALID_PARAMS,
3655 							addr, sizeof(*addr));
3662 			/* In case one of the P-192 values is set to zero,
3663 			 * then just disable OOB data for P-192.
3665 			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3666 			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
3670 				rand192 = cp->rand192;
3671 				hash192 = cp->hash192;
3675 		/* In case one of the P-256 values is set to zero, then just
3676 		 * disable OOB data for P-256.
3678 		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3679 		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
3683 			rand256 = cp->rand256;
3684 			hash256 = cp->hash256;
3687 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3688 					      cp->addr.type, hash192, rand192,
3691 			status = MGMT_STATUS_FAILED;
3693 			status = MGMT_STATUS_SUCCESS;
3695 		err = mgmt_cmd_complete(sk, hdev->id,
3696 					 MGMT_OP_ADD_REMOTE_OOB_DATA,
3697 					 status, &cp->addr, sizeof(cp->addr));
3699 		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3700 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3701 				      MGMT_STATUS_INVALID_PARAMS);
3705 	hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: remove stored OOB data for a
 * single BR/EDR address, or clear all entries when BDADDR_ANY is given.
 */
3709 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3710 				  void *data, u16 len)
3712 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3716 	BT_DBG("%s", hdev->name);
3718 	if (cp->addr.type != BDADDR_BREDR)
3719 		return mgmt_cmd_complete(sk, hdev->id,
3720 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3721 					 MGMT_STATUS_INVALID_PARAMS,
3722 					 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears every stored OOB entry. */
3726 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3727 		hci_remote_oob_data_clear(hdev);
3728 		status = MGMT_STATUS_SUCCESS;
3732 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3734 		status = MGMT_STATUS_INVALID_PARAMS;
3736 		status = MGMT_STATUS_SUCCESS;
3739 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3740 				status, &cp->addr, sizeof(cp->addr));
3742 	hci_dev_unlock(hdev);
/* Append a BR/EDR general inquiry (GIAC) to the request.  Returns false
 * and sets *status when inquiry cannot be started (no BR/EDR support or
 * an inquiry is already running).
 */
3746 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
3748 	struct hci_dev *hdev = req->hdev;
3749 	struct hci_cp_inquiry cp;
3750 	/* General inquiry access code (GIAC) */
3751 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
3753 	*status = mgmt_bredr_support(hdev);
3757 	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
3758 		*status = MGMT_STATUS_BUSY;
3762 	hci_inquiry_cache_flush(hdev);
3764 	memset(&cp, 0, sizeof(cp));
3765 	memcpy(&cp.lap, lap, sizeof(cp.lap));
3766 	cp.length = DISCOV_BREDR_INQUIRY_LEN;
3768 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Append the commands needed to start an active LE scan with the given
 * interval: stop advertising/background scanning if necessary, pick a
 * suitable own address, set scan parameters and enable scanning.
 * Returns false and sets *status on any precondition failure.
 */
3773 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
3775 	struct hci_dev *hdev = req->hdev;
3776 	struct hci_cp_le_set_scan_param param_cp;
3777 	struct hci_cp_le_set_scan_enable enable_cp;
3781 	*status = mgmt_le_support(hdev);
3785 	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
3786 		/* Don't let discovery abort an outgoing connection attempt
3787 		 * that's using directed advertising.
3789 		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3790 			*status = MGMT_STATUS_REJECTED;
3794 		disable_advertising(req);
3797 	/* If controller is scanning, it means the background scanning is
3798 	 * running. Thus, we should temporarily stop it in order to set the
3799 	 * discovery scanning parameters.
3801 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3802 		hci_req_add_le_scan_disable(req);
3804 	/* All active scans will be done with either a resolvable private
3805 	 * address (when privacy feature has been enabled) or non-resolvable
3808 	err = hci_update_random_address(req, true, &own_addr_type);
3810 		*status = MGMT_STATUS_FAILED;
3814 	memset(&param_cp, 0, sizeof(param_cp));
3815 	param_cp.type = LE_SCAN_ACTIVE;
3816 	param_cp.interval = cpu_to_le16(interval);
3817 	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3818 	param_cp.own_address_type = own_addr_type;
3820 	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3823 	memset(&enable_cp, 0, sizeof(enable_cp));
3824 	enable_cp.enable = LE_SCAN_ENABLE;
3825 	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3827 	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Dispatch on hdev->discovery.type and append the HCI commands needed
 * to start that kind of discovery (BR/EDR inquiry, LE scan, or the
 * interleaved/simultaneous combination).  Returns false and sets
 * *status when discovery cannot be started.
 */
3833 static bool trigger_discovery(struct hci_request *req, u8 *status)
3835 	struct hci_dev *hdev = req->hdev;
3837 	switch (hdev->discovery.type) {
3838 	case DISCOV_TYPE_BREDR:
3839 		if (!trigger_bredr_inquiry(req, status))
3843 	case DISCOV_TYPE_INTERLEAVED:
3844 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3846 			/* During simultaneous discovery, we double LE scan
3847 			 * interval. We must leave some time for the controller
3848 			 * to do BR/EDR inquiry.
3850 			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
3854 			if (!trigger_bredr_inquiry(req, status))
/* Interleaved discovery without the quirk needs BR/EDR enabled. */
3860 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
3861 			*status = MGMT_STATUS_NOT_SUPPORTED;
3866 	case DISCOV_TYPE_LE:
3867 		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
3872 		*status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback for (service) discovery start:
 * complete the pending mgmt command, update the discovery state, and —
 * when LE scanning is involved — schedule le_scan_disable with a
 * timeout that matches the discovery type.
 */
3879 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3882 	struct mgmt_pending_cmd *cmd;
3883 	unsigned long timeout;
3885 	BT_DBG("status %d", status);
3889 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3891 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3894 		cmd->cmd_complete(cmd, mgmt_status(status));
3895 		mgmt_pending_remove(cmd);
3899 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3903 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3905 	/* If the scan involves LE scan, pick proper timeout to schedule
3906 	 * hdev->le_scan_disable that will stop it.
3908 	switch (hdev->discovery.type) {
3909 	case DISCOV_TYPE_LE:
3910 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3912 	case DISCOV_TYPE_INTERLEAVED:
3913 		/* When running simultaneous discovery, the LE scanning time
3914 		 * should occupy the whole discovery time sine BR/EDR inquiry
3915 		 * and LE scanning are scheduled by the controller.
3917 		 * For interleaving discovery in comparison, BR/EDR inquiry
3918 		 * and LE scanning are done sequentially with separate
3921 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3922 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3924 			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3926 	case DISCOV_TYPE_BREDR:
3930 		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3936 	/* When service discovery is used and the controller has
3937 	 * a strict duplicate filter, it is important to remember
3938 	 * the start and duration of the scan. This is required
3939 	 * for restarting scanning during the discovery phase.
3941 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
3943 	    hdev->discovery.result_filtering) {
3944 		hdev->discovery.scan_start = jiffies;
3945 		hdev->discovery.scan_duration = timeout;
3948 	queue_delayed_work(hdev->workqueue,
3949 			   &hdev->le_scan_disable, timeout);
3953 	hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validate power and idle discovery
 * state, reset the discovery filter, build the discovery HCI request
 * via trigger_discovery() and run it; on success the state moves to
 * DISCOVERY_STARTING.
 */
3956 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3957 			   void *data, u16 len)
3959 	struct mgmt_cp_start_discovery *cp = data;
3960 	struct mgmt_pending_cmd *cmd;
3961 	struct hci_request req;
3965 	BT_DBG("%s", hdev->name);
3969 	if (!hdev_is_powered(hdev)) {
3970 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3971 					 MGMT_STATUS_NOT_POWERED,
3972 					 &cp->type, sizeof(cp->type));
3976 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
3977 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3978 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3979 					 MGMT_STATUS_BUSY, &cp->type,
3984 	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3990 	cmd->cmd_complete = generic_cmd_complete;
3992 	/* Clear the discovery filter first to free any previously
3993 	 * allocated memory for the UUID list.
3995 	hci_discovery_filter_clear(hdev);
3997 	hdev->discovery.type = cp->type;
3998 	hdev->discovery.report_invalid_rssi = false;
4000 	hci_req_init(&req, hdev);
4002 	if (!trigger_discovery(&req, &status)) {
4003 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4004 					 status, &cp->type, sizeof(cp->type));
4005 		mgmt_pending_remove(cmd);
4009 	err = hci_req_run(&req, start_discovery_complete);
4011 		mgmt_pending_remove(cmd);
4015 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4018 	hci_dev_unlock(hdev);
/* cmd_complete hook for start_service_discovery: echo the pending
 * command's stored parameters back in the completion event.
 */
4022 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4025 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery() but
 * additionally validates and installs a result filter (RSSI threshold
 * plus an optional list of 128-bit service UUIDs) before triggering
 * discovery.
 */
4029 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4030 				   void *data, u16 len)
4032 	struct mgmt_cp_start_service_discovery *cp = data;
4033 	struct mgmt_pending_cmd *cmd;
4034 	struct hci_request req;
/* Upper bound on uuid_count such that the variable-length command still
 * fits in a 16-bit length (each UUID is 16 bytes).
 */
4035 	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4036 	u16 uuid_count, expected_len;
4040 	BT_DBG("%s", hdev->name);
4044 	if (!hdev_is_powered(hdev)) {
4045 		err = mgmt_cmd_complete(sk, hdev->id,
4046 					 MGMT_OP_START_SERVICE_DISCOVERY,
4047 					 MGMT_STATUS_NOT_POWERED,
4048 					 &cp->type, sizeof(cp->type));
4052 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
4053 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4054 		err = mgmt_cmd_complete(sk, hdev->id,
4055 					 MGMT_OP_START_SERVICE_DISCOVERY,
4056 					 MGMT_STATUS_BUSY, &cp->type,
/* Validate the declared UUID count against both the hard maximum and
 * the actual command length (guards against truncated/oversized input).
 */
4061 	uuid_count = __le16_to_cpu(cp->uuid_count);
4062 	if (uuid_count > max_uuid_count) {
4063 		BT_ERR("service_discovery: too big uuid_count value %u",
4065 		err = mgmt_cmd_complete(sk, hdev->id,
4066 					 MGMT_OP_START_SERVICE_DISCOVERY,
4067 					 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4072 	expected_len = sizeof(*cp) + uuid_count * 16;
4073 	if (expected_len != len) {
4074 		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4076 		err = mgmt_cmd_complete(sk, hdev->id,
4077 					 MGMT_OP_START_SERVICE_DISCOVERY,
4078 					 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4083 	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4090 	cmd->cmd_complete = service_discovery_cmd_complete;
4092 	/* Clear the discovery filter first to free any previously
4093 	 * allocated memory for the UUID list.
4095 	hci_discovery_filter_clear(hdev);
4097 	hdev->discovery.result_filtering = true;
4098 	hdev->discovery.type = cp->type;
4099 	hdev->discovery.rssi = cp->rssi;
4100 	hdev->discovery.uuid_count = uuid_count;
4102 	if (uuid_count > 0) {
4103 		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4105 		if (!hdev->discovery.uuids) {
4106 			err = mgmt_cmd_complete(sk, hdev->id,
4107 						 MGMT_OP_START_SERVICE_DISCOVERY,
4109 						 &cp->type, sizeof(cp->type));
4110 			mgmt_pending_remove(cmd);
4115 	hci_req_init(&req, hdev);
4117 	if (!trigger_discovery(&req, &status)) {
4118 		err = mgmt_cmd_complete(sk, hdev->id,
4119 					 MGMT_OP_START_SERVICE_DISCOVERY,
4120 					 status, &cp->type, sizeof(cp->type));
4121 		mgmt_pending_remove(cmd);
4125 	err = hci_req_run(&req, start_discovery_complete);
4127 		mgmt_pending_remove(cmd);
4131 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4134 	hci_dev_unlock(hdev);
/* HCI request completion callback for stop_discovery(): complete the
 * pending command and move the discovery state to STOPPED.
 */
4138 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4140 	struct mgmt_pending_cmd *cmd;
4142 	BT_DBG("status %d", status);
4146 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4148 		cmd->cmd_complete(cmd, mgmt_status(status));
4149 		mgmt_pending_remove(cmd);
4153 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4155 	hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: verify discovery of the requested
 * type is active, then run an HCI request that stops it.  -ENODATA from
 * hci_req_run means no HCI commands were needed and the stop completes
 * immediately.
 */
4158 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4161 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4162 	struct mgmt_pending_cmd *cmd;
4163 	struct hci_request req;
4166 	BT_DBG("%s", hdev->name);
4170 	if (!hci_discovery_active(hdev)) {
4171 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4172 					 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4173 					 sizeof(mgmt_cp->type));
4177 	if (hdev->discovery.type != mgmt_cp->type) {
4178 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4179 					 MGMT_STATUS_INVALID_PARAMS,
4180 					 &mgmt_cp->type, sizeof(mgmt_cp->type));
4184 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4190 	cmd->cmd_complete = generic_cmd_complete;
4192 	hci_req_init(&req, hdev);
4194 	hci_stop_discovery(&req);
4196 	err = hci_req_run(&req, stop_discovery_complete);
4198 		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4202 	mgmt_pending_remove(cmd);
4204 	/* If no HCI commands were sent we're done */
4205 	if (err == -ENODATA) {
4206 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4207 					 &mgmt_cp->type, sizeof(mgmt_cp->type));
4208 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4212 	hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: update the name-resolution state of an
 * inquiry-cache entry during active discovery.  name_known marks the
 * entry resolved; otherwise it is flagged for name resolution.
 */
4216 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4219 	struct mgmt_cp_confirm_name *cp = data;
4220 	struct inquiry_entry *e;
4223 	BT_DBG("%s", hdev->name);
4227 	if (!hci_discovery_active(hdev)) {
4228 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4229 					 MGMT_STATUS_FAILED, &cp->addr,
4234 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4236 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4237 					 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4242 	if (cp->name_known) {
4243 		e->name_state = NAME_KNOWN;
4246 		e->name_state = NAME_NEEDED;
4247 		hci_inquiry_cache_update_resolve(hdev, e);
4250 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4251 				&cp->addr, sizeof(cp->addr));
4254 	hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the controller's
 * blacklist and emit MGMT_EV_DEVICE_BLOCKED on success.
 */
4258 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4261 	struct mgmt_cp_block_device *cp = data;
4265 	BT_DBG("%s", hdev->name);
4267 	if (!bdaddr_type_is_valid(cp->addr.type))
4268 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4269 					 MGMT_STATUS_INVALID_PARAMS,
4270 					 &cp->addr, sizeof(cp->addr));
4274 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4277 		status = MGMT_STATUS_FAILED;
4281 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4283 	status = MGMT_STATUS_SUCCESS;
4286 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4287 				&cp->addr, sizeof(cp->addr));
4289 	hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the
 * blacklist and emit MGMT_EV_DEVICE_UNBLOCKED on success; a missing
 * entry is reported as invalid parameters.
 */
4294 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4297 	struct mgmt_cp_unblock_device *cp = data;
4301 	BT_DBG("%s", hdev->name);
4303 	if (!bdaddr_type_is_valid(cp->addr.type))
4304 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4305 					 MGMT_STATUS_INVALID_PARAMS,
4306 					 &cp->addr, sizeof(cp->addr));
4310 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4313 		status = MGMT_STATUS_INVALID_PARAMS;
4317 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4319 	status = MGMT_STATUS_SUCCESS;
4322 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4323 				&cp->addr, sizeof(cp->addr));
4325 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version) after validating the source field, then run
 * an HCI request (presumably to refresh EIR with the new DI record in
 * the elided lines — confirm).
 */
4330 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4333 	struct mgmt_cp_set_device_id *cp = data;
4334 	struct hci_request req;
4338 	BT_DBG("%s", hdev->name);
4340 	source = __le16_to_cpu(cp->source);
/* Valid DI sources: 0x0000 (disabled), 0x0001 (SIG), 0x0002 (USB IF). */
4342 	if (source > 0x0002)
4343 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4344 				       MGMT_STATUS_INVALID_PARAMS);
4348 	hdev->devid_source = source;
4349 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4350 	hdev->devid_product = __le16_to_cpu(cp->product);
4351 	hdev->devid_version = __le16_to_cpu(cp->version);
4353 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4356 	hci_req_init(&req, hdev);
4358 	hci_req_run(&req, NULL);
4360 	hci_dev_unlock(hdev);
/* HCI request completion callback for set_advertising(): on error
 * fail all pending SET_ADVERTISING commands with the mapped status;
 * on success sync HCI_ADVERTISING with the controller state, complete
 * the pending commands and broadcast the new settings.
 */
4365 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4368 	struct cmd_lookup match = { NULL, hdev };
4373 		u8 mgmt_err = mgmt_status(status);
4375 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4376 				     cmd_status_rsp, &mgmt_err);
4380 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4381 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
4383 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4385 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4388 	new_settings(hdev, match.sk);
4394 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: val 0x00 disables, 0x01 enables,
 * 0x02 enables connectable advertising.  When no HCI traffic is needed
 * (powered off, no state change, LE links up, or active LE scan) only
 * the flags are toggled and a direct response is sent; otherwise a
 * pending command and HCI request toggle advertising on the controller.
 */
4397 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4400 	struct mgmt_mode *cp = data;
4401 	struct mgmt_pending_cmd *cmd;
4402 	struct hci_request req;
4406 	BT_DBG("request for %s", hdev->name);
4408 	status = mgmt_le_support(hdev);
4410 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4413 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4414 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4415 				       MGMT_STATUS_INVALID_PARAMS);
4421 	/* The following conditions are ones which mean that we should
4422 	 * not do any HCI communication but directly send a mgmt
4423 	 * response to user space (after toggling the flag if
4426 	if (!hdev_is_powered(hdev) ||
4427 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4428 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4429 	    hci_conn_num(hdev, LE_LINK) > 0 ||
4430 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4431 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4435 		changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4436 		if (cp->val == 0x02)
4437 			hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4439 			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4441 		changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4442 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4445 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4450 			err = new_settings(hdev, sk);
/* Only one SET_ADVERTISING / SET_LE request may be in flight. */
4455 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4456 	    pending_find(MGMT_OP_SET_LE, hdev)) {
4457 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4462 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4468 	hci_req_init(&req, hdev);
4470 	if (cp->val == 0x02)
4471 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4473 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4476 		enable_advertising(&req);
4478 		disable_advertising(&req);
4480 	err = hci_req_run(&req, set_advertising_complete);
4482 		mgmt_pending_remove(cmd);
4485 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS.
 *
 * Stores a new LE static random address in hdev->static_addr. Only
 * allowed while the controller is powered off, and only for
 * LE-capable controllers. A non-ANY address must not be BDADDR_NONE
 * and must have the two most significant bits set, as required for
 * static random addresses (Bluetooth Core Spec, Vol 6, Part B, 1.3.2.1).
 */
4489 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4490 void *data, u16 len)
4492 struct mgmt_cp_set_static_address *cp = data;
4495 BT_DBG("%s", hdev->name);
4497 if (!lmp_le_capable(hdev))
4498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4499 MGMT_STATUS_NOT_SUPPORTED);
/* Changing the identity address while powered is not allowed */
4501 if (hdev_is_powered(hdev))
4502 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4503 MGMT_STATUS_REJECTED);
4505 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4506 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4507 return mgmt_cmd_status(sk, hdev->id,
4508 MGMT_OP_SET_STATIC_ADDRESS,
4509 MGMT_STATUS_INVALID_PARAMS);
4511 /* Two most significant bits shall be set */
4512 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4513 return mgmt_cmd_status(sk, hdev->id,
4514 MGMT_OP_SET_STATIC_ADDRESS,
4515 MGMT_STATUS_INVALID_PARAMS);
4520 bacpy(&hdev->static_addr, &cp->bdaddr);
4522 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4526 err = new_settings(hdev, sk);
4529 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS.
 *
 * Updates hdev->le_scan_interval/window after range-checking both
 * against the HCI-defined limits (0x0004..0x4000) and enforcing
 * window <= interval. If passive background scanning is currently
 * running (HCI_LE_SCAN set while discovery is stopped), the scan is
 * restarted so the new parameters take effect immediately.
 */
4533 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4534 void *data, u16 len)
4536 struct mgmt_cp_set_scan_params *cp = data;
4537 __u16 interval, window;
4540 BT_DBG("%s", hdev->name);
4542 if (!lmp_le_capable(hdev))
4543 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4544 MGMT_STATUS_NOT_SUPPORTED);
4546 interval = __le16_to_cpu(cp->interval);
4548 if (interval < 0x0004 || interval > 0x4000)
4549 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4550 MGMT_STATUS_INVALID_PARAMS);
4552 window = __le16_to_cpu(cp->window);
4554 if (window < 0x0004 || window > 0x4000)
4555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4556 MGMT_STATUS_INVALID_PARAMS);
/* The scan window must fit inside the scan interval */
4558 if (window > interval)
4559 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4560 MGMT_STATUS_INVALID_PARAMS);
4564 hdev->le_scan_interval = interval;
4565 hdev->le_scan_window = window;
4567 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4570 /* If background scan is running, restart it so new parameters are
4573 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4574 hdev->discovery.state == DISCOVERY_STOPPED) {
4575 struct hci_request req;
4577 hci_req_init(&req, hdev);
/* Stop then restart passive scanning with the updated parameters */
4579 hci_req_add_le_scan_disable(&req);
4580 hci_req_add_le_passive_scan(&req);
4582 hci_req_run(&req, NULL);
4585 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Looks up the pending command; on HCI error it answers with a
 * command-status, otherwise it updates HCI_FAST_CONNECTABLE to match
 * the requested mode (taken from the saved cmd->param), replies with
 * the current settings and broadcasts them.
 */
4590 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4593 struct mgmt_pending_cmd *cmd;
4595 BT_DBG("status 0x%02x", status);
4599 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4604 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4605 mgmt_status(status));
4607 struct mgmt_mode *cp = cmd->param;
4610 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4612 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4614 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4615 new_settings(hdev, cmd->sk);
4618 mgmt_pending_remove(cmd);
4621 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Requires BR/EDR enabled and a controller of at least Bluetooth 1.2
 * (fast connectable relies on interlaced page scan). If the flag
 * already matches, or the device is powered off, the flag state is
 * handled directly; otherwise a write_fast_connectable HCI request is
 * queued and completed by fast_connectable_complete.
 */
4624 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4625 void *data, u16 len)
4627 struct mgmt_mode *cp = data;
4628 struct mgmt_pending_cmd *cmd;
4629 struct hci_request req;
4632 BT_DBG("%s", hdev->name);
4634 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4635 hdev->hci_ver < BLUETOOTH_VER_1_2)
4636 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4637 MGMT_STATUS_NOT_SUPPORTED);
4639 if (cp->val != 0x00 && cp->val != 0x01)
4640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4641 MGMT_STATUS_INVALID_PARAMS);
/* Reject if another SET_FAST_CONNECTABLE is already in flight */
4645 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4646 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested mode matches the current flag */
4651 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4652 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: just toggle the flag, no HCI traffic needed */
4657 if (!hdev_is_powered(hdev)) {
4658 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4659 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4661 new_settings(hdev, sk);
4665 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4672 hci_req_init(&req, hdev);
4674 write_fast_connectable(&req, cp->val);
4676 err = hci_req_run(&req, fast_connectable_complete);
4678 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4679 MGMT_STATUS_FAILED);
4680 mgmt_pending_remove(cmd);
4684 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_BREDR.
 *
 * set_bredr() sets HCI_BREDR_ENABLED optimistically before running the
 * request, so on failure this callback clears it again before replying
 * with an error; on success it sends the settings response and
 * broadcasts the new settings.
 */
4689 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4691 struct mgmt_pending_cmd *cmd;
4693 BT_DBG("status 0x%02x", status);
4697 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4702 u8 mgmt_err = mgmt_status(status);
4704 /* We need to restore the flag if related HCI commands
/* Roll back the optimistic flag flip done in set_bredr() */
4707 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4709 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4711 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4712 new_settings(hdev, cmd->sk);
4715 mgmt_pending_remove(cmd);
4718 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR.
 *
 * Enables or disables BR/EDR on a dual-mode (BR/EDR + LE) controller.
 * LE must remain enabled (disabling BR/EDR on an LE-only setup is the
 * supported direction; turning LE off instead is rejected elsewhere).
 * Disabling while powered on is rejected, as is re-enabling BR/EDR
 * when a static address or Secure Connections is in use (see the long
 * comment below). The actual enable path queues page-scan and
 * advertising-data updates, completed by set_bredr_complete.
 */
4721 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4723 struct mgmt_mode *cp = data;
4724 struct mgmt_pending_cmd *cmd;
4725 struct hci_request req;
4728 BT_DBG("request for %s", hdev->name);
4730 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4731 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4732 MGMT_STATUS_NOT_SUPPORTED);
4734 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4735 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4736 MGMT_STATUS_REJECTED);
4738 if (cp->val != 0x00 && cp->val != 0x01)
4739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4740 MGMT_STATUS_INVALID_PARAMS);
/* No change requested: just reply with current settings */
4744 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4745 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4749 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears all BR/EDR-only settings with it */
4751 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4752 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4753 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4754 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4755 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4758 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4760 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4764 err = new_settings(hdev, sk);
4768 /* Reject disabling when powered on */
4770 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4771 MGMT_STATUS_REJECTED);
4774 /* When configuring a dual-mode controller to operate
4775 * with LE only and using a static address, then switching
4776 * BR/EDR back on is not allowed.
4778 * Dual-mode controllers shall operate with the public
4779 * address as its identity address for BR/EDR and LE. So
4780 * reject the attempt to create an invalid configuration.
4782 * The same restrictions applies when secure connections
4783 * has been enabled. For BR/EDR this is a controller feature
4784 * while for LE it is a host stack feature. This means that
4785 * switching BR/EDR back on when secure connections has been
4786 * enabled is not a supported transaction.
4788 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4789 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4790 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4791 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4792 MGMT_STATUS_REJECTED);
4797 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4798 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4803 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4809 /* We need to flip the bit already here so that update_adv_data
4810 * generates the correct flags.
4812 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4814 hci_req_init(&req, hdev);
4816 write_fast_connectable(&req, false);
4817 __hci_update_page_scan(&req);
4819 /* Since only the advertising data flags will change, there
4820 * is no need to update the scan response data.
4822 update_adv_data(&req);
4824 err = hci_req_run(&req, set_bredr_complete);
4826 mgmt_pending_remove(cmd);
4829 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_SECURE_CONN.
 *
 * Looks up the pending command and, based on the originally requested
 * mode (cmd->param), sets HCI_SC_ENABLED/HCI_SC_ONLY to reflect:
 * off (both cleared), on (SC_ENABLED only), or SC-only mode (both
 * set). Errors are reported via command-status with the flags left
 * cleared.
 */
4833 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4835 struct mgmt_pending_cmd *cmd;
4836 struct mgmt_mode *cp;
4838 BT_DBG("%s status %u", hdev->name, status);
4842 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4847 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4848 mgmt_status(status));
/* cp->val == 0x00: secure connections fully disabled */
4856 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4857 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* cp->val == 0x01: SC enabled, legacy pairing still allowed */
4860 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4861 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* cp->val == 0x02: SC-only mode */
4864 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4865 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4869 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4870 new_settings(hdev, cmd->sk);
4873 mgmt_pending_remove(cmd);
4875 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN.
 *
 * cp->val: 0x00 = off, 0x01 = on, 0x02 = SC-only mode. Supported when
 * the controller is SC capable or LE is enabled (LE SC is a host
 * feature); with BR/EDR active, SSP must be enabled first. When no
 * HCI write is needed (powered off, no controller SC support, or
 * BR/EDR disabled) the flags are toggled directly; otherwise
 * HCI_OP_WRITE_SC_SUPPORT is issued and completed by
 * sc_enable_complete.
 */
4878 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4879 void *data, u16 len)
4881 struct mgmt_mode *cp = data;
4882 struct mgmt_pending_cmd *cmd;
4883 struct hci_request req;
4887 BT_DBG("request for %s", hdev->name);
4889 if (!lmp_sc_capable(hdev) &&
4890 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4891 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4892 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled on the controller */
4894 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4895 lmp_sc_capable(hdev) &&
4896 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4897 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4898 MGMT_STATUS_REJECTED);
4900 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4901 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4902 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI command can or needs to be sent */
4906 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4907 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4911 changed = !hci_dev_test_and_set_flag(hdev,
4913 if (cp->val == 0x02)
4914 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4916 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4918 changed = hci_dev_test_and_clear_flag(hdev,
4920 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4923 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4928 err = new_settings(hdev, sk);
4933 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4934 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Nothing to do if both SC and SC-only state already match */
4941 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4942 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4943 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4947 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4953 hci_req_init(&req, hdev);
4954 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4955 err = hci_req_run(&req, sc_enable_complete);
4957 mgmt_pending_remove(cmd);
4962 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep debug keys
 * (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally use SSP debug mode
 * (HCI_USE_DEBUG_KEYS). If the use-mode changed while powered with SSP
 * enabled, HCI_OP_WRITE_SSP_DEBUG_MODE is sent to the controller.
 */
4966 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4967 void *data, u16 len)
4969 struct mgmt_mode *cp = data;
4970 bool changed, use_changed;
4973 BT_DBG("request for %s", hdev->name);
4975 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4976 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4977 MGMT_STATUS_INVALID_PARAMS);
4982 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4984 changed = hci_dev_test_and_clear_flag(hdev,
4985 HCI_KEEP_DEBUG_KEYS);
/* 0x02 also switches the controller into SSP debug mode */
4987 if (cp->val == 0x02)
4988 use_changed = !hci_dev_test_and_set_flag(hdev,
4989 HCI_USE_DEBUG_KEYS);
4991 use_changed = hci_dev_test_and_clear_flag(hdev,
4992 HCI_USE_DEBUG_KEYS);
4994 if (hdev_is_powered(hdev) && use_changed &&
4995 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4996 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4997 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4998 sizeof(mode), &mode);
5001 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5006 err = new_settings(hdev, sk);
5009 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY.
 *
 * Enables or disables LE privacy (RPA usage). Only permitted while
 * powered off and on LE-capable controllers. Enabling stores the
 * caller-supplied IRK in hdev->irk and marks the current RPA expired
 * so a fresh one is generated on power-on; disabling wipes the IRK.
 */
5013 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5016 struct mgmt_cp_set_privacy *cp = cp_data;
5020 BT_DBG("request for %s", hdev->name);
5022 if (!lmp_le_capable(hdev))
5023 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5024 MGMT_STATUS_NOT_SUPPORTED);
5026 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5027 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5028 MGMT_STATUS_INVALID_PARAMS);
5030 if (hdev_is_powered(hdev))
5031 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5032 MGMT_STATUS_REJECTED);
5036 /* If user space supports this command it is also expected to
5037 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5039 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5042 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5043 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA on next power-on */
5044 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5046 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5047 memset(hdev->irk, 0, sizeof(hdev->irk));
5048 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5051 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5056 err = new_settings(hdev, sk);
5059 hci_dev_unlock(hdev);
/* Validate the address portion of an IRK entry from LOAD_IRKS.
 *
 * Public LE addresses are always acceptable; random LE addresses must
 * be static random (two most significant bits set). Other address
 * types fall through to the (not visible here) default handling.
 */
5063 static bool irk_is_valid(struct mgmt_irk_info *irk)
5065 switch (irk->addr.type) {
5066 case BDADDR_LE_PUBLIC:
5069 case BDADDR_LE_RANDOM:
5070 /* Two most significant bits shall be set */
5071 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS.
 *
 * Replaces the kernel's IRK store with the list supplied by user
 * space: validates the count against the maximum that fits in a u16
 * length, checks the exact payload size, validates every entry with
 * irk_is_valid(), then clears the existing IRKs and adds the new ones.
 * Finally sets HCI_RPA_RESOLVING since userspace has shown IRK support.
 */
5079 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5082 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound on entries such that the total length fits in u16 */
5083 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5084 sizeof(struct mgmt_irk_info));
5085 u16 irk_count, expected_len;
5088 BT_DBG("request for %s", hdev->name);
5090 if (!lmp_le_capable(hdev))
5091 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5092 MGMT_STATUS_NOT_SUPPORTED);
5094 irk_count = __le16_to_cpu(cp->irk_count);
5095 if (irk_count > max_irk_count) {
5096 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5097 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5098 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared entry count exactly */
5101 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5102 if (expected_len != len) {
5103 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5106 MGMT_STATUS_INVALID_PARAMS);
5109 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries before touching the existing IRK list */
5111 for (i = 0; i < irk_count; i++) {
5112 struct mgmt_irk_info *key = &cp->irks[i];
5114 if (!irk_is_valid(key))
5115 return mgmt_cmd_status(sk, hdev->id,
5117 MGMT_STATUS_INVALID_PARAMS);
5122 hci_smp_irks_clear(hdev);
5124 for (i = 0; i < irk_count; i++) {
5125 struct mgmt_irk_info *irk = &cp->irks[i];
5128 if (irk->addr.type == BDADDR_LE_PUBLIC)
5129 addr_type = ADDR_LE_DEV_PUBLIC;
5131 addr_type = ADDR_LE_DEV_RANDOM;
5133 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5137 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5139 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5141 hci_dev_unlock(hdev);
/* Validate a long-term-key entry from LOAD_LONG_TERM_KEYS.
 *
 * The master field must be a boolean (0x00/0x01). Public LE addresses
 * are accepted; random LE addresses must be static random (two most
 * significant bits set).
 */
5146 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5148 if (key->master != 0x00 && key->master != 0x01)
5151 switch (key->addr.type) {
5152 case BDADDR_LE_PUBLIC:
5155 case BDADDR_LE_RANDOM:
5156 /* Two most significant bits shall be set */
5157 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Replaces the kernel's SMP LTK store with the user-supplied list.
 * Validates count and exact payload length, checks each key with
 * ltk_is_valid(), clears the existing keys, then adds each key with
 * SMP type and authentication level derived from the mgmt key type:
 * legacy keys map to SMP_LTK/SMP_LTK_SLAVE depending on the master
 * flag, P-256 keys map to SMP_LTK_P256 (or SMP_LTK_P256_DEBUG).
 */
5165 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5166 void *cp_data, u16 len)
5168 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound on entries such that the total length fits in u16 */
5169 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5170 sizeof(struct mgmt_ltk_info));
5171 u16 key_count, expected_len;
5174 BT_DBG("request for %s", hdev->name);
5176 if (!lmp_le_capable(hdev))
5177 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5178 MGMT_STATUS_NOT_SUPPORTED);
5180 key_count = __le16_to_cpu(cp->key_count);
5181 if (key_count > max_key_count) {
5182 BT_ERR("load_ltks: too big key_count value %u", key_count);
5183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5184 MGMT_STATUS_INVALID_PARAMS);
5187 expected_len = sizeof(*cp) + key_count *
5188 sizeof(struct mgmt_ltk_info);
5189 if (expected_len != len) {
5190 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5192 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5193 MGMT_STATUS_INVALID_PARAMS);
5196 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all keys before clearing the existing store */
5198 for (i = 0; i < key_count; i++) {
5199 struct mgmt_ltk_info *key = &cp->keys[i];
5201 if (!ltk_is_valid(key))
5202 return mgmt_cmd_status(sk, hdev->id,
5203 MGMT_OP_LOAD_LONG_TERM_KEYS,
5204 MGMT_STATUS_INVALID_PARAMS);
5209 hci_smp_ltks_clear(hdev);
5211 for (i = 0; i < key_count; i++) {
5212 struct mgmt_ltk_info *key = &cp->keys[i];
5213 u8 type, addr_type, authenticated;
5215 if (key->addr.type == BDADDR_LE_PUBLIC)
5216 addr_type = ADDR_LE_DEV_PUBLIC;
5218 addr_type = ADDR_LE_DEV_RANDOM;
/* Map mgmt key type to SMP key type + authentication level */
5220 switch (key->type) {
5221 case MGMT_LTK_UNAUTHENTICATED:
5222 authenticated = 0x00;
5223 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5225 case MGMT_LTK_AUTHENTICATED:
5226 authenticated = 0x01;
5227 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5229 case MGMT_LTK_P256_UNAUTH:
5230 authenticated = 0x00;
5231 type = SMP_LTK_P256;
5233 case MGMT_LTK_P256_AUTH:
5234 authenticated = 0x01;
5235 type = SMP_LTK_P256;
5237 case MGMT_LTK_P256_DEBUG:
5238 authenticated = 0x00;
5239 type = SMP_LTK_P256_DEBUG;
5244 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5245 authenticated, key->val, key->enc_size, key->ediv,
5249 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5252 hci_dev_unlock(hdev);
/* cmd_complete handler for GET_CONN_INFO pending commands.
 *
 * Builds an mgmt_rp_get_conn_info from the hci_conn stashed in
 * cmd->user_data: real RSSI/TX-power values on success, the INVALID
 * sentinels otherwise. Drops the connection reference taken when the
 * command was queued.
 */
5257 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5259 struct hci_conn *conn = cmd->user_data;
5260 struct mgmt_rp_get_conn_info rp;
/* cmd->param begins with the mgmt_addr_info the caller sent */
5263 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5265 if (status == MGMT_STATUS_SUCCESS) {
5266 rp.rssi = conn->rssi;
5267 rp.tx_power = conn->tx_power;
5268 rp.max_tx_power = conn->max_tx_power;
5270 rp.rssi = HCI_RSSI_INVALID;
5271 rp.tx_power = HCI_TX_POWER_INVALID;
5272 rp.max_tx_power = HCI_TX_POWER_INVALID;
5275 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5276 status, &rp, sizeof(rp));
5278 hci_conn_drop(conn);
5284 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5287 struct hci_cp_read_rssi *cp;
5288 struct mgmt_pending_cmd *cmd;
5289 struct hci_conn *conn;
5293 BT_DBG("status 0x%02x", hci_status);
5297 /* Commands sent in request are either Read RSSI or Read Transmit Power
5298 * Level so we check which one was last sent to retrieve connection
5299 * handle. Both commands have handle as first parameter so it's safe to
5300 * cast data on the same command struct.
5302 * First command sent is always Read RSSI and we fail only if it fails.
5303 * In other case we simply override error to indicate success as we
5304 * already remembered if TX power value is actually valid.
5306 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5308 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5309 status = MGMT_STATUS_SUCCESS;
5311 status = mgmt_status(hci_status);
5315 BT_ERR("invalid sent_cmd in conn_info response");
5319 handle = __le16_to_cpu(cp->handle);
5320 conn = hci_conn_hash_lookup_handle(hdev, handle);
5322 BT_ERR("unknown handle (%d) in conn_info response", handle);
5326 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5330 cmd->cmd_complete(cmd, status);
5331 mgmt_pending_remove(cmd);
5334 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI, TX power and max TX power for an established BR/EDR
 * or LE connection. Cached hci_conn values are returned directly when
 * recent enough; otherwise an HCI request (Read RSSI plus, when
 * needed, Read Transmit Power Level) refreshes them and the reply is
 * deferred to conn_info_refresh_complete / conn_info_cmd_complete.
 */
5337 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5340 struct mgmt_cp_get_conn_info *cp = data;
5341 struct mgmt_rp_get_conn_info rp;
5342 struct hci_conn *conn;
5343 unsigned long conn_info_age;
5346 BT_DBG("%s", hdev->name);
5348 memset(&rp, 0, sizeof(rp));
5349 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5350 rp.addr.type = cp->addr.type;
5352 if (!bdaddr_type_is_valid(cp->addr.type))
5353 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5354 MGMT_STATUS_INVALID_PARAMS,
5359 if (!hdev_is_powered(hdev)) {
5360 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5361 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the right transport for the address type */
5366 if (cp->addr.type == BDADDR_BREDR)
5367 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5370 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5372 if (!conn || conn->state != BT_CONNECTED) {
5373 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5374 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one GET_CONN_INFO per connection may be in flight */
5379 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5380 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5381 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5385 /* To avoid client trying to guess when to poll again for information we
5386 * calculate conn info age as random value between min/max set in hdev.
5388 conn_info_age = hdev->conn_info_min_age +
5389 prandom_u32_max(hdev->conn_info_max_age -
5390 hdev->conn_info_min_age);
5392 /* Query controller to refresh cached values if they are too old or were
5395 if (time_after(jiffies, conn->conn_info_timestamp +
5396 msecs_to_jiffies(conn_info_age)) ||
5397 !conn->conn_info_timestamp) {
5398 struct hci_request req;
5399 struct hci_cp_read_tx_power req_txp_cp;
5400 struct hci_cp_read_rssi req_rssi_cp;
5401 struct mgmt_pending_cmd *cmd;
5403 hci_req_init(&req, hdev);
5404 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5405 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5408 /* For LE links TX power does not change thus we don't need to
5409 * query for it once value is known.
5411 if (!bdaddr_type_is_le(cp->addr.type) ||
5412 conn->tx_power == HCI_TX_POWER_INVALID) {
5413 req_txp_cp.handle = cpu_to_le16(conn->handle);
5414 req_txp_cp.type = 0x00;
5415 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5416 sizeof(req_txp_cp), &req_txp_cp);
5419 /* Max TX power needs to be read only once per connection */
5420 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5421 req_txp_cp.handle = cpu_to_le16(conn->handle);
5422 req_txp_cp.type = 0x01;
5423 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5424 sizeof(req_txp_cp), &req_txp_cp);
5427 err = hci_req_run(&req, conn_info_refresh_complete);
5431 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a reference to the connection until the command completes */
5438 hci_conn_hold(conn);
5439 cmd->user_data = hci_conn_get(conn);
5440 cmd->cmd_complete = conn_info_cmd_complete;
5442 conn->conn_info_timestamp = jiffies;
5444 /* Cache is valid, just reply with values cached in hci_conn */
5445 rp.rssi = conn->rssi;
5446 rp.tx_power = conn->tx_power;
5447 rp.max_tx_power = conn->max_tx_power;
5449 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5450 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5454 hci_dev_unlock(hdev);
/* cmd_complete handler for GET_CLOCK_INFO pending commands.
 *
 * Builds the reply from the local clock in hdev and, when a piconet
 * connection was involved (cmd->user_data), that connection's clock
 * and accuracy. Drops the connection reference if one was held.
 */
5458 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5460 struct hci_conn *conn = cmd->user_data;
5461 struct mgmt_rp_get_clock_info rp;
5462 struct hci_dev *hdev;
5465 memset(&rp, 0, sizeof(rp));
5466 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5471 hdev = hci_dev_get(cmd->index);
5473 rp.local_clock = cpu_to_le32(hdev->clock);
5478 rp.piconet_clock = cpu_to_le32(conn->clock);
5479 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5483 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5487 hci_conn_drop(conn);
/* HCI request completion callback for MGMT_OP_GET_CLOCK_INFO.
 *
 * Reads back the last Read Clock command to determine whether a
 * piconet clock (which != 0, handle present) or only the local clock
 * was requested, then completes the matching pending command.
 */
5494 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5496 struct hci_cp_read_clock *hci_cp;
5497 struct mgmt_pending_cmd *cmd;
5498 struct hci_conn *conn;
5500 BT_DBG("%s status %u", hdev->name, status);
5504 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which == 0x01 means a piconet clock read tied to a connection */
5508 if (hci_cp->which) {
5509 u16 handle = __le16_to_cpu(hci_cp->handle);
5510 conn = hci_conn_hash_lookup_handle(hdev, handle);
5515 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5519 cmd->cmd_complete(cmd, mgmt_status(status));
5520 mgmt_pending_remove(cmd);
5523 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * Reads the local Bluetooth clock and, when a non-ANY BR/EDR address
 * of an established connection is given, also that connection's
 * piconet clock. Always runs an HCI request (Read Clock, once for
 * local and optionally once for the piconet clock) and defers the
 * reply to get_clock_info_complete / clock_info_cmd_complete.
 * Only BDADDR_BREDR address types are accepted.
 */
5526 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5529 struct mgmt_cp_get_clock_info *cp = data;
5530 struct mgmt_rp_get_clock_info rp;
5531 struct hci_cp_read_clock hci_cp;
5532 struct mgmt_pending_cmd *cmd;
5533 struct hci_request req;
5534 struct hci_conn *conn;
5537 BT_DBG("%s", hdev->name);
5539 memset(&rp, 0, sizeof(rp));
5540 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5541 rp.addr.type = cp->addr.type;
5543 if (cp->addr.type != BDADDR_BREDR)
5544 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5545 MGMT_STATUS_INVALID_PARAMS,
5550 if (!hdev_is_powered(hdev)) {
5551 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5552 MGMT_STATUS_NOT_POWERED, &rp,
/* A non-ANY address asks for a specific connection's piconet clock */
5557 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5558 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5560 if (!conn || conn->state != BT_CONNECTED) {
5561 err = mgmt_cmd_complete(sk, hdev->id,
5562 MGMT_OP_GET_CLOCK_INFO,
5563 MGMT_STATUS_NOT_CONNECTED,
5571 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5577 cmd->cmd_complete = clock_info_cmd_complete;
5579 hci_req_init(&req, hdev);
/* Zeroed hci_cp => which == 0x00: read the local clock first */
5581 memset(&hci_cp, 0, sizeof(hci_cp));
5582 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold a reference to the connection until the command completes */
5585 hci_conn_hold(conn);
5586 cmd->user_data = hci_conn_get(conn);
5588 hci_cp.handle = cpu_to_le16(conn->handle);
5589 hci_cp.which = 0x01; /* Piconet clock */
5590 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5593 err = hci_req_run(&req, get_clock_info_complete);
5595 mgmt_pending_remove(cmd);
5598 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address and address
 * type exists and is in BT_CONNECTED state.
 */
5602 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5604 struct hci_conn *conn;
5606 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5610 if (conn->dst_type != type)
5613 if (conn->state != BT_CONNECTED)
5619 /* This function requires the caller holds hdev->lock */
/* Set (creating if needed) the auto-connect policy for an LE device's
 * connection parameters, and move the params entry onto the matching
 * action list: pend_le_reports for REPORT, pend_le_conns for
 * DIRECT/ALWAYS when not already connected, or no list for
 * DISABLED/LINK_LOSS. Background scanning is updated via the passed
 * request whenever list membership may have changed.
 */
5620 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5621 u8 addr_type, u8 auto_connect)
5623 struct hci_dev *hdev = req->hdev;
5624 struct hci_conn_params *params;
5626 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do if the policy is unchanged */
5630 if (params->auto_connect == auto_connect)
/* Detach from any action list before re-filing below */
5633 list_del_init(&params->action);
5635 switch (auto_connect) {
5636 case HCI_AUTO_CONN_DISABLED:
5637 case HCI_AUTO_CONN_LINK_LOSS:
5638 __hci_update_background_scan(req);
5640 case HCI_AUTO_CONN_REPORT:
5641 list_add(&params->action, &hdev->pend_le_reports);
5642 __hci_update_background_scan(req);
5644 case HCI_AUTO_CONN_DIRECT:
5645 case HCI_AUTO_CONN_ALWAYS:
5646 if (!is_connected(hdev, addr, addr_type)) {
5647 list_add(&params->action, &hdev->pend_le_conns);
5648 __hci_update_background_scan(req);
5653 params->auto_connect = auto_connect;
5655 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit an MGMT_EV_DEVICE_ADDED event for the given address/type;
 * sk is the originating socket (excluded from the broadcast).
 */
5661 static void device_added(struct sock *sk, struct hci_dev *hdev,
5662 bdaddr_t *bdaddr, u8 type, u8 action)
5664 struct mgmt_ev_device_added ev;
5666 bacpy(&ev.addr.bdaddr, bdaddr);
5667 ev.addr.type = type;
5670 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for MGMT_OP_ADD_DEVICE: completes
 * and removes the pending command with the translated status.
 */
5673 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5675 struct mgmt_pending_cmd *cmd;
5677 BT_DBG("status 0x%02x", status);
5681 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5685 cmd->cmd_complete(cmd, mgmt_status(status));
5686 mgmt_pending_remove(cmd);
5689 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * cp->action: 0x00 = background scan and report (LE only),
 * 0x01 = allow incoming connection (BR/EDR) / direct connect (LE),
 * 0x02 = auto-connect (LE only). BR/EDR devices go on the whitelist
 * and trigger a page-scan update; LE devices get conn params with the
 * mapped HCI_AUTO_CONN_* policy via hci_conn_params_set(). The reply
 * is deferred to add_device_complete unless no HCI commands were
 * queued (-ENODATA), in which case it completes immediately.
 */
5692 static int add_device(struct sock *sk, struct hci_dev *hdev,
5693 void *data, u16 len)
5695 struct mgmt_cp_add_device *cp = data;
5696 struct mgmt_pending_cmd *cmd;
5697 struct hci_request req;
5698 u8 auto_conn, addr_type;
5701 BT_DBG("%s", hdev->name);
5703 if (!bdaddr_type_is_valid(cp->addr.type) ||
5704 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5705 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5706 MGMT_STATUS_INVALID_PARAMS,
5707 &cp->addr, sizeof(cp->addr));
5709 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5710 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5711 MGMT_STATUS_INVALID_PARAMS,
5712 &cp->addr, sizeof(cp->addr));
5714 hci_req_init(&req, hdev);
5718 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5724 cmd->cmd_complete = addr_cmd_complete;
5726 if (cp->addr.type == BDADDR_BREDR) {
5727 /* Only incoming connections action is supported for now */
5728 if (cp->action != 0x01) {
5729 err = cmd->cmd_complete(cmd,
5730 MGMT_STATUS_INVALID_PARAMS);
5731 mgmt_pending_remove(cmd);
5735 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5740 __hci_update_page_scan(&req);
/* LE path: translate address type and action to conn params policy */
5745 if (cp->addr.type == BDADDR_LE_PUBLIC)
5746 addr_type = ADDR_LE_DEV_PUBLIC;
5748 addr_type = ADDR_LE_DEV_RANDOM;
5750 if (cp->action == 0x02)
5751 auto_conn = HCI_AUTO_CONN_ALWAYS;
5752 else if (cp->action == 0x01)
5753 auto_conn = HCI_AUTO_CONN_DIRECT;
5755 auto_conn = HCI_AUTO_CONN_REPORT;
5757 /* If the connection parameters don't exist for this device,
5758 * they will be created and configured with defaults.
5760 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5762 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5763 mgmt_pending_remove(cmd);
5768 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5770 err = hci_req_run(&req, add_device_complete);
5772 /* ENODATA means no HCI commands were needed (e.g. if
5773 * the adapter is powered off).
5775 if (err == -ENODATA)
5776 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5777 mgmt_pending_remove(cmd);
5781 hci_dev_unlock(hdev);
/* Emit an MGMT_EV_DEVICE_REMOVED event for the given address/type;
 * sk is the originating socket (excluded from the broadcast).
 */
5785 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5786 bdaddr_t *bdaddr, u8 type)
5788 struct mgmt_ev_device_removed ev;
5790 bacpy(&ev.addr.bdaddr, bdaddr);
5791 ev.addr.type = type;
5793 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for MGMT_OP_REMOVE_DEVICE: completes
 * and removes the pending command with the translated status.
 */
5796 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5798 struct mgmt_pending_cmd *cmd;
5800 BT_DBG("status 0x%02x", status);
5804 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5808 cmd->cmd_complete(cmd, mgmt_status(status));
5809 mgmt_pending_remove(cmd);
5812 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address: removes a BR/EDR whitelist entry (updating
 * page scan) or deletes the LE connection parameters for the device
 * (rejecting entries that are merely DISABLED, i.e. not added via
 * ADD_DEVICE) and updates background scanning. With BDADDR_ANY:
 * clears the whole whitelist and all non-DISABLED LE conn params,
 * emitting DEVICE_REMOVED for each. Reply is deferred to
 * remove_device_complete unless no HCI commands were queued
 * (-ENODATA).
 */
5815 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5816 void *data, u16 len)
5818 struct mgmt_cp_remove_device *cp = data;
5819 struct mgmt_pending_cmd *cmd;
5820 struct hci_request req;
5823 BT_DBG("%s", hdev->name);
5825 hci_req_init(&req, hdev);
5829 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5835 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove a single device */
5837 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5838 struct hci_conn_params *params;
5841 if (!bdaddr_type_is_valid(cp->addr.type)) {
5842 err = cmd->cmd_complete(cmd,
5843 MGMT_STATUS_INVALID_PARAMS);
5844 mgmt_pending_remove(cmd);
5848 if (cp->addr.type == BDADDR_BREDR) {
5849 err = hci_bdaddr_list_del(&hdev->whitelist,
5853 err = cmd->cmd_complete(cmd,
5854 MGMT_STATUS_INVALID_PARAMS);
5855 mgmt_pending_remove(cmd);
5859 __hci_update_page_scan(&req);
5861 device_removed(sk, hdev, &cp->addr.bdaddr,
5866 if (cp->addr.type == BDADDR_LE_PUBLIC)
5867 addr_type = ADDR_LE_DEV_PUBLIC;
5869 addr_type = ADDR_LE_DEV_RANDOM;
5871 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5874 err = cmd->cmd_complete(cmd,
5875 MGMT_STATUS_INVALID_PARAMS);
5876 mgmt_pending_remove(cmd);
/* DISABLED params were not added by ADD_DEVICE; don't remove them */
5880 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5881 err = cmd->cmd_complete(cmd,
5882 MGMT_STATUS_INVALID_PARAMS);
5883 mgmt_pending_remove(cmd);
5887 list_del(&params->action);
5888 list_del(&params->list);
5890 __hci_update_background_scan(&req);
5892 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe the whitelist and all LE conn params */
5894 struct hci_conn_params *p, *tmp;
5895 struct bdaddr_list *b, *btmp;
/* With BDADDR_ANY only type 0 (BR/EDR) is a valid request */
5897 if (cp->addr.type) {
5898 err = cmd->cmd_complete(cmd,
5899 MGMT_STATUS_INVALID_PARAMS);
5900 mgmt_pending_remove(cmd);
5904 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5905 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5910 __hci_update_page_scan(&req);
5912 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5913 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5915 device_removed(sk, hdev, &p->addr, p->addr_type);
5916 list_del(&p->action);
5921 BT_DBG("All LE connection parameters were removed");
5923 __hci_update_background_scan(&req);
5927 err = hci_req_run(&req, remove_device_complete);
5929 /* ENODATA means no HCI commands were needed (e.g. if
5930 * the adapter is powered off).
5932 if (err == -ENODATA)
5933 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5934 mgmt_pending_remove(cmd);
5938 hci_dev_unlock(hdev);
/* Handler for the MGMT Load Connection Parameters command.  Validates
 * the request length against the declared parameter count, clears all
 * disabled connection-parameter entries and then installs each valid
 * LE parameter set, skipping (not failing on) invalid entries.
 * NOTE(review): interior lines (braces, hci_dev_lock, continue
 * statements, addr_type declaration) are missing from this excerpt.
 */
5942 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5945 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
5946 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5947 sizeof(struct mgmt_conn_param));
5948 u16 param_count, expected_len;
5951 if (!lmp_le_capable(hdev))
5952 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5953 MGMT_STATUS_NOT_SUPPORTED);
5955 param_count = __le16_to_cpu(cp->param_count);
5956 if (param_count > max_param_count) {
5957 BT_ERR("load_conn_param: too big param_count value %u",
5959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5960 MGMT_STATUS_INVALID_PARAMS);
5963 expected_len = sizeof(*cp) + param_count *
5964 sizeof(struct mgmt_conn_param);
5965 if (expected_len != len) {
5966 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5968 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5969 MGMT_STATUS_INVALID_PARAMS);
5972 BT_DBG("%s param_count %u", hdev->name, param_count);
5976 hci_conn_params_clear_disabled(hdev);
5978 for (i = 0; i < param_count; i++) {
5979 struct mgmt_conn_param *param = &cp->params[i];
5980 struct hci_conn_params *hci_param;
5981 u16 min, max, latency, timeout;
/* NOTE(review): "¶m" here and below is mojibake for "&param". */
5984 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
5987 if (param->addr.type == BDADDR_LE_PUBLIC) {
5988 addr_type = ADDR_LE_DEV_PUBLIC;
5989 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5990 addr_type = ADDR_LE_DEV_RANDOM;
5992 BT_ERR("Ignoring invalid connection parameters");
5996 min = le16_to_cpu(param->min_interval);
5997 max = le16_to_cpu(param->max_interval);
5998 latency = le16_to_cpu(param->latency);
5999 timeout = le16_to_cpu(param->timeout);
6001 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6002 min, max, latency, timeout);
6004 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6005 BT_ERR("Ignoring invalid connection parameters");
6009 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6012 BT_ERR("Failed to add connection parameters");
6016 hci_param->conn_min_interval = min;
6017 hci_param->conn_max_interval = max;
6018 hci_param->conn_latency = latency;
6019 hci_param->supervision_timeout = timeout;
6022 hci_dev_unlock(hdev);
6024 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for the MGMT Set External Configuration command.  Toggles the
 * HCI_EXT_CONFIGURED flag on an unpowered controller that declares the
 * EXTERNAL_CONFIG quirk, and moves the index between the configured and
 * unconfigured lists when the effective configuration state changes.
 */
6028 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6029 void *data, u16 len)
6031 struct mgmt_cp_set_external_config *cp = data;
6035 BT_DBG("%s", hdev->name);
/* Reconfiguration is only allowed while powered off. */
6037 if (hdev_is_powered(hdev))
6038 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6039 MGMT_STATUS_REJECTED);
6041 if (cp->config != 0x00 && cp->config != 0x01)
6042 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6043 MGMT_STATUS_INVALID_PARAMS);
6045 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6046 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6047 MGMT_STATUS_NOT_SUPPORTED);
6052 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6054 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6056 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6063 err = new_options(hdev, sk);
/* If the configured/unconfigured state flipped, re-announce the index. */
6065 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6066 mgmt_index_removed(hdev);
6068 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6069 hci_dev_set_flag(hdev, HCI_CONFIG);
6070 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6072 queue_work(hdev->req_workqueue, &hdev->power_on);
6074 set_bit(HCI_RAW, &hdev->flags);
6075 mgmt_index_added(hdev);
6080 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Public Address command.  Stores the new
 * public address for controllers that provide a set_bdaddr driver hook,
 * and if that completes the configuration, re-registers the index as a
 * configured controller and powers it on.
 */
6084 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6085 void *data, u16 len)
6087 struct mgmt_cp_set_public_address *cp = data;
6091 BT_DBG("%s", hdev->name);
6093 if (hdev_is_powered(hdev))
6094 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6095 MGMT_STATUS_REJECTED);
/* The all-zero address is not a valid public address. */
6097 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6098 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6099 MGMT_STATUS_INVALID_PARAMS);
6101 if (!hdev->set_bdaddr)
6102 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6103 MGMT_STATUS_NOT_SUPPORTED);
6107 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6108 bacpy(&hdev->public_addr, &cp->bdaddr);
6110 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6117 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6118 err = new_options(hdev, sk);
6120 if (is_configured(hdev)) {
6121 mgmt_index_removed(hdev);
6123 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6125 hci_dev_set_flag(hdev, HCI_CONFIG);
6126 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6128 queue_work(hdev->req_workqueue, &hdev->power_on);
6132 hci_dev_unlock(hdev);
/* Append one EIR (Extended Inquiry Response) TLV field to the buffer at
 * offset eir_len and return the new length.  The field is encoded as
 * length byte (type + data_len), type byte, then data_len data bytes.
 * The caller must guarantee the buffer has room — no bounds check here.
 */
6136 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6139 eir[eir_len++] = sizeof(type) + data_len;
6140 eir[eir_len++] = type;
6141 memcpy(&eir[eir_len], data, data_len);
6142 eir_len += data_len;
/* Handler for the MGMT Read Local OOB Extended Data command.  Builds an
 * EIR blob for the requested transport: class-of-device for BR/EDR, or
 * (for LE) the local address, role, optional SC OOB confirmation/random
 * values and advertising flags.  Also emits the OOB-data-updated event
 * to other sockets that subscribed to it.
 * NOTE(review): error paths, kmalloc failure handling and some case
 * labels are missing from this excerpt.
 */
6147 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6148 void *data, u16 data_len)
6150 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6151 struct mgmt_rp_read_local_oob_ext_data *rp;
6154 u8 status, flags, role, addr[7], hash[16], rand[16];
6157 BT_DBG("%s", hdev->name);
6159 if (!hdev_is_powered(hdev))
6160 return mgmt_cmd_complete(sk, hdev->id,
6161 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6162 MGMT_STATUS_NOT_POWERED,
6163 &cp->type, sizeof(cp->type));
/* cp->type is a bitmask of address types; only two combinations
 * are accepted: BR/EDR alone, or both LE address types together.
 */
6166 case BIT(BDADDR_BREDR):
6167 status = mgmt_bredr_support(hdev);
6169 return mgmt_cmd_complete(sk, hdev->id,
6170 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6175 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6176 status = mgmt_le_support(hdev);
6178 return mgmt_cmd_complete(sk, hdev->id,
6179 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
/* Worst-case LE EIR size: addr(9) + role(3) + hash(18) +
 * random(18) + flags(3).
 */
6182 eir_len = 9 + 3 + 18 + 18 + 3;
6185 return mgmt_cmd_complete(sk, hdev->id,
6186 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6187 MGMT_STATUS_INVALID_PARAMS,
6188 &cp->type, sizeof(cp->type));
6193 rp_len = sizeof(*rp) + eir_len;
6194 rp = kmalloc(rp_len, GFP_ATOMIC);
6196 hci_dev_unlock(hdev);
6202 case BIT(BDADDR_BREDR):
6203 eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
6204 hdev->dev_class, 3);
6206 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6207 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6208 smp_generate_oob(hdev, hash, rand) < 0) {
6209 hci_dev_unlock(hdev);
6210 err = mgmt_cmd_complete(sk, hdev->id,
6211 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6213 &cp->type, sizeof(cp->type));
/* Pick the address to advertise: RPA when privacy is on,
 * otherwise static or public depending on configuration.
 */
6217 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6218 memcpy(addr, &hdev->rpa, 6);
6220 } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6221 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6222 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6223 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6224 memcpy(addr, &hdev->static_addr, 6);
6227 memcpy(addr, &hdev->bdaddr, 6);
6231 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6232 addr, sizeof(addr));
6234 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6239 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6240 &role, sizeof(role));
6242 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6243 eir_len = eir_append_data(rp->eir, eir_len,
6245 hash, sizeof(hash));
6247 eir_len = eir_append_data(rp->eir, eir_len,
6249 rand, sizeof(rand));
6252 flags = get_adv_discov_flags(hdev);
6254 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6255 flags |= LE_AD_NO_BREDR;
6257 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6258 &flags, sizeof(flags));
6262 rp->type = cp->type;
6263 rp->eir_len = cpu_to_le16(eir_len);
6265 hci_dev_unlock(hdev);
6267 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6269 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6270 MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
/* Notify other subscribed sockets (sk excluded) of the new OOB data. */
6274 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6275 rp, sizeof(*rp) + eir_len,
6276 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Handler for the MGMT Read Advertising Features command.  At this
 * revision no advertising instances are supported, so the reply simply
 * reports zero flags/instances and the legacy 31-byte data limits.
 */
6284 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6285 void *data, u16 data_len)
6287 struct mgmt_rp_read_adv_features *rp;
6291 BT_DBG("%s", hdev->name);
6295 rp_len = sizeof(*rp);
6296 rp = kmalloc(rp_len, GFP_ATOMIC);
6298 hci_dev_unlock(hdev);
6302 rp->supported_flags = cpu_to_le32(0);
/* 31 octets is the legacy advertising/scan-response PDU payload max. */
6303 rp->max_adv_data_len = 31;
6304 rp->max_scan_rsp_len = 31;
6305 rp->max_instances = 0;
6306 rp->num_instances = 0;
6308 hci_dev_unlock(hdev);
6310 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6311 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Dispatch table for MGMT commands, indexed by opcode.  Each entry pairs
 * a handler with its fixed (or minimum, when HCI_MGMT_VAR_LEN is set)
 * parameter size; HCI_MGMT_UNTRUSTED marks commands usable by
 * unprivileged sockets and HCI_MGMT_UNCONFIGURED those allowed on
 * unconfigured controllers.
 */
6318 static const struct hci_mgmt_handler mgmt_handlers[] = {
6319 { NULL }, /* 0x0000 (no command) */
6320 { read_version, MGMT_READ_VERSION_SIZE,
6322 HCI_MGMT_UNTRUSTED },
6323 { read_commands, MGMT_READ_COMMANDS_SIZE,
6325 HCI_MGMT_UNTRUSTED },
6326 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6328 HCI_MGMT_UNTRUSTED },
6329 { read_controller_info, MGMT_READ_INFO_SIZE,
6330 HCI_MGMT_UNTRUSTED },
6331 { set_powered, MGMT_SETTING_SIZE },
6332 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6333 { set_connectable, MGMT_SETTING_SIZE },
6334 { set_fast_connectable, MGMT_SETTING_SIZE },
6335 { set_bondable, MGMT_SETTING_SIZE },
6336 { set_link_security, MGMT_SETTING_SIZE },
6337 { set_ssp, MGMT_SETTING_SIZE },
6338 { set_hs, MGMT_SETTING_SIZE },
6339 { set_le, MGMT_SETTING_SIZE },
6340 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6341 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6342 { add_uuid, MGMT_ADD_UUID_SIZE },
6343 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6344 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6346 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6348 { disconnect, MGMT_DISCONNECT_SIZE },
6349 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6350 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6351 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6352 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6353 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6354 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6355 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6356 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6357 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6358 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6359 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6360 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6361 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6363 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6364 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6365 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6366 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6367 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6368 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6369 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6370 { set_advertising, MGMT_SETTING_SIZE },
6371 { set_bredr, MGMT_SETTING_SIZE },
6372 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6373 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6374 { set_secure_conn, MGMT_SETTING_SIZE },
6375 { set_debug_keys, MGMT_SETTING_SIZE },
6376 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6377 { load_irks, MGMT_LOAD_IRKS_SIZE,
6379 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6380 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6381 { add_device, MGMT_ADD_DEVICE_SIZE },
6382 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6383 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6385 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6387 HCI_MGMT_UNTRUSTED },
6388 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6389 HCI_MGMT_UNCONFIGURED |
6390 HCI_MGMT_UNTRUSTED },
6391 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6392 HCI_MGMT_UNCONFIGURED },
6393 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6394 HCI_MGMT_UNCONFIGURED },
6395 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6397 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6398 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6400 HCI_MGMT_UNTRUSTED },
6401 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
/* Announce a newly registered controller index.  Raw-only controllers
 * are never exposed over MGMT; otherwise the legacy (configured or
 * unconfigured) index event and the extended index event are emitted to
 * the respective subscriber groups.
 */
6404 void mgmt_index_added(struct hci_dev *hdev)
6406 struct mgmt_ev_ext_index ev;
6408 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6411 switch (hdev->dev_type) {
6413 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6414 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6415 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6418 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6419 HCI_MGMT_INDEX_EVENTS);
6432 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6433 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller index.  All pending MGMT commands for
 * the device are completed with INVALID_INDEX first, then the legacy and
 * extended index-removed events are emitted (raw-only devices excluded).
 */
6436 void mgmt_index_removed(struct hci_dev *hdev)
6438 struct mgmt_ev_ext_index ev;
6439 u8 status = MGMT_STATUS_INVALID_INDEX;
6441 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6444 switch (hdev->dev_type) {
/* opcode 0 == match every pending command on this hdev. */
6446 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6448 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6449 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6450 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6453 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6454 HCI_MGMT_INDEX_EVENTS);
6467 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6468 HCI_MGMT_EXT_INDEX_EVENTS);
6471 /* This function requires the caller holds hdev->lock */
/* Re-sort every known LE connection-parameter entry back onto the
 * pend_le_conns / pend_le_reports action lists according to its
 * auto_connect policy, then kick the background scan.  Used when
 * powering on (including the AUTO_OFF case).
 */
6472 static void restart_le_actions(struct hci_request *req)
6474 struct hci_dev *hdev = req->hdev;
6475 struct hci_conn_params *p;
6477 list_for_each_entry(p, &hdev->le_conn_params, list) {
6478 /* Needed for AUTO_OFF case where might not "really"
6479 * have been powered off.
6481 list_del_init(&p->action);
6483 switch (p->auto_connect) {
6484 case HCI_AUTO_CONN_DIRECT:
6485 case HCI_AUTO_CONN_ALWAYS:
6486 list_add(&p->action, &hdev->pend_le_conns);
6488 case HCI_AUTO_CONN_REPORT:
6489 list_add(&p->action, &hdev->pend_le_reports);
6496 __hci_update_background_scan(req);
/* HCI-request completion callback for the power-on sequence: answers all
 * pending Set Powered commands and broadcasts the new settings.
 */
6499 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6501 struct cmd_lookup match = { NULL, hdev };
6503 BT_DBG("status 0x%02x", status);
6506 /* Register the available SMP channels (BR/EDR and LE) only
6507 * when successfully powering on the controller. This late
6508 * registration is required so that LE SMP can clearly
6509 * decide if the public address or static address is used.
6516 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6518 new_settings(hdev, match.sk);
6520 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's host-side
 * configuration (SSP, SC, LE host support, advertising data, link
 * security, page scan) in line with the mgmt flags after power-on.
 * Returns the result of hci_req_run(); powered_complete runs afterward.
 */
6526 static int powered_update_hci(struct hci_dev *hdev)
6528 struct hci_request req;
6531 hci_req_init(&req, hdev);
/* Only write each mode when the controller state disagrees with
 * the host flag, to avoid redundant HCI commands.
 */
6533 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
6534 !lmp_host_ssp_capable(hdev)) {
6537 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6539 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6542 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6543 sizeof(support), &support);
6547 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
6548 lmp_bredr_capable(hdev)) {
6549 struct hci_cp_write_le_host_supported cp;
6554 /* Check first if we already have the right
6555 * host state (host features set)
6557 if (cp.le != lmp_host_le_capable(hdev) ||
6558 cp.simul != lmp_host_le_br_capable(hdev))
6559 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6563 if (lmp_le_capable(hdev)) {
6564 /* Make sure the controller has a good default for
6565 * advertising data. This also applies to the case
6566 * where BR/EDR was toggled during the AUTO_OFF phase.
6568 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
6569 update_adv_data(&req);
6570 update_scan_rsp_data(&req);
6573 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6574 enable_advertising(&req);
6576 restart_le_actions(&req);
6579 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
6580 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6581 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6582 sizeof(link_sec), &link_sec);
6584 if (lmp_bredr_capable(hdev)) {
6585 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
6586 write_fast_connectable(&req, true);
6588 write_fast_connectable(&req, false);
6589 __hci_update_page_scan(&req);
6595 return hci_req_run(&req, powered_complete);
/* Notify the management interface of a power state change.  On power-on
 * it triggers powered_update_hci(); on power-off it completes pending
 * commands with NOT_POWERED (or INVALID_INDEX during unregistration),
 * zeroes the announced class of device, and broadcasts new settings.
 */
6598 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6600 struct cmd_lookup match = { NULL, hdev };
6601 u8 status, zero_cod[] = { 0, 0, 0 };
6604 if (!hci_dev_test_flag(hdev, HCI_MGMT))
6608 if (powered_update_hci(hdev) == 0)
6611 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6616 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6618 /* If the power off is because of hdev unregistration let
6619 * use the appropriate INVALID_INDEX status. Otherwise use
6620 * NOT_POWERED. We cover both scenarios here since later in
6621 * mgmt_index_removed() any hci_conn callbacks will have already
6622 * been triggered, potentially causing misleading DISCONNECTED
6625 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
6626 status = MGMT_STATUS_INVALID_INDEX;
6628 status = MGMT_STATUS_NOT_POWERED;
6630 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6632 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6633 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6634 zero_cod, sizeof(zero_cod), NULL);
6637 err = new_settings(hdev, match.sk);
/* Fail the pending Set Powered command when power-on could not complete,
 * mapping -ERFKILL to the dedicated RFKILLED status.
 */
6645 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6647 struct mgmt_pending_cmd *cmd;
6650 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6654 if (err == -ERFKILL)
6655 status = MGMT_STATUS_RFKILLED;
6657 status = MGMT_STATUS_FAILED;
6659 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6661 mgmt_pending_remove(cmd);
/* Discoverable-timeout expiry handler: clears both discoverable flags,
 * restores page-scan-only on BR/EDR, refreshes the LE advertising data
 * and broadcasts the resulting settings change.
 */
6664 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6666 struct hci_request req;
6670 /* When discoverable timeout triggers, then just make sure
6671 * the limited discoverable flag is cleared. Even in the case
6672 * of a timeout triggered from general discoverable, it is
6673 * safe to unconditionally clear the flag.
6675 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
6676 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6678 hci_req_init(&req, hdev);
6679 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6680 u8 scan = SCAN_PAGE;
6681 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6682 sizeof(scan), &scan);
6685 update_adv_data(&req);
6686 hci_req_run(&req, NULL);
6688 hdev->discov_timeout = 0;
6690 new_settings(hdev, NULL);
6692 hci_dev_unlock(hdev);
/* Emit a New Link Key event for a BR/EDR link key, with store_hint
 * telling userspace whether to persist it.
 */
6695 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6698 struct mgmt_ev_new_link_key ev;
6700 memset(&ev, 0, sizeof(ev));
6702 ev.store_hint = persistent;
6703 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6704 ev.key.addr.type = BDADDR_BREDR;
6705 ev.key.type = key->type;
6706 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6707 ev.key.pin_len = key->pin_len;
6709 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authentication state to the
 * corresponding MGMT LTK type constant.  NOTE(review): the case labels
 * preceding each authenticated check are missing from this excerpt.
 */
6712 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6714 switch (ltk->type) {
6717 if (ltk->authenticated)
6718 return MGMT_LTK_AUTHENTICATED;
6719 return MGMT_LTK_UNAUTHENTICATED;
6721 if (ltk->authenticated)
6722 return MGMT_LTK_P256_AUTH;
6723 return MGMT_LTK_P256_UNAUTH;
6724 case SMP_LTK_P256_DEBUG:
6725 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown types. */
6728 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event.  Keys bound to non-identity random
 * addresses (top two bits != 0b11) are reported with store_hint 0 since
 * the peer address will change.
 */
6731 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6733 struct mgmt_ev_new_long_term_key ev;
6735 memset(&ev, 0, sizeof(ev));
6737 /* Devices using resolvable or non-resolvable random addresses
6738 * without providing an indentity resolving key don't require
6739 * to store long term keys. Their addresses will change the
6742 * Only when a remote device provides an identity address
6743 * make sure the long term key is stored. If the remote
6744 * identity is known, the long term keys are internally
6745 * mapped to the identity address. So allow static random
6746 * and public addresses here.
6748 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6749 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6750 ev.store_hint = 0x00;
6752 ev.store_hint = persistent;
6754 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6755 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6756 ev.key.type = mgmt_ltk_type(key);
6757 ev.key.enc_size = key->enc_size;
6758 ev.key.ediv = key->ediv;
6759 ev.key.rand = key->rand;
6761 if (key->type == SMP_LTK)
6764 memcpy(ev.key.val, key->val, sizeof(key->val));
6766 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event; only hint storage when the device actually uses
 * a resolvable private address (irk->rpa is non-zero).
 */
6769 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6771 struct mgmt_ev_new_irk ev;
6773 memset(&ev, 0, sizeof(ev));
6775 /* For identity resolving keys from devices that are already
6776 * using a public address or static random address, do not
6777 * ask for storing this key. The identity resolving key really
6778 * is only mandatory for devices using resovlable random
6781 * Storing all identity resolving keys has the downside that
6782 * they will be also loaded on next boot of they system. More
6783 * identity resolving keys, means more time during scanning is
6784 * needed to actually resolve these addresses.
6786 if (bacmp(&irk->rpa, BDADDR_ANY))
6787 ev.store_hint = 0x01;
6789 ev.store_hint = 0x00;
6791 bacpy(&ev.rpa, &irk->rpa);
6792 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6793 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6794 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6796 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event; like LTKs, keys for
 * non-identity random addresses get store_hint 0.
 */
6799 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6802 struct mgmt_ev_new_csrk ev;
6804 memset(&ev, 0, sizeof(ev));
6806 /* Devices using resolvable or non-resolvable random addresses
6807 * without providing an indentity resolving key don't require
6808 * to store signature resolving keys. Their addresses will change
6809 * the next time around.
6811 * Only when a remote device provides an identity address
6812 * make sure the signature resolving key is stored. So allow
6813 * static random and public addresses here.
6815 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6816 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6817 ev.store_hint = 0x00;
6819 ev.store_hint = persistent;
6821 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6822 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6823 ev.key.type = csrk->type;
6824 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6826 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for a remote-suggested LE
 * parameter update; silently dropped for non-identity addresses.
 */
6829 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6830 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6831 u16 max_interval, u16 latency, u16 timeout)
6833 struct mgmt_ev_new_conn_param ev;
6835 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6838 memset(&ev, 0, sizeof(ev));
6839 bacpy(&ev.addr.bdaddr, bdaddr);
6840 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6841 ev.store_hint = store_hint;
6842 ev.min_interval = cpu_to_le16(min_interval);
6843 ev.max_interval = cpu_to_le16(max_interval);
6844 ev.latency = cpu_to_le16(latency);
6845 ev.timeout = cpu_to_le16(timeout);
6847 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a Device Connected event.  The trailing EIR blob carries either
 * the peer's LE advertising data verbatim, or (for BR/EDR) the remote
 * name and class of device.
 */
6850 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6851 u32 flags, u8 *name, u8 name_len)
6854 struct mgmt_ev_device_connected *ev = (void *) buf;
6857 bacpy(&ev->addr.bdaddr, &conn->dst);
6858 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6860 ev->flags = __cpu_to_le32(flags);
6862 /* We must ensure that the EIR Data fields are ordered and
6863 * unique. Keep it simple for now and avoid the problem by not
6864 * adding any BR/EDR data to the LE adv.
6866 if (conn->le_adv_data_len > 0) {
6867 memcpy(&ev->eir[eir_len],
6868 conn->le_adv_data, conn->le_adv_data_len);
6869 eir_len = conn->le_adv_data_len;
6872 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* All-zero class of device means "unknown" — don't report it. */
6875 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6876 eir_len = eir_append_data(ev->eir, eir_len,
6878 conn->dev_class, 3);
6881 ev->eir_len = cpu_to_le16(eir_len);
6883 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6884 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending Disconnect command
 * successfully and hand its socket back to the caller via *data.
 */
6887 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6889 struct sock **sk = data;
6891 cmd->cmd_complete(cmd, 0);
6896 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: finish a pending Unpair Device command,
 * emitting the Device Unpaired event before completing it.
 */
6899 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6901 struct hci_dev *hdev = data;
6902 struct mgmt_cp_unpair_device *cp = cmd->param;
6904 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6906 cmd->cmd_complete(cmd, 0);
6907 mgmt_pending_remove(cmd);
/* Return true when a Set Powered (off) command is currently pending.
 * NOTE(review): the body that inspects cmd/cp is missing from this
 * excerpt — presumably it checks cmd->param's mode value; verify
 * against the full source.
 */
6910 bool mgmt_powering_down(struct hci_dev *hdev)
6912 struct mgmt_pending_cmd *cmd;
6913 struct mgmt_mode *cp;
6915 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands.  Also expedites the queued power-off work
 * when this was the last open connection during a power-down.
 */
6926 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6927 u8 link_type, u8 addr_type, u8 reason,
6928 bool mgmt_connected)
6930 struct mgmt_ev_device_disconnected ev;
6931 struct sock *sk = NULL;
6933 /* The connection is still in hci_conn_hash so test for 1
6934 * instead of 0 to know if this is the last one.
6936 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6937 cancel_delayed_work(&hdev->power_off);
6938 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6941 if (!mgmt_connected)
6944 if (link_type != ACL_LINK && link_type != LE_LINK)
6947 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6949 bacpy(&ev.addr.bdaddr, bdaddr);
6950 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6953 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6958 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Fail the pending Disconnect command matching this address (and any
 * pending Unpair Device commands) with the translated HCI status.
 */
6962 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6963 u8 link_type, u8 addr_type, u8 status)
6965 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6966 struct mgmt_cp_disconnect *cp;
6967 struct mgmt_pending_cmd *cmd;
6969 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6972 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only complete the command if it targets this exact peer. */
6978 if (bacmp(bdaddr, &cp->addr.bdaddr))
6981 if (cp->addr.type != bdaddr_type)
6984 cmd->cmd_complete(cmd, mgmt_status(status));
6985 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event; also expedites pending power-off when
 * this was the last tracked connection during a power-down.
 */
6988 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6989 u8 addr_type, u8 status)
6991 struct mgmt_ev_connect_failed ev;
6993 /* The connection is still in hci_conn_hash so test for 1
6994 * instead of 0 to know if this is the last one.
6996 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6997 cancel_delayed_work(&hdev->power_off);
6998 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7001 bacpy(&ev.addr.bdaddr, bdaddr);
7002 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7003 ev.status = mgmt_status(status);
7005 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR peer. */
7008 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7010 struct mgmt_ev_pin_code_request ev;
7012 bacpy(&ev.addr.bdaddr, bdaddr);
7013 ev.addr.type = BDADDR_BREDR;
7016 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete the pending PIN Code Reply command with the HCI status. */
7019 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7022 struct mgmt_pending_cmd *cmd;
7024 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7028 cmd->cmd_complete(cmd, mgmt_status(status));
7029 mgmt_pending_remove(cmd);
/* Complete the pending PIN Code Negative Reply command with the HCI
 * status.
 */
7032 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7035 struct mgmt_pending_cmd *cmd;
7037 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7041 cmd->cmd_complete(cmd, mgmt_status(status));
7042 mgmt_pending_remove(cmd);
/* Emit a User Confirm Request event carrying the numeric comparison
 * value and the confirm hint (auto-accept vs. show value).
 */
7045 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7046 u8 link_type, u8 addr_type, u32 value,
7049 struct mgmt_ev_user_confirm_request ev;
7051 BT_DBG("%s", hdev->name);
7053 bacpy(&ev.addr.bdaddr, bdaddr);
7054 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7055 ev.confirm_hint = confirm_hint;
7056 ev.value = cpu_to_le32(value);
7058 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace for passkey entry. */
7062 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7063 u8 link_type, u8 addr_type)
7065 struct mgmt_ev_user_passkey_request ev;
7067 BT_DBG("%s", hdev->name);
7069 bacpy(&ev.addr.bdaddr, bdaddr);
7070 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7072 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey reply
 * commands: find the pending command by opcode and complete it with the
 * translated HCI status.
 */
7076 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7077 u8 link_type, u8 addr_type, u8 status,
7080 struct mgmt_pending_cmd *cmd;
7082 cmd = pending_find(opcode, hdev);
7086 cmd->cmd_complete(cmd, mgmt_status(status));
7087 mgmt_pending_remove(cmd);
/* Wrapper: complete a pending User Confirm Reply command. */
7092 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7093 u8 link_type, u8 addr_type, u8 status)
7095 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7096 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Wrapper: complete a pending User Confirm Negative Reply command. */
7099 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7100 u8 link_type, u8 addr_type, u8 status)
7102 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7104 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Wrapper: complete a pending User Passkey Reply command. */
7107 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7108 u8 link_type, u8 addr_type, u8 status)
7110 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7111 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Wrapper: complete a pending User Passkey Negative Reply command. */
7114 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7115 u8 link_type, u8 addr_type, u8 status)
7117 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7119 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey and
 * how many digits the remote side has entered so far.
 */
7122 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7123 u8 link_type, u8 addr_type, u32 passkey,
7126 struct mgmt_ev_passkey_notify ev;
7128 BT_DBG("%s", hdev->name);
7130 bacpy(&ev.addr.bdaddr, bdaddr);
7131 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7132 ev.passkey = __cpu_to_le32(passkey);
7133 ev.entered = entered;
7135 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Auth Failed event for the connection, and if a pairing command
 * is pending for it, complete that command with the same status.
 */
7138 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7140 struct mgmt_ev_auth_failed ev;
7141 struct mgmt_pending_cmd *cmd;
7142 u8 status = mgmt_status(hci_status);
7144 bacpy(&ev.addr.bdaddr, &conn->dst);
7145 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7148 cmd = find_pairing(conn);
/* Skip the pairing socket so it doesn't get the event twice. */
7150 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7151 cmd ? cmd->sk : NULL);
7154 cmd->cmd_complete(cmd, status);
7155 mgmt_pending_remove(cmd);
/* Completion handler for Write Authentication Enable: on failure report
 * the error to pending Set Link Security commands; on success sync the
 * HCI_LINK_SECURITY flag with the controller state and, if it changed,
 * broadcast new settings.
 */
7159 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7161 struct cmd_lookup match = { NULL, hdev };
7165 u8 mgmt_err = mgmt_status(status);
7166 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7167 cmd_status_rsp, &mgmt_err);
7171 if (test_bit(HCI_AUTH, &hdev->flags))
7172 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7174 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7176 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7180 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command that zeroes the controller's extended
 * inquiry response, clearing the cached copy too.  No-op on controllers
 * without extended inquiry support.
 */
7186 static void clear_eir(struct hci_request *req)
7188 struct hci_dev *hdev = req->hdev;
7189 struct hci_cp_write_eir cp;
7191 if (!lmp_ext_inq_capable(hdev))
7194 memset(hdev->eir, 0, sizeof(hdev->eir));
7196 memset(&cp, 0, sizeof(cp));
7198 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for Write SSP Mode: propagate errors (and roll back
 * the SSP/HS flags) to pending Set SSP commands, otherwise sync the
 * HCI_SSP_ENABLED flag, broadcast settings when changed, and push the
 * matching debug-keys mode to the controller.
 */
7201 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7203 struct cmd_lookup match = { NULL, hdev };
7204 struct hci_request req;
7205 bool changed = false;
7208 u8 mgmt_err = mgmt_status(status);
7210 if (enable && hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so it is cleared alongside. */
7212 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7213 new_settings(hdev, NULL);
7216 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7222 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7224 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7226 changed = hci_dev_test_and_clear_flag(hdev,
7229 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7232 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7235 new_settings(hdev, match.sk);
7240 hci_req_init(&req, hdev);
7242 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7243 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7244 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7245 sizeof(enable), &enable);
7251 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup match, taking a reference. The caller is
 * responsible for the matching sock_put().
 */
7254 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7256 struct cmd_lookup *match = data;
7258 if (match->sk == NULL) {
7259 match->sk = cmd->sk;
7260 sock_hold(match->sk);
/* Completion handler for a Class of Device update.
 * Collects a socket reference from any pending SET_DEV_CLASS /
 * ADD_UUID / REMOVE_UUID command (released in code elided from this
 * excerpt) and broadcasts MGMT_EV_CLASS_OF_DEV_CHANGED with the
 * 3-byte class value.
 */
7264 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7267 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7269 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7270 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7271 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
/* Broadcast to all sockets (no skip socket given here). */
7274 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7275 dev_class, 3, NULL);
/* Completion handler for a local name update.
 * Builds an event carrying the new name and the current short name,
 * caches the name in hdev->dev_name, and emits
 * MGMT_EV_LOCAL_NAME_CHANGED unless the update is part of the
 * power-on sequence.
 */
7281 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7283 struct mgmt_cp_set_local_name ev;
7284 struct mgmt_pending_cmd *cmd;
7289 memset(&ev, 0, sizeof(ev));
7290 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7291 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7293 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Cache the confirmed name locally. */
7295 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7297 /* If this is a HCI command related to powering on the
7298 * HCI dev don't send any mgmt signals.
7300 if (pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the initiating socket: it gets a command response instead. */
7304 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7305 cmd ? cmd->sk : NULL);
/* Completion handler for Read Local OOB Data.
 * Replies to the pending MGMT_OP_READ_LOCAL_OOB_DATA command: a bare
 * status on failure, otherwise the 192-bit hash/randomizer pair plus,
 * when BR/EDR Secure Connections is enabled and 256-bit values were
 * supplied, the 256-bit pair as well (the reply is shortened
 * otherwise). Always removes the pending command.
 */
7308 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7309 u8 *rand192, u8 *hash256, u8 *rand256,
7312 struct mgmt_pending_cmd *cmd;
7314 BT_DBG("%s status %u", hdev->name, status);
7316 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
/* Failure path: report only the translated status. */
7321 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7322 mgmt_status(status));
7324 struct mgmt_rp_read_local_oob_data rp;
7325 size_t rp_size = sizeof(rp);
7327 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7328 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7330 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7331 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7332 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
/* No 256-bit data: trim the reply so those fields are omitted. */
7334 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7337 mgmt_cmd_complete(cmd->sk, hdev->id,
7338 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7342 mgmt_pending_remove(cmd);
/* Return true if the 128-bit @uuid matches any of the @uuid_count
 * entries in the @uuids array (byte-wise comparison).
 */
7345 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7349 for (i = 0; i < uuid_count; i++) {
7350 if (!memcmp(uuid, uuids[i], 16))
/* Walk EIR / advertising data and return whether any advertised
 * service UUID (16-, 32- or 128-bit list types) matches an entry in
 * @uuids. 16- and 32-bit UUIDs are expanded to 128-bit form on top of
 * the Bluetooth base UUID before comparison. Parsing stops if a field
 * length would overrun the buffer.
 */
7357 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7361 while (parsed < eir_len) {
/* Each EIR field: length octet, type octet, then payload. */
7362 u8 field_len = eir[0];
7369 if (eir_len - parsed < field_len + 1)
7373 case EIR_UUID16_ALL:
7374 case EIR_UUID16_SOME:
/* 2-byte UUIDs, little-endian in the air: splice into bytes
 * 12-13 of the base UUID. */
7375 for (i = 0; i + 3 <= field_len; i += 2) {
7376 memcpy(uuid, bluetooth_base_uuid, 16);
7377 uuid[13] = eir[i + 3];
7378 uuid[12] = eir[i + 2];
7379 if (has_uuid(uuid, uuid_count, uuids))
7383 case EIR_UUID32_ALL:
7384 case EIR_UUID32_SOME:
/* 4-byte UUIDs: splice into bytes 12-15 of the base UUID. */
7385 for (i = 0; i + 5 <= field_len; i += 4) {
7386 memcpy(uuid, bluetooth_base_uuid, 16);
7387 uuid[15] = eir[i + 5];
7388 uuid[14] = eir[i + 4];
7389 uuid[13] = eir[i + 3];
7390 uuid[12] = eir[i + 2];
7391 if (has_uuid(uuid, uuid_count, uuids))
7395 case EIR_UUID128_ALL:
7396 case EIR_UUID128_SOME:
/* Full 128-bit UUIDs can be compared directly. */
7397 for (i = 0; i + 17 <= field_len; i += 16) {
7398 memcpy(uuid, eir + i + 2, 16);
7399 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length octet + field_len bytes). */
7405 parsed += field_len + 1;
7406 eir += field_len + 1;
/* Schedule an LE scan restart so controllers with strict duplicate
 * filtering report results again with fresh RSSI values. Does nothing
 * when not scanning, or when a restart delayed by
 * DISCOV_LE_RESTART_DELAY would land past the end of the current scan
 * window (the early-return for that branch is elided here).
 */
7412 static void restart_le_scan(struct hci_dev *hdev)
7414 /* If controller is not scanning we are done. */
7415 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7418 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7419 hdev->discovery.scan_start +
7420 hdev->discovery.scan_duration))
7423 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7424 DISCOV_LE_RESTART_DELAY);
/* Decide whether a discovery result passes the Service Discovery
 * filter (RSSI threshold and/or UUID list) configured in
 * hdev->discovery. Returns true when the result should be reported to
 * userspace.
 */
7427 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7428 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7430 /* If a RSSI threshold has been specified, and
7431 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7432 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7433 * is set, let it through for further processing, as we might need to
7436 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7437 * the results are also dropped.
7439 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7440 (rssi == HCI_RSSI_INVALID ||
7441 (rssi < hdev->discovery.rssi &&
7442 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7445 if (hdev->discovery.uuid_count != 0) {
7446 /* If a list of UUIDs is provided in filter, results with no
7447 * matching UUID should be dropped.
/* Check both the advertising data and the scan response. */
7449 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7450 hdev->discovery.uuids) &&
7451 !eir_has_uuids(scan_rsp, scan_rsp_len,
7452 hdev->discovery.uuid_count,
7453 hdev->discovery.uuids))
7457 /* If duplicate filtering does not report RSSI changes, then restart
7458 * scanning to ensure updated result with updated RSSI values.
7460 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7461 restart_le_scan(hdev);
7463 /* Validate RSSI value against the RSSI threshold once more. */
7464 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7465 rssi < hdev->discovery.rssi)
/* Report a discovered remote device to userspace as
 * MGMT_EV_DEVICE_FOUND. The event carries the EIR/advertising data,
 * an appended Class of Device field when one was provided but absent
 * from the EIR, and any scan response data. Results are suppressed
 * for discovery the kernel did not initiate (except LE passive-scan
 * reports) and run through the service discovery filter when active.
 */
7472 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7473 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7474 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7477 struct mgmt_ev_device_found *ev = (void *)buf;
7480 /* Don't send events for a non-kernel initiated discovery. With
7481 * LE one exception is if we have pend_le_reports > 0 in which
7482 * case we're doing passive scanning and want these events.
7484 if (!hci_discovery_active(hdev)) {
7485 if (link_type == ACL_LINK)
7487 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7491 if (hdev->discovery.result_filtering) {
7492 /* We are using service discovery */
7493 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7498 /* Make sure that the buffer is big enough. The 5 extra bytes
7499 * are for the potential CoD field.
7501 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7504 memset(buf, 0, sizeof(buf));
7506 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7507 * RSSI value was reported as 0 when not available. This behavior
7508 * is kept when using device discovery. This is required for full
7509 * backwards compatibility with the API.
7511 * However when using service discovery, the value 127 will be
7512 * returned when the RSSI is not available.
7514 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7515 link_type == ACL_LINK)
7518 bacpy(&ev->addr.bdaddr, bdaddr);
7519 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7521 ev->flags = cpu_to_le32(flags);
7524 /* Copy EIR or advertising data into event */
7525 memcpy(ev->eir, eir, eir_len);
/* Append CoD as an EIR field only if the data doesn't already
 * carry one. */
7527 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7528 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7531 if (scan_rsp_len > 0)
7532 /* Append scan response data to event */
7533 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7535 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7536 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7538 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit MGMT_EV_DEVICE_FOUND carrying only the remote device's complete
 * name as an EIR_NAME_COMPLETE field, used when a name becomes
 * available for an already-discovered device.
 */
7541 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7542 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7544 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field's length and type octets. */
7545 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7548 ev = (struct mgmt_ev_device_found *) buf;
7550 memset(buf, 0, sizeof(buf));
7552 bacpy(&ev->addr.bdaddr, bdaddr);
7553 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* The event's variable-length tail holds just the name field. */
7556 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7559 ev->eir_len = cpu_to_le16(eir_len);
7561 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast a discovery state change (started/stopped) to userspace as
 * MGMT_EV_DISCOVERING, carrying the current discovery type.
 */
7564 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7566 struct mgmt_ev_discovering ev;
7568 BT_DBG("%s discovering %u", hdev->name, discovering);
7570 memset(&ev, 0, sizeof(ev));
7571 ev.type = hdev->discovery.type;
7572 ev.discovering = discovering;
7574 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* HCI request completion callback for re-enabling advertising; only
 * logs the resulting status.
 */
7577 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7579 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising via an HCI request when the HCI_ADVERTISING
 * setting is on; no-op otherwise. Completion is only logged (see
 * adv_enable_complete).
 */
7582 void mgmt_reenable_advertising(struct hci_dev *hdev)
7584 struct hci_request req;
7586 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
7589 hci_req_init(&req, hdev);
7590 enable_advertising(&req);
7591 hci_req_run(&req, adv_enable_complete);
/* Management channel descriptor: routes HCI_CHANNEL_CONTROL messages
 * through the mgmt_handlers dispatch table, with mgmt_init_hdev run
 * per device.
 */
7594 static struct hci_mgmt_chan chan = {
7595 .channel = HCI_CHANNEL_CONTROL,
7596 .handler_count = ARRAY_SIZE(mgmt_handlers),
7597 .handlers = mgmt_handlers,
7598 .hdev_init = mgmt_init_hdev,
/* NOTE(review): this return belongs to mgmt_init(), whose signature
 * line is elided from this excerpt; it registers the control channel
 * with the HCI core. */
7603 return hci_mgmt_chan_register(&chan);
/* Teardown counterpart: unregister the management control channel. */
7606 void mgmt_exit(void)
7608 hci_mgmt_chan_unregister(&chan);