/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
144 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
146 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
147 "\x00\x00\x00\x00\x00\x00\x00\x00"
149 /* HCI to MGMT error code conversion table */
150 static u8 mgmt_status_table[] = {
152 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
153 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
154 MGMT_STATUS_FAILED, /* Hardware Failure */
155 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
156 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
157 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
158 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
159 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
160 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
161 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
162 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
163 MGMT_STATUS_BUSY, /* Command Disallowed */
164 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
165 MGMT_STATUS_REJECTED, /* Rejected Security */
166 MGMT_STATUS_REJECTED, /* Rejected Personal */
167 MGMT_STATUS_TIMEOUT, /* Host Timeout */
168 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
169 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
170 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
171 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
172 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
173 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
174 MGMT_STATUS_BUSY, /* Repeated Attempts */
175 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
176 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
177 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
178 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
179 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
180 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
181 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
182 MGMT_STATUS_FAILED, /* Unspecified Error */
183 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
184 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
185 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
186 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
187 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
188 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
189 MGMT_STATUS_FAILED, /* Unit Link Key Used */
190 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
191 MGMT_STATUS_TIMEOUT, /* Instant Passed */
192 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
193 MGMT_STATUS_FAILED, /* Transaction Collision */
194 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
195 MGMT_STATUS_REJECTED, /* QoS Rejected */
196 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
197 MGMT_STATUS_REJECTED, /* Insufficient Security */
198 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
199 MGMT_STATUS_BUSY, /* Role Switch Pending */
200 MGMT_STATUS_FAILED, /* Slot Violation */
201 MGMT_STATUS_FAILED, /* Role Switch Failed */
202 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
203 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
204 MGMT_STATUS_BUSY, /* Host Busy Pairing */
205 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
206 MGMT_STATUS_BUSY, /* Controller Busy */
207 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
208 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
209 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
210 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
211 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
214 static u8 mgmt_status(u8 hci_status)
216 if (hci_status < ARRAY_SIZE(mgmt_status_table))
217 return mgmt_status_table[hci_status];
219 return MGMT_STATUS_FAILED;
222 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
225 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
229 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
230 u16 len, int flag, struct sock *skip_sk)
232 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
236 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
237 u16 len, struct sock *skip_sk)
239 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
240 HCI_MGMT_GENERIC_EVENTS, skip_sk);
243 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
244 struct sock *skip_sk)
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
247 HCI_SOCK_TRUSTED, skip_sk);
250 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
253 struct mgmt_rp_read_version rp;
255 BT_DBG("sock %p", sk);
257 rp.version = MGMT_VERSION;
258 rp.revision = cpu_to_le16(MGMT_REVISION);
260 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
264 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
267 struct mgmt_rp_read_commands *rp;
268 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
269 const u16 num_events = ARRAY_SIZE(mgmt_events);
274 BT_DBG("sock %p", sk);
276 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
278 rp = kmalloc(rp_size, GFP_KERNEL);
282 rp->num_commands = cpu_to_le16(num_commands);
283 rp->num_events = cpu_to_le16(num_events);
285 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
286 put_unaligned_le16(mgmt_commands[i], opcode);
288 for (i = 0; i < num_events; i++, opcode++)
289 put_unaligned_le16(mgmt_events[i], opcode);
291 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
298 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
301 struct mgmt_rp_read_index_list *rp;
307 BT_DBG("sock %p", sk);
309 read_lock(&hci_dev_list_lock);
312 list_for_each_entry(d, &hci_dev_list, list) {
313 if (d->dev_type == HCI_BREDR &&
314 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
318 rp_len = sizeof(*rp) + (2 * count);
319 rp = kmalloc(rp_len, GFP_ATOMIC);
321 read_unlock(&hci_dev_list_lock);
326 list_for_each_entry(d, &hci_dev_list, list) {
327 if (hci_dev_test_flag(d, HCI_SETUP) ||
328 hci_dev_test_flag(d, HCI_CONFIG) ||
329 hci_dev_test_flag(d, HCI_USER_CHANNEL))
332 /* Devices marked as raw-only are neither configured
333 * nor unconfigured controllers.
335 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
338 if (d->dev_type == HCI_BREDR &&
339 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
340 rp->index[count++] = cpu_to_le16(d->id);
341 BT_DBG("Added hci%u", d->id);
345 rp->num_controllers = cpu_to_le16(count);
346 rp_len = sizeof(*rp) + (2 * count);
348 read_unlock(&hci_dev_list_lock);
350 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
358 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
359 void *data, u16 data_len)
361 struct mgmt_rp_read_unconf_index_list *rp;
367 BT_DBG("sock %p", sk);
369 read_lock(&hci_dev_list_lock);
372 list_for_each_entry(d, &hci_dev_list, list) {
373 if (d->dev_type == HCI_BREDR &&
374 hci_dev_test_flag(d, HCI_UNCONFIGURED))
378 rp_len = sizeof(*rp) + (2 * count);
379 rp = kmalloc(rp_len, GFP_ATOMIC);
381 read_unlock(&hci_dev_list_lock);
386 list_for_each_entry(d, &hci_dev_list, list) {
387 if (hci_dev_test_flag(d, HCI_SETUP) ||
388 hci_dev_test_flag(d, HCI_CONFIG) ||
389 hci_dev_test_flag(d, HCI_USER_CHANNEL))
392 /* Devices marked as raw-only are neither configured
393 * nor unconfigured controllers.
395 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
398 if (d->dev_type == HCI_BREDR &&
399 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
400 rp->index[count++] = cpu_to_le16(d->id);
401 BT_DBG("Added hci%u", d->id);
405 rp->num_controllers = cpu_to_le16(count);
406 rp_len = sizeof(*rp) + (2 * count);
408 read_unlock(&hci_dev_list_lock);
410 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
411 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
418 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
419 void *data, u16 data_len)
421 struct mgmt_rp_read_ext_index_list *rp;
427 BT_DBG("sock %p", sk);
429 read_lock(&hci_dev_list_lock);
432 list_for_each_entry(d, &hci_dev_list, list) {
433 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
437 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
438 rp = kmalloc(rp_len, GFP_ATOMIC);
440 read_unlock(&hci_dev_list_lock);
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (hci_dev_test_flag(d, HCI_SETUP) ||
447 hci_dev_test_flag(d, HCI_CONFIG) ||
448 hci_dev_test_flag(d, HCI_USER_CHANNEL))
451 /* Devices marked as raw-only are neither configured
452 * nor unconfigured controllers.
454 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
457 if (d->dev_type == HCI_BREDR) {
458 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
459 rp->entry[count].type = 0x01;
461 rp->entry[count].type = 0x00;
462 } else if (d->dev_type == HCI_AMP) {
463 rp->entry[count].type = 0x02;
468 rp->entry[count].bus = d->bus;
469 rp->entry[count++].index = cpu_to_le16(d->id);
470 BT_DBG("Added hci%u", d->id);
473 rp->num_controllers = cpu_to_le16(count);
474 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
476 read_unlock(&hci_dev_list_lock);
478 /* If this command is called at least once, then all the
479 * default index and unconfigured index events are disabled
480 * and from now on only extended index events are used.
482 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
483 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
484 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
486 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
487 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
494 static bool is_configured(struct hci_dev *hdev)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
507 static __le32 get_missing_options(struct hci_dev *hdev)
511 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
512 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
513 options |= MGMT_OPTION_EXTERNAL_CONFIG;
515 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
516 !bacmp(&hdev->public_addr, BDADDR_ANY))
517 options |= MGMT_OPTION_PUBLIC_ADDRESS;
519 return cpu_to_le32(options);
522 static int new_options(struct hci_dev *hdev, struct sock *skip)
524 __le32 options = get_missing_options(hdev);
526 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
527 sizeof(options), skip);
530 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
532 __le32 options = get_missing_options(hdev);
534 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
538 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
539 void *data, u16 data_len)
541 struct mgmt_rp_read_config_info rp;
544 BT_DBG("sock %p %s", sk, hdev->name);
548 memset(&rp, 0, sizeof(rp));
549 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
551 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (hdev->set_bdaddr)
555 options |= MGMT_OPTION_PUBLIC_ADDRESS;
557 rp.supported_options = cpu_to_le32(options);
558 rp.missing_options = get_missing_options(hdev);
560 hci_dev_unlock(hdev);
562 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
566 static u32 get_supported_settings(struct hci_dev *hdev)
570 settings |= MGMT_SETTING_POWERED;
571 settings |= MGMT_SETTING_BONDABLE;
572 settings |= MGMT_SETTING_DEBUG_KEYS;
573 settings |= MGMT_SETTING_CONNECTABLE;
574 settings |= MGMT_SETTING_DISCOVERABLE;
576 if (lmp_bredr_capable(hdev)) {
577 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
578 settings |= MGMT_SETTING_FAST_CONNECTABLE;
579 settings |= MGMT_SETTING_BREDR;
580 settings |= MGMT_SETTING_LINK_SECURITY;
582 if (lmp_ssp_capable(hdev)) {
583 settings |= MGMT_SETTING_SSP;
584 settings |= MGMT_SETTING_HS;
587 if (lmp_sc_capable(hdev))
588 settings |= MGMT_SETTING_SECURE_CONN;
591 if (lmp_le_capable(hdev)) {
592 settings |= MGMT_SETTING_LE;
593 settings |= MGMT_SETTING_ADVERTISING;
594 settings |= MGMT_SETTING_SECURE_CONN;
595 settings |= MGMT_SETTING_PRIVACY;
596 settings |= MGMT_SETTING_STATIC_ADDRESS;
599 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
601 settings |= MGMT_SETTING_CONFIGURATION;
606 static u32 get_current_settings(struct hci_dev *hdev)
610 if (hdev_is_powered(hdev))
611 settings |= MGMT_SETTING_POWERED;
613 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
614 settings |= MGMT_SETTING_CONNECTABLE;
616 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
619 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
620 settings |= MGMT_SETTING_DISCOVERABLE;
622 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
623 settings |= MGMT_SETTING_BONDABLE;
625 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
626 settings |= MGMT_SETTING_BREDR;
628 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
629 settings |= MGMT_SETTING_LE;
631 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
632 settings |= MGMT_SETTING_LINK_SECURITY;
634 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
635 settings |= MGMT_SETTING_SSP;
637 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
638 settings |= MGMT_SETTING_HS;
640 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
641 settings |= MGMT_SETTING_ADVERTISING;
643 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
644 settings |= MGMT_SETTING_SECURE_CONN;
646 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
647 settings |= MGMT_SETTING_DEBUG_KEYS;
649 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
650 settings |= MGMT_SETTING_PRIVACY;
652 /* The current setting for static address has two purposes. The
653 * first is to indicate if the static address will be used and
654 * the second is to indicate if it is actually set.
656 * This means if the static address is not configured, this flag
657 * will never bet set. If the address is configured, then if the
658 * address is actually used decides if the flag is set or not.
660 * For single mode LE only controllers and dual-mode controllers
661 * with BR/EDR disabled, the existence of the static address will
664 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
665 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
666 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
667 if (bacmp(&hdev->static_addr, BDADDR_ANY))
668 settings |= MGMT_SETTING_STATIC_ADDRESS;
674 #define PNP_INFO_SVCLASS_ID 0x1200
676 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
678 u8 *ptr = data, *uuids_start = NULL;
679 struct bt_uuid *uuid;
684 list_for_each_entry(uuid, &hdev->uuids, list) {
687 if (uuid->size != 16)
690 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
694 if (uuid16 == PNP_INFO_SVCLASS_ID)
700 uuids_start[1] = EIR_UUID16_ALL;
704 /* Stop if not enough space to put next UUID */
705 if ((ptr - data) + sizeof(u16) > len) {
706 uuids_start[1] = EIR_UUID16_SOME;
710 *ptr++ = (uuid16 & 0x00ff);
711 *ptr++ = (uuid16 & 0xff00) >> 8;
712 uuids_start[0] += sizeof(uuid16);
718 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
720 u8 *ptr = data, *uuids_start = NULL;
721 struct bt_uuid *uuid;
726 list_for_each_entry(uuid, &hdev->uuids, list) {
727 if (uuid->size != 32)
733 uuids_start[1] = EIR_UUID32_ALL;
737 /* Stop if not enough space to put next UUID */
738 if ((ptr - data) + sizeof(u32) > len) {
739 uuids_start[1] = EIR_UUID32_SOME;
743 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
745 uuids_start[0] += sizeof(u32);
751 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
753 u8 *ptr = data, *uuids_start = NULL;
754 struct bt_uuid *uuid;
759 list_for_each_entry(uuid, &hdev->uuids, list) {
760 if (uuid->size != 128)
766 uuids_start[1] = EIR_UUID128_ALL;
770 /* Stop if not enough space to put next UUID */
771 if ((ptr - data) + 16 > len) {
772 uuids_start[1] = EIR_UUID128_SOME;
776 memcpy(ptr, uuid->uuid, 16);
778 uuids_start[0] += 16;
784 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
786 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
789 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
790 struct hci_dev *hdev,
793 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
796 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
801 name_len = strlen(hdev->dev_name);
803 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
805 if (name_len > max_len) {
807 ptr[1] = EIR_NAME_SHORT;
809 ptr[1] = EIR_NAME_COMPLETE;
811 ptr[0] = name_len + 1;
813 memcpy(ptr + 2, hdev->dev_name, name_len);
815 ad_len += (name_len + 2);
816 ptr += (name_len + 2);
822 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
824 /* TODO: Set the appropriate entries based on advertising instance flags
825 * here once flags other than 0 are supported.
827 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
828 hdev->adv_instance.scan_rsp_len);
830 return hdev->adv_instance.scan_rsp_len;
833 static void update_scan_rsp_data_for_instance(struct hci_request *req,
836 struct hci_dev *hdev = req->hdev;
837 struct hci_cp_le_set_scan_rsp_data cp;
840 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
843 memset(&cp, 0, sizeof(cp));
846 len = create_instance_scan_rsp_data(hdev, cp.data);
848 len = create_default_scan_rsp_data(hdev, cp.data);
850 if (hdev->scan_rsp_data_len == len &&
851 !memcmp(cp.data, hdev->scan_rsp_data, len))
854 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
855 hdev->scan_rsp_data_len = len;
859 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
862 static void update_scan_rsp_data(struct hci_request *req)
864 struct hci_dev *hdev = req->hdev;
867 /* The "Set Advertising" setting supersedes the "Add Advertising"
868 * setting. Here we set the scan response data based on which
869 * setting was set. When neither apply, default to the global settings,
870 * represented by instance "0".
872 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
873 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
878 update_scan_rsp_data_for_instance(req, instance);
881 static u8 get_adv_discov_flags(struct hci_dev *hdev)
883 struct mgmt_pending_cmd *cmd;
885 /* If there's a pending mgmt command the flags will not yet have
886 * their final values, so check for this first.
888 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
890 struct mgmt_mode *cp = cmd->param;
892 return LE_AD_GENERAL;
893 else if (cp->val == 0x02)
894 return LE_AD_LIMITED;
896 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
897 return LE_AD_LIMITED;
898 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
899 return LE_AD_GENERAL;
905 static u8 create_default_adv_data(struct hci_dev *hdev, u8 *ptr)
907 u8 ad_len = 0, flags = 0;
909 flags |= get_adv_discov_flags(hdev);
911 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
912 flags |= LE_AD_NO_BREDR;
915 BT_DBG("adv flags 0x%02x", flags);
925 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
927 ptr[1] = EIR_TX_POWER;
928 ptr[2] = (u8) hdev->adv_tx_power;
937 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 *ptr)
939 /* TODO: Set the appropriate entries based on advertising instance flags
940 * here once flags other than 0 are supported.
942 memcpy(ptr, hdev->adv_instance.adv_data,
943 hdev->adv_instance.adv_data_len);
945 return hdev->adv_instance.adv_data_len;
948 static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
950 struct hci_dev *hdev = req->hdev;
951 struct hci_cp_le_set_adv_data cp;
954 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
957 memset(&cp, 0, sizeof(cp));
960 len = create_instance_adv_data(hdev, cp.data);
962 len = create_default_adv_data(hdev, cp.data);
964 /* There's nothing to do if the data hasn't changed */
965 if (hdev->adv_data_len == len &&
966 memcmp(cp.data, hdev->adv_data, len) == 0)
969 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
970 hdev->adv_data_len = len;
974 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
977 static void update_adv_data(struct hci_request *req)
979 struct hci_dev *hdev = req->hdev;
982 /* The "Set Advertising" setting supersedes the "Add Advertising"
983 * setting. Here we set the advertising data based on which
984 * setting was set. When neither apply, default to the global settings,
985 * represented by instance "0".
987 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
988 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
993 update_adv_data_for_instance(req, instance);
996 int mgmt_update_adv_data(struct hci_dev *hdev)
998 struct hci_request req;
1000 hci_req_init(&req, hdev);
1001 update_adv_data(&req);
1003 return hci_req_run(&req, NULL);
1006 static void create_eir(struct hci_dev *hdev, u8 *data)
1011 name_len = strlen(hdev->dev_name);
1015 if (name_len > 48) {
1017 ptr[1] = EIR_NAME_SHORT;
1019 ptr[1] = EIR_NAME_COMPLETE;
1021 /* EIR Data length */
1022 ptr[0] = name_len + 1;
1024 memcpy(ptr + 2, hdev->dev_name, name_len);
1026 ptr += (name_len + 2);
1029 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1031 ptr[1] = EIR_TX_POWER;
1032 ptr[2] = (u8) hdev->inq_tx_power;
1037 if (hdev->devid_source > 0) {
1039 ptr[1] = EIR_DEVICE_ID;
1041 put_unaligned_le16(hdev->devid_source, ptr + 2);
1042 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1043 put_unaligned_le16(hdev->devid_product, ptr + 6);
1044 put_unaligned_le16(hdev->devid_version, ptr + 8);
1049 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1050 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1051 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1054 static void update_eir(struct hci_request *req)
1056 struct hci_dev *hdev = req->hdev;
1057 struct hci_cp_write_eir cp;
1059 if (!hdev_is_powered(hdev))
1062 if (!lmp_ext_inq_capable(hdev))
1065 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1068 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1071 memset(&cp, 0, sizeof(cp));
1073 create_eir(hdev, cp.data);
1075 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1078 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1080 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1083 static u8 get_service_classes(struct hci_dev *hdev)
1085 struct bt_uuid *uuid;
1088 list_for_each_entry(uuid, &hdev->uuids, list)
1089 val |= uuid->svc_hint;
1094 static void update_class(struct hci_request *req)
1096 struct hci_dev *hdev = req->hdev;
1099 BT_DBG("%s", hdev->name);
1101 if (!hdev_is_powered(hdev))
1104 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1107 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1110 cod[0] = hdev->minor_class;
1111 cod[1] = hdev->major_class;
1112 cod[2] = get_service_classes(hdev);
1114 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1117 if (memcmp(cod, hdev->dev_class, 3) == 0)
1120 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1123 static bool get_connectable(struct hci_dev *hdev)
1125 struct mgmt_pending_cmd *cmd;
1127 /* If there's a pending mgmt command the flag will not yet have
1128 * it's final value, so check for this first.
1130 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1132 struct mgmt_mode *cp = cmd->param;
1136 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1139 static void disable_advertising(struct hci_request *req)
1143 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1146 static void enable_advertising(struct hci_request *req)
1148 struct hci_dev *hdev = req->hdev;
1149 struct hci_cp_le_set_adv_param cp;
1150 u8 own_addr_type, enable = 0x01;
1153 if (hci_conn_num(hdev, LE_LINK) > 0)
1156 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1157 disable_advertising(req);
1159 /* Clear the HCI_LE_ADV bit temporarily so that the
1160 * hci_update_random_address knows that it's safe to go ahead
1161 * and write a new random address. The flag will be set back on
1162 * as soon as the SET_ADV_ENABLE HCI command completes.
1164 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1166 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1169 connectable = get_connectable(hdev);
1171 /* Set require_privacy to true only when non-connectable
1172 * advertising is used. In that case it is fine to use a
1173 * non-resolvable private address.
1175 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1178 memset(&cp, 0, sizeof(cp));
1179 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1180 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1181 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1182 cp.own_address_type = own_addr_type;
1183 cp.channel_map = hdev->le_adv_channel_map;
1185 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1187 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1190 static void service_cache_off(struct work_struct *work)
1192 struct hci_dev *hdev = container_of(work, struct hci_dev,
1193 service_cache.work);
1194 struct hci_request req;
1196 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1199 hci_req_init(&req, hdev);
1206 hci_dev_unlock(hdev);
1208 hci_req_run(&req, NULL);
1211 static void rpa_expired(struct work_struct *work)
1213 struct hci_dev *hdev = container_of(work, struct hci_dev,
1215 struct hci_request req;
1219 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1221 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1224 /* The generation of a new RPA and programming it into the
1225 * controller happens in the enable_advertising() function.
1227 hci_req_init(&req, hdev);
1228 enable_advertising(&req);
1229 hci_req_run(&req, NULL);
1232 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1234 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1237 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1238 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1240 /* Non-mgmt controlled devices get this bit set
1241 * implicitly so that pairing works for them, however
1242 * for mgmt we require user-space to explicitly enable
1245 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1248 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1249 void *data, u16 data_len)
1251 struct mgmt_rp_read_info rp;
1253 BT_DBG("sock %p %s", sk, hdev->name);
1257 memset(&rp, 0, sizeof(rp));
1259 bacpy(&rp.bdaddr, &hdev->bdaddr);
1261 rp.version = hdev->hci_ver;
1262 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1264 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1265 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1267 memcpy(rp.dev_class, hdev->dev_class, 3);
1269 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1270 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1272 hci_dev_unlock(hdev);
1274 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1278 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1280 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1282 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1286 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1288 BT_DBG("%s status 0x%02x", hdev->name, status);
1290 if (hci_conn_count(hdev) == 0) {
1291 cancel_delayed_work(&hdev->power_off);
1292 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1296 static bool hci_stop_discovery(struct hci_request *req)
1298 struct hci_dev *hdev = req->hdev;
1299 struct hci_cp_remote_name_req_cancel cp;
1300 struct inquiry_entry *e;
1302 switch (hdev->discovery.state) {
1303 case DISCOVERY_FINDING:
1304 if (test_bit(HCI_INQUIRY, &hdev->flags))
1305 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1307 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1308 cancel_delayed_work(&hdev->le_scan_disable);
1309 hci_req_add_le_scan_disable(req);
1314 case DISCOVERY_RESOLVING:
1315 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1320 bacpy(&cp.bdaddr, &e->data.bdaddr);
1321 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1327 /* Passive scanning */
1328 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1329 hci_req_add_le_scan_disable(req);
/* Build and run one HCI request that quiesces the controller before power
 * off: disable page/inquiry scan, stop advertising, abort discovery, and
 * disconnect/cancel/reject every connection in the connection hash.
 * Returns the hci_req_run() result (-ENODATA when nothing was queued).
 */
1339 static int clean_up_hci_state(struct hci_dev *hdev)
1341 struct hci_request req;
1342 struct hci_conn *conn;
1343 bool discov_stopped;
1346 hci_req_init(&req, hdev);
1348 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1349 test_bit(HCI_PSCAN, &hdev->flags)) {
/* Scan currently enabled in some form; write the (elided) disable value. */
1351 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1354 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1355 disable_advertising(&req);
1357 discov_stopped = hci_stop_discovery(&req);
/* Tear down every tracked connection according to its current state. */
1359 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1360 struct hci_cp_disconnect dc;
1361 struct hci_cp_reject_conn_req rej;
1363 switch (conn->state) {
/* Established link: disconnect with "Terminated due to Power Off". */
1366 dc.handle = cpu_to_le16(conn->handle);
1367 dc.reason = 0x15; /* Terminated due to Power Off */
1368 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing connection still being created: cancel it per link type. */
1371 if (conn->type == LE_LINK)
1372 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1374 else if (conn->type == ACL_LINK)
1375 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it per link type. */
1379 bacpy(&rej.bdaddr, &conn->dst);
1380 rej.reason = 0x15; /* Terminated due to Power Off */
1381 if (conn->type == ACL_LINK)
1382 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1384 else if (conn->type == SCO_LINK)
1385 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1391 err = hci_req_run(&req, clean_up_hci_complete);
/* Only flip discovery state once we know the request was queued. */
1392 if (!err && discov_stopped)
1393 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the controller on or off.
 * Validates the mode byte, rejects concurrent SET_POWERED requests, and
 * either queues power_on work or cleans up HCI state before powering off.
 */
1398 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1401 struct mgmt_mode *cp = data;
1402 struct mgmt_pending_cmd *cmd;
1405 BT_DBG("request for %s", hdev->name);
/* Only 0x00 (off) and 0x01 (on) are valid mode values. */
1407 if (cp->val != 0x00 && cp->val != 0x01)
1408 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1409 MGMT_STATUS_INVALID_PARAMS);
/* Refuse while another SET_POWERED is still in flight. */
1413 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1414 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Leaving auto-off: cancel the pending timed power-off. */
1419 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1420 cancel_delayed_work(&hdev->power_off);
1423 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1425 err = mgmt_powered(hdev, 1);
/* Requested state already matches reality: just echo settings back. */
1430 if (!!cp->val == hdev_is_powered(hdev)) {
1431 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1435 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1442 queue_work(hdev->req_workqueue, &hdev->power_on);
1445 /* Disconnect connections, stop scans, etc */
1446 err = clean_up_hci_state(hdev);
/* Arm a safety timeout in case the clean-up request never completes. */
1448 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1449 HCI_POWER_OFF_TIMEOUT);
1451 /* ENODATA means there were no HCI commands queued */
1452 if (err == -ENODATA) {
1453 cancel_delayed_work(&hdev->power_off);
1454 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1460 hci_dev_unlock(hdev);
/* Broadcast a MGMT_EV_NEW_SETTINGS event with the current settings
 * bitmask to all mgmt sockets except @skip.
 */
1464 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1466 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1468 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
/* Public wrapper: emit a New Settings event to every mgmt socket
 * (no socket is skipped).
 */
1472 int mgmt_new_settings(struct hci_dev *hdev)
1474 return new_settings(hdev, NULL);
/* NOTE(review): member of struct cmd_lookup — the struct header and its
 * other members (presumably a struct sock *sk, used by settings_rsp
 * below) are elided from this chunk; verify against the full source.
 */
1479 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings, unlink it, remember the first socket seen in the
 * cmd_lookup match (with an extra reference) and free the command.
 */
1483 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1485 struct cmd_lookup *match = data;
1487 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1489 list_del(&cmd->list);
1491 if (match->sk == NULL) {
/* First responder: keep its socket so the caller can skip it later. */
1492 match->sk = cmd->sk;
1493 sock_hold(match->sk);
1496 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
1499 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1503 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1504 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: if the command installed its own
 * cmd_complete handler, invoke it with the status and remove the
 * command; otherwise fall back to a plain command-status response.
 */
1507 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1509 if (cmd->cmd_complete) {
1512 cmd->cmd_complete(cmd, *status);
1513 mgmt_pending_remove(cmd);
1518 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's own stored
 * parameters back as the command-complete payload.
 */
1521 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1523 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1524 cmd->param, cmd->param_len);
/* cmd_complete handler for commands whose stored parameters begin with
 * a struct mgmt_addr_info: return only that address portion.
 */
1527 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1529 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1530 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled by the
 * host, SUCCESS otherwise.
 */
1533 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1535 if (!lmp_bredr_capable(hdev))
1536 return MGMT_STATUS_NOT_SUPPORTED;
1537 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1538 return MGMT_STATUS_REJECTED;
1540 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled by the host,
 * SUCCESS otherwise.
 */
1543 static u8 mgmt_le_support(struct hci_dev *hdev)
1545 if (!lmp_le_capable(hdev))
1546 return MGMT_STATUS_NOT_SUPPORTED;
1547 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1548 return MGMT_STATUS_REJECTED;
1550 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for SET_DISCOVERABLE: on failure report
 * the error and clear the limited-discoverable flag; on success update
 * the HCI_DISCOVERABLE flag, (re)arm the discoverable timeout, answer
 * the pending command and broadcast the new settings.
 */
1553 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1556 struct mgmt_pending_cmd *cmd;
1557 struct mgmt_mode *cp;
1558 struct hci_request req;
1561 BT_DBG("status 0x%02x", status);
1565 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* Controller reported an error: translate and notify the requester. */
1570 u8 mgmt_err = mgmt_status(status);
1571 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1572 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Enabling path: set the flag and arm the timed switch-off if any. */
1578 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1580 if (hdev->discov_timeout > 0) {
1581 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1582 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
/* Disabling path: clear the flag. */
1586 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1589 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1592 new_settings(hdev, cmd->sk);
1594 /* When the discoverable mode gets changed, make sure
1595 * that class of device has the limited discoverable
1596 * bit correctly set. Also update page scan based on whitelist
1599 hci_req_init(&req, hdev);
1600 __hci_update_page_scan(&req);
1602 hci_req_run(&req, NULL);
1605 mgmt_pending_remove(cmd);
1608 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Mode byte: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable (which requires a timeout).
 * Validates parameters, handles the powered-off and no-change fast
 * paths, then builds an HCI request (IAC LAP + scan enable for BR/EDR,
 * advertising data update for LE) completed by set_discoverable_complete.
 */
1611 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1614 struct mgmt_cp_set_discoverable *cp = data;
1615 struct mgmt_pending_cmd *cmd;
1616 struct hci_request req;
1621 BT_DBG("request for %s", hdev->name);
/* At least one transport must be enabled for discoverability. */
1623 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1624 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1625 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1626 MGMT_STATUS_REJECTED);
1628 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1630 MGMT_STATUS_INVALID_PARAMS);
1632 timeout = __le16_to_cpu(cp->timeout);
1634 /* Disabling discoverable requires that no timeout is set,
1635 * and enabling limited discoverable requires a timeout.
1637 if ((cp->val == 0x00 && timeout > 0) ||
1638 (cp->val == 0x02 && timeout == 0))
1639 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1640 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1644 if (!hdev_is_powered(hdev) && timeout > 0) {
1645 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1646 MGMT_STATUS_NOT_POWERED);
/* Serialize against in-flight discoverable/connectable changes. */
1650 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1651 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1652 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; reject otherwise. */
1657 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1659 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the flag, no HCI traffic needed. */
1663 if (!hdev_is_powered(hdev)) {
1664 bool changed = false;
1666 /* Setting limited discoverable when powered off is
1667 * not a valid operation since it requires a timeout
1668 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1670 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1671 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1675 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1680 err = new_settings(hdev, sk);
1685 /* If the current mode is the same, then just update the timeout
1686 * value with the new value. And if only the timeout gets updated,
1687 * then no need for any HCI transactions.
1689 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1690 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1691 HCI_LIMITED_DISCOVERABLE)) {
1692 cancel_delayed_work(&hdev->discov_off);
1693 hdev->discov_timeout = timeout;
1695 if (cp->val && hdev->discov_timeout > 0) {
1696 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1697 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1701 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1705 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1711 /* Cancel any potential discoverable timeout that might be
1712 * still active and store new timeout value. The arming of
1713 * the timeout happens in the complete handler.
1715 cancel_delayed_work(&hdev->discov_off);
1716 hdev->discov_timeout = timeout;
1718 /* Limited discoverable mode */
1719 if (cp->val == 0x02)
1720 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1722 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1724 hci_req_init(&req, hdev);
1726 /* The procedure for LE-only controllers is much simpler - just
1727 * update the advertising data.
1729 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1735 struct hci_cp_write_current_iac_lap hci_cp;
1737 if (cp->val == 0x02) {
1738 /* Limited discoverable mode */
1739 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9e8b00 and GIAC 0x9e8b33, little-endian byte order. */
1740 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1741 hci_cp.iac_lap[1] = 0x8b;
1742 hci_cp.iac_lap[2] = 0x9e;
1743 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1744 hci_cp.iac_lap[4] = 0x8b;
1745 hci_cp.iac_lap[5] = 0x9e;
1747 /* General discoverable mode */
1749 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1750 hci_cp.iac_lap[1] = 0x8b;
1751 hci_cp.iac_lap[2] = 0x9e;
/* Payload size: one LAP is 3 bytes, plus the num_iac byte itself. */
1754 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1755 (hci_cp.num_iac * 3) + 1, &hci_cp);
1757 scan |= SCAN_INQUIRY;
1759 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1762 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1765 update_adv_data(&req);
1767 err = hci_req_run(&req, set_discoverable_complete);
1769 mgmt_pending_remove(cmd);
1772 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing "fast connectable":
 * interlaced scan with a 160 ms interval when @enable, the standard
 * 1.28 s default otherwise. Commands are only added when the requested
 * values differ from the controller's current ones. Skipped entirely on
 * controllers without BR/EDR enabled or older than Bluetooth 1.2.
 */
1776 static void write_fast_connectable(struct hci_request *req, bool enable)
1778 struct hci_dev *hdev = req->hdev;
1779 struct hci_cp_write_page_scan_activity acp;
1782 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* Page scan type/activity commands require Bluetooth 1.2 or later. */
1785 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1789 type = PAGE_SCAN_TYPE_INTERLACED;
1791 /* 160 msec page scan interval */
1792 acp.interval = cpu_to_le16(0x0100);
1794 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1796 /* default 1.28 sec page scan */
1797 acp.interval = cpu_to_le16(0x0800);
/* Page scan window of 11.25 ms in both modes. */
1800 acp.window = cpu_to_le16(0x0012);
/* Only write values that actually change controller state. */
1802 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1803 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1804 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1807 if (hdev->page_scan_type != type)
1808 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for SET_CONNECTABLE: propagate errors
 * to the requester, otherwise update the CONNECTABLE (and, when turning
 * connectable off, DISCOVERABLE) flags, answer the pending command and,
 * if anything changed, broadcast settings and refresh scanning state.
 */
1811 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1814 struct mgmt_pending_cmd *cmd;
1815 struct mgmt_mode *cp;
1816 bool conn_changed, discov_changed;
1818 BT_DBG("status 0x%02x", status);
1822 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1827 u8 mgmt_err = mgmt_status(status);
1828 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* Enabling connectable can never change discoverable state. */
1834 conn_changed = !hci_dev_test_and_set_flag(hdev,
1836 discov_changed = false;
/* Disabling connectable also clears discoverable. */
1838 conn_changed = hci_dev_test_and_clear_flag(hdev,
1840 discov_changed = hci_dev_test_and_clear_flag(hdev,
1844 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1846 if (conn_changed || discov_changed) {
1847 new_settings(hdev, cmd->sk);
1848 hci_update_page_scan(hdev);
1850 mgmt_update_adv_data(hdev);
1851 hci_update_background_scan(hdev);
1855 mgmt_pending_remove(cmd);
1858 hci_dev_unlock(hdev);
/* Flag-only connectable update used when no HCI traffic is needed
 * (powered off, or the built request was empty). Clearing connectable
 * also clears discoverable. Responds to the requester and, on change,
 * refreshes scan state and broadcasts New Settings.
 */
1861 static int set_connectable_update_settings(struct hci_dev *hdev,
1862 struct sock *sk, u8 val)
1864 bool changed = false;
1867 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1871 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1873 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1874 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1877 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1882 hci_update_page_scan(hdev);
1883 hci_update_background_scan(hdev);
1884 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: toggle whether the controller accepts
 * connections. Validates the mode byte, takes the flag-only path when
 * powered off, rejects concurrent discoverable/connectable changes, then
 * builds an HCI request (scan enable for BR/EDR, advertising data/params
 * for LE) completed by set_connectable_complete.
 */
1890 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1893 struct mgmt_mode *cp = data;
1894 struct mgmt_pending_cmd *cmd;
1895 struct hci_request req;
1899 BT_DBG("request for %s", hdev->name);
1901 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1902 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1903 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1904 MGMT_STATUS_REJECTED);
1906 if (cp->val != 0x00 && cp->val != 0x01)
1907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1908 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI commands possible, just update the flags. */
1912 if (!hdev_is_powered(hdev)) {
1913 err = set_connectable_update_settings(hdev, sk, cp->val);
1917 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1918 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1919 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1924 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1930 hci_req_init(&req, hdev);
1932 /* If BR/EDR is not enabled and we disable advertising as a
1933 * by-product of disabling connectable, we need to update the
1934 * advertising flags.
1936 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1938 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1939 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1941 update_adv_data(&req);
1942 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1946 /* If we don't have any whitelist entries just
1947 * disable all scanning. If there are entries
1948 * and we had both page and inquiry scanning
1949 * enabled then fall back to only page scanning.
1950 * Otherwise no changes are needed.
1952 if (list_empty(&hdev->whitelist))
1953 scan = SCAN_DISABLED;
1954 else if (test_bit(HCI_ISCAN, &hdev->flags))
1957 goto no_scan_update;
/* Dropping inquiry scan: stop any running discoverable timeout. */
1959 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1960 hdev->discov_timeout > 0)
1961 cancel_delayed_work(&hdev->discov_off);
1964 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1968 /* Update the advertising parameters if necessary */
1969 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
1970 enable_advertising(&req);
1972 err = hci_req_run(&req, set_connectable_complete);
1974 mgmt_pending_remove(cmd);
/* Empty request: fall back to the settings-only update path. */
1975 if (err == -ENODATA)
1976 err = set_connectable_update_settings(hdev, sk,
1982 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag toggle, no HCI
 * traffic. Responds with current settings and broadcasts New Settings
 * when the flag actually changed.
 */
1986 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1989 struct mgmt_mode *cp = data;
1993 BT_DBG("request for %s", hdev->name);
1995 if (cp->val != 0x00 && cp->val != 0x01)
1996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1997 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear return the previous state, so "changed" is
 * true only when the flag flips. */
2002 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2004 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2006 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2011 err = new_settings(hdev, sk);
2014 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication
 * (HCI Write Authentication Enable). Requires BR/EDR support; when
 * powered off only the host flag is flipped, otherwise the command is
 * sent to the controller with a pending-command tracking entry.
 */
2018 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2021 struct mgmt_mode *cp = data;
2022 struct mgmt_pending_cmd *cmd;
2026 BT_DBG("request for %s", hdev->name);
2028 status = mgmt_bredr_support(hdev);
2030 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2033 if (cp->val != 0x00 && cp->val != 0x01)
2034 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2035 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update, no HCI traffic. */
2039 if (!hdev_is_powered(hdev)) {
2040 bool changed = false;
2042 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2043 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2047 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2052 err = new_settings(hdev, sk);
2057 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2058 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: respond directly. */
2065 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2066 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2070 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2076 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2078 mgmt_pending_remove(cmd);
2083 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Requires BR/EDR and SSP controller support. Powered off: flag-only
 * update (disabling SSP also clears HS). Powered on: sends HCI Write
 * SSP Mode, first turning off SSP debug mode when disabling.
 */
2087 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2089 struct mgmt_mode *cp = data;
2090 struct mgmt_pending_cmd *cmd;
2094 BT_DBG("request for %s", hdev->name);
2096 status = mgmt_bredr_support(hdev);
2098 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2100 if (!lmp_ssp_capable(hdev))
2101 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2102 MGMT_STATUS_NOT_SUPPORTED);
2104 if (cp->val != 0x00 && cp->val != 0x01)
2105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2106 MGMT_STATUS_INVALID_PARAMS);
2110 if (!hdev_is_powered(hdev)) {
2114 changed = !hci_dev_test_and_set_flag(hdev,
2117 changed = hci_dev_test_and_clear_flag(hdev,
2120 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so disabling SSP clears HS too. */
2123 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2126 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2131 err = new_settings(hdev, sk);
2136 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2137 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2142 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2143 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2147 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP while debug keys are in use: turn debug mode off
 * first (cp->val is 0x00 here, reused as the mode payload). */
2153 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2154 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2155 sizeof(cp->val), &cp->val);
2157 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2159 mgmt_pending_remove(cmd);
2164 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggle the host-side High Speed (802.11 AMP)
 * flag. Requires BR/EDR, SSP capability and SSP being enabled; no HCI
 * command is issued, only the HCI_HS_ENABLED flag changes. Disabling
 * while powered is rejected on the path visible below.
 */
2168 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2170 struct mgmt_mode *cp = data;
2175 BT_DBG("request for %s", hdev->name);
2177 status = mgmt_bredr_support(hdev);
2179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2181 if (!lmp_ssp_capable(hdev))
2182 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2183 MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be active. */
2185 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2187 MGMT_STATUS_REJECTED);
2189 if (cp->val != 0x00 && cp->val != 0x01)
2190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2191 MGMT_STATUS_INVALID_PARAMS);
/* Avoid racing with an in-flight SET_SSP, which may clear HS. */
2195 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2196 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2202 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2204 if (hdev_is_powered(hdev)) {
2205 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2206 MGMT_STATUS_REJECTED);
2210 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2213 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2218 err = new_settings(hdev, sk);
2221 hci_dev_unlock(hdev);
/* HCI request completion handler for SET_LE: fail or answer all pending
 * SET_LE commands, broadcast New Settings, and — when LE ended up
 * enabled — refresh advertising data, scan response data and the
 * background scan in a fresh request.
 */
2225 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2227 struct cmd_lookup match = { NULL, hdev };
2232 u8 mgmt_err = mgmt_status(status);
2234 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2239 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
/* match.sk was filled by settings_rsp; that socket is skipped. */
2241 new_settings(hdev, match.sk);
2246 /* Make sure the controller has a good default for
2247 * advertising data. Restrict the update to when LE
2248 * has actually been enabled. During power on, the
2249 * update in powered_update_hci will take care of it.
2251 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2252 struct hci_request req;
2254 hci_req_init(&req, hdev);
2255 update_adv_data(&req);
2256 update_scan_rsp_data(&req);
2257 __hci_update_background_scan(&req);
2258 hci_req_run(&req, NULL);
2262 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable Low Energy support via HCI
 * Write LE Host Supported. LE-only controllers cannot have LE switched
 * off. A flag-only fast path covers the powered-off / already-matching
 * cases; otherwise the HCI request completes in le_enable_complete.
 */
2265 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2267 struct mgmt_mode *cp = data;
2268 struct hci_cp_write_le_host_supported hci_cp;
2269 struct mgmt_pending_cmd *cmd;
2270 struct hci_request req;
2274 BT_DBG("request for %s", hdev->name);
2276 if (!lmp_le_capable(hdev))
2277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2278 MGMT_STATUS_NOT_SUPPORTED);
2280 if (cp->val != 0x00 && cp->val != 0x01)
2281 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2282 MGMT_STATUS_INVALID_PARAMS);
2284 /* Bluetooth single mode LE only controllers or dual-mode
2285 * controllers configured as LE only devices, do not allow
2286 * switching LE off. These have either LE enabled explicitly
2287 * or BR/EDR has been previously switched off.
2289 * When trying to enable an already enabled LE, then gracefully
2290 * send a positive response. Trying to disable it however will
2291 * result into rejection.
2293 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2294 if (cp->val == 0x01)
2295 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2297 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2298 MGMT_STATUS_REJECTED);
2304 enabled = lmp_host_le_capable(hdev);
/* No HCI traffic needed: powered off or controller already matches. */
2306 if (!hdev_is_powered(hdev) || val == enabled) {
2307 bool changed = false;
2309 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2310 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also drops the advertising setting. */
2314 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2315 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2319 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2324 err = new_settings(hdev, sk);
2329 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2330 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2331 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2336 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2342 hci_req_init(&req, hdev);
2344 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE + BR/EDR is never advertised to remotes. */
2348 hci_cp.simul = 0x00;
/* Stop advertising before turning LE support off. */
2350 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2351 disable_advertising(&req);
2354 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2357 err = hci_req_run(&req, le_enable_complete);
2359 mgmt_pending_remove(cmd);
2362 hci_dev_unlock(hdev);
2366 /* This is a helper function to test for pending mgmt commands that can
2367 * cause CoD or EIR HCI commands. We can only allow one such pending
2368 * mgmt command at a time since otherwise we cannot easily track what
2369 * the current values are, will be, and based on that calculate if a new
2370 * HCI command needs to be sent and if yes with what value.
2372 static bool pending_eir_or_class(struct hci_dev *hdev)
2374 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that touches CoD/EIR state. */
2376 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2377 switch (cmd->opcode) {
2378 case MGMT_OP_ADD_UUID:
2379 case MGMT_OP_REMOVE_UUID:
2380 case MGMT_OP_SET_DEV_CLASS:
2381 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used to detect 16/32-bit shortened UUIDs.
 */
2389 static const u8 bluetooth_base_uuid[] = {
2390 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2391 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID by whether it is derived from
 * the Bluetooth Base UUID: if the low 12 bytes match the base, the
 * distinguishing value sits in bytes 12-15 (16- vs 32-bit form);
 * otherwise it is a full 128-bit UUID. Return values are elided here.
 */
2394 static u8 get_uuid_size(const u8 *uuid)
/* Non-base prefix means a genuine 128-bit UUID. */
2398 if (memcmp(uuid, bluetooth_base_uuid, 12))
2401 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for ADD_UUID / REMOVE_UUID / SET_DEV_CLASS: look up
 * the pending command for @mgmt_op and complete it with the translated
 * status plus the current 3-byte class of device.
 */
2408 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2410 struct mgmt_pending_cmd *cmd;
2414 cmd = pending_find(mgmt_op, hdev);
2418 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2419 mgmt_status(status), hdev->dev_class, 3);
2421 mgmt_pending_remove(cmd);
2424 hci_dev_unlock(hdev);
/* HCI request completion callback for add_uuid(); delegates to the
 * shared class-update completion handler.
 */
2427 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2429 BT_DBG("status 0x%02x", status);
2431 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append the UUID to hdev->uuids and build an
 * HCI request (presumably CoD/EIR updates — the hci_req_add calls are
 * elided here). -ENODATA from hci_req_run means nothing needed sending,
 * in which case the command completes immediately with the current CoD.
 */
2434 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2436 struct mgmt_cp_add_uuid *cp = data;
2437 struct mgmt_pending_cmd *cmd;
2438 struct hci_request req;
2439 struct bt_uuid *uuid;
2442 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be pending at a time. */
2446 if (pending_eir_or_class(hdev)) {
2447 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2452 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2458 memcpy(uuid->uuid, cp->uuid, 16);
2459 uuid->svc_hint = cp->svc_hint;
2460 uuid->size = get_uuid_size(cp->uuid);
2462 list_add_tail(&uuid->list, &hdev->uuids);
2464 hci_req_init(&req, hdev);
2469 err = hci_req_run(&req, add_uuid_complete);
2471 if (err != -ENODATA)
/* Nothing queued: reply right away with the unchanged class. */
2474 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2475 hdev->dev_class, 3);
2479 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2488 hci_dev_unlock(hdev);
/* Arm the service cache: on a powered controller, set HCI_SERVICE_CACHE
 * (if not already set) and schedule the delayed service_cache work.
 * Return value paths are elided in this chunk.
 */
2492 static bool enable_service_cache(struct hci_dev *hdev)
2494 if (!hdev_is_powered(hdev))
2497 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2498 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion callback for remove_uuid(); delegates to the
 * shared class-update completion handler.
 */
2506 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2508 BT_DBG("status 0x%02x", status);
2510 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID clears the whole list
 * (optionally deferring the EIR update via the service cache); otherwise
 * every matching entry is removed, with INVALID_PARAMS if none matched.
 * The subsequent HCI request mirrors add_uuid()'s flow.
 */
2513 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2516 struct mgmt_cp_remove_uuid *cp = data;
2517 struct mgmt_pending_cmd *cmd;
2518 struct bt_uuid *match, *tmp;
2519 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2520 struct hci_request req;
2523 BT_DBG("request for %s", hdev->name);
2527 if (pending_eir_or_class(hdev)) {
2528 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard (all-zero) UUID: drop every registered UUID. */
2533 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2534 hci_uuids_clear(hdev);
/* Service cache armed: EIR update is deferred, answer now. */
2536 if (enable_service_cache(hdev)) {
2537 err = mgmt_cmd_complete(sk, hdev->id,
2538 MGMT_OP_REMOVE_UUID,
2539 0, hdev->dev_class, 3);
/* Specific UUID: unlink all matching entries. */
2548 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2549 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2552 list_del(&match->list);
/* No entry matched the requested UUID. */
2558 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2559 MGMT_STATUS_INVALID_PARAMS);
2564 hci_req_init(&req, hdev);
2569 err = hci_req_run(&req, remove_uuid_complete);
2571 if (err != -ENODATA)
2574 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2575 hdev->dev_class, 3);
2579 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2588 hci_dev_unlock(hdev);
/* HCI request completion callback for set_dev_class(); delegates to the
 * shared class-update completion handler.
 */
2592 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2594 BT_DBG("status 0x%02x", status);
2596 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: store the new major/minor class of
 * device and push the resulting CoD (and EIR) to the controller when
 * powered. Reserved bits in minor (low 2) and major (high 3) must be
 * zero. Flushes a pending service-cache update first.
 */
2599 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2602 struct mgmt_cp_set_dev_class *cp = data;
2603 struct mgmt_pending_cmd *cmd;
2604 struct hci_request req;
2607 BT_DBG("request for %s", hdev->name);
2609 if (!lmp_bredr_capable(hdev))
2610 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2611 MGMT_STATUS_NOT_SUPPORTED);
2615 if (pending_eir_or_class(hdev)) {
2616 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reject values with reserved CoD bits set. */
2621 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2622 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2623 MGMT_STATUS_INVALID_PARAMS);
2627 hdev->major_class = cp->major;
2628 hdev->minor_class = cp->minor;
/* Powered off: nothing to send, reply with the stored class. */
2630 if (!hdev_is_powered(hdev)) {
2631 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2632 hdev->dev_class, 3);
2636 hci_req_init(&req, hdev);
2638 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
/* Drop the lock while synchronously cancelling the cache work
 * (the work handler itself takes hdev's lock). */
2639 hci_dev_unlock(hdev);
2640 cancel_delayed_work_sync(&hdev->service_cache);
2647 err = hci_req_run(&req, set_class_complete);
2649 if (err != -ENODATA)
2652 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2653 hdev->dev_class, 3);
2657 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2666 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list. Validates the key count against the maximum
 * that fits in a u16-sized payload, the exact message length, the
 * debug_keys flag, and each key's address type/key type before clearing
 * and repopulating the key store.
 */
2670 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2673 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on keys such that the total payload fits in 16 bits. */
2674 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2675 sizeof(struct mgmt_link_key_info));
2676 u16 key_count, expected_len;
2680 BT_DBG("request for %s", hdev->name);
2682 if (!lmp_bredr_capable(hdev))
2683 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2684 MGMT_STATUS_NOT_SUPPORTED);
2686 key_count = __le16_to_cpu(cp->key_count);
2687 if (key_count > max_key_count) {
2688 BT_ERR("load_link_keys: too big key_count value %u",
2690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2691 MGMT_STATUS_INVALID_PARAMS);
/* The message length must match the declared key count exactly. */
2694 expected_len = sizeof(*cp) + key_count *
2695 sizeof(struct mgmt_link_key_info);
2696 if (expected_len != len) {
2697 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2699 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2700 MGMT_STATUS_INVALID_PARAMS);
2703 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2704 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2705 MGMT_STATUS_INVALID_PARAMS);
2707 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating any state. */
2710 for (i = 0; i < key_count; i++) {
2711 struct mgmt_link_key_info *key = &cp->keys[i];
2713 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2714 return mgmt_cmd_status(sk, hdev->id,
2715 MGMT_OP_LOAD_LINK_KEYS,
2716 MGMT_STATUS_INVALID_PARAMS);
2721 hci_link_keys_clear(hdev);
2724 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2726 changed = hci_dev_test_and_clear_flag(hdev,
2727 HCI_KEEP_DEBUG_KEYS);
2730 new_settings(hdev, NULL);
2732 for (i = 0; i < key_count; i++) {
2733 struct mgmt_link_key_info *key = &cp->keys[i];
2735 /* Always ignore debug keys and require a new pairing if
2736 * the user wants to use them.
2738 if (key->type == HCI_LK_DEBUG_COMBINATION)
2741 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2742 key->type, key->pin_len, NULL);
2745 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2747 hci_dev_unlock(hdev);
/* Emit a MGMT_EV_DEVICE_UNPAIRED event for the given address to all
 * mgmt sockets except @skip_sk.
 */
2752 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2753 u8 addr_type, struct sock *skip_sk)
2755 struct mgmt_ev_device_unpaired ev;
2757 bacpy(&ev.addr.bdaddr, bdaddr);
2758 ev.addr.type = addr_type;
2760 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete all keys (link key for BR/EDR;
 * IRK and LTK for LE) for the given address and optionally disconnect
 * the existing link. When a disconnect is issued, the reply is deferred
 * to the disconnect completion via addr_cmd_complete.
 */
2764 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2767 struct mgmt_cp_unpair_device *cp = data;
2768 struct mgmt_rp_unpair_device rp;
2769 struct hci_cp_disconnect dc;
2770 struct mgmt_pending_cmd *cmd;
2771 struct hci_conn *conn;
/* Echo the target address back in every response. */
2774 memset(&rp, 0, sizeof(rp));
2775 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2776 rp.addr.type = cp->addr.type;
2778 if (!bdaddr_type_is_valid(cp->addr.type))
2779 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2780 MGMT_STATUS_INVALID_PARAMS,
2783 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2784 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2785 MGMT_STATUS_INVALID_PARAMS,
2790 if (!hdev_is_powered(hdev)) {
2791 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2792 MGMT_STATUS_NOT_POWERED, &rp,
2797 if (cp->addr.type == BDADDR_BREDR) {
2798 /* If disconnection is requested, then look up the
2799 * connection. If the remote device is connected, it
2800 * will be later used to terminate the link.
2802 * Setting it to NULL explicitly will cause no
2803 * termination of the link.
2806 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2811 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE address: clean up the LE connection and LE key material. */
2815 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2818 /* Defer clearing up the connection parameters
2819 * until closing to give a chance of keeping
2820 * them if a repairing happens.
2822 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2824 /* If disconnection is not requested, then
2825 * clear the connection variable so that the
2826 * link is not terminated.
2828 if (!cp->disconnect)
2832 if (cp->addr.type == BDADDR_LE_PUBLIC)
2833 addr_type = ADDR_LE_DEV_PUBLIC;
2835 addr_type = ADDR_LE_DEV_RANDOM;
2837 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2839 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* Key removal failed: the device was never paired. */
2843 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2844 MGMT_STATUS_NOT_PAIRED, &rp,
2849 /* If the connection variable is set, then termination of the
2850 * link is requested.
2853 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2855 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2859 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2866 cmd->cmd_complete = addr_cmd_complete;
2868 dc.handle = cpu_to_le16(conn->handle);
2869 dc.reason = 0x13; /* Remote User Terminated Connection */
2870 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2872 mgmt_pending_remove(cmd);
2875 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the ACL or LE connection to the
 * given address. Fails when the adapter is down, a DISCONNECT is already
 * pending, or no matching connection exists; otherwise starts the HCI
 * disconnect and completes the command via generic_cmd_complete.
 */
2879 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2882 struct mgmt_cp_disconnect *cp = data;
2883 struct mgmt_rp_disconnect rp;
2884 struct mgmt_pending_cmd *cmd;
2885 struct hci_conn *conn;
2890 memset(&rp, 0, sizeof(rp));
2891 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2892 rp.addr.type = cp->addr.type;
2894 if (!bdaddr_type_is_valid(cp->addr.type))
2895 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2896 MGMT_STATUS_INVALID_PARAMS,
2901 if (!test_bit(HCI_UP, &hdev->flags)) {
2902 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2903 MGMT_STATUS_NOT_POWERED, &rp,
2908 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2909 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2910 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Pick the connection hash by transport from the address type. */
2914 if (cp->addr.type == BDADDR_BREDR)
2915 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2918 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2920 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2921 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2922 MGMT_STATUS_NOT_CONNECTED, &rp,
2927 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2933 cmd->cmd_complete = generic_cmd_complete;
2935 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2937 mgmt_pending_remove(cmd);
2940 hci_dev_unlock(hdev);
2944 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2946 switch (link_type) {
2948 switch (addr_type) {
2949 case ADDR_LE_DEV_PUBLIC:
2950 return BDADDR_LE_PUBLIC;
2953 /* Fallback to LE Random address type */
2954 return BDADDR_LE_RANDOM;
2958 /* Fallback to BR/EDR type */
2959 return BDADDR_BREDR;
2963 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2966 struct mgmt_rp_get_connections *rp;
2976 if (!hdev_is_powered(hdev)) {
2977 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2978 MGMT_STATUS_NOT_POWERED);
2983 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2984 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2988 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2989 rp = kmalloc(rp_len, GFP_KERNEL);
2996 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2997 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2999 bacpy(&rp->addr[i].bdaddr, &c->dst);
3000 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3001 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3006 rp->conn_count = cpu_to_le16(i);
3008 /* Recalculate length in case of filtered SCO connections, etc */
3009 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3011 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3017 hci_dev_unlock(hdev);
3021 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3022 struct mgmt_cp_pin_code_neg_reply *cp)
3024 struct mgmt_pending_cmd *cmd;
3027 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3032 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3033 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3035 mgmt_pending_remove(cmd);
3040 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3043 struct hci_conn *conn;
3044 struct mgmt_cp_pin_code_reply *cp = data;
3045 struct hci_cp_pin_code_reply reply;
3046 struct mgmt_pending_cmd *cmd;
3053 if (!hdev_is_powered(hdev)) {
3054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3055 MGMT_STATUS_NOT_POWERED);
3059 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3061 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3062 MGMT_STATUS_NOT_CONNECTED);
3066 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3067 struct mgmt_cp_pin_code_neg_reply ncp;
3069 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3071 BT_ERR("PIN code is not 16 bytes long");
3073 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3075 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3076 MGMT_STATUS_INVALID_PARAMS);
3081 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3087 cmd->cmd_complete = addr_cmd_complete;
3089 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3090 reply.pin_len = cp->pin_len;
3091 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3093 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3095 mgmt_pending_remove(cmd);
3098 hci_dev_unlock(hdev);
3102 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3105 struct mgmt_cp_set_io_capability *cp = data;
3109 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3110 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3111 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3115 hdev->io_capability = cp->io_capability;
3117 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3118 hdev->io_capability);
3120 hci_dev_unlock(hdev);
3122 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3126 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3128 struct hci_dev *hdev = conn->hdev;
3129 struct mgmt_pending_cmd *cmd;
3131 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3132 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3135 if (cmd->user_data != conn)
3144 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3146 struct mgmt_rp_pair_device rp;
3147 struct hci_conn *conn = cmd->user_data;
3150 bacpy(&rp.addr.bdaddr, &conn->dst);
3151 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3153 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3154 status, &rp, sizeof(rp));
3156 /* So we don't get further callbacks for this connection */
3157 conn->connect_cfm_cb = NULL;
3158 conn->security_cfm_cb = NULL;
3159 conn->disconn_cfm_cb = NULL;
3161 hci_conn_drop(conn);
3163 /* The device is paired so there is no need to remove
3164 * its connection parameters anymore.
3166 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3173 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3175 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3176 struct mgmt_pending_cmd *cmd;
3178 cmd = find_pairing(conn);
3180 cmd->cmd_complete(cmd, status);
3181 mgmt_pending_remove(cmd);
3185 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3187 struct mgmt_pending_cmd *cmd;
3189 BT_DBG("status %u", status);
3191 cmd = find_pairing(conn);
3193 BT_DBG("Unable to find a pending command");
3197 cmd->cmd_complete(cmd, mgmt_status(status));
3198 mgmt_pending_remove(cmd);
3201 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3203 struct mgmt_pending_cmd *cmd;
3205 BT_DBG("status %u", status);
3210 cmd = find_pairing(conn);
3212 BT_DBG("Unable to find a pending command");
3216 cmd->cmd_complete(cmd, mgmt_status(status));
3217 mgmt_pending_remove(cmd);
3220 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3223 struct mgmt_cp_pair_device *cp = data;
3224 struct mgmt_rp_pair_device rp;
3225 struct mgmt_pending_cmd *cmd;
3226 u8 sec_level, auth_type;
3227 struct hci_conn *conn;
3232 memset(&rp, 0, sizeof(rp));
3233 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3234 rp.addr.type = cp->addr.type;
3236 if (!bdaddr_type_is_valid(cp->addr.type))
3237 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3238 MGMT_STATUS_INVALID_PARAMS,
3241 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3243 MGMT_STATUS_INVALID_PARAMS,
3248 if (!hdev_is_powered(hdev)) {
3249 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3250 MGMT_STATUS_NOT_POWERED, &rp,
3255 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3256 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3257 MGMT_STATUS_ALREADY_PAIRED, &rp,
3262 sec_level = BT_SECURITY_MEDIUM;
3263 auth_type = HCI_AT_DEDICATED_BONDING;
3265 if (cp->addr.type == BDADDR_BREDR) {
3266 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3271 /* Convert from L2CAP channel address type to HCI address type
3273 if (cp->addr.type == BDADDR_LE_PUBLIC)
3274 addr_type = ADDR_LE_DEV_PUBLIC;
3276 addr_type = ADDR_LE_DEV_RANDOM;
3278 /* When pairing a new device, it is expected to remember
3279 * this device for future connections. Adding the connection
3280 * parameter information ahead of time allows tracking
3281 * of the slave preferred values and will speed up any
3282 * further connection establishment.
3284 * If connection parameters already exist, then they
3285 * will be kept and this function does nothing.
3287 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3289 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3290 sec_level, HCI_LE_CONN_TIMEOUT,
3297 if (PTR_ERR(conn) == -EBUSY)
3298 status = MGMT_STATUS_BUSY;
3299 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3300 status = MGMT_STATUS_NOT_SUPPORTED;
3301 else if (PTR_ERR(conn) == -ECONNREFUSED)
3302 status = MGMT_STATUS_REJECTED;
3304 status = MGMT_STATUS_CONNECT_FAILED;
3306 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3307 status, &rp, sizeof(rp));
3311 if (conn->connect_cfm_cb) {
3312 hci_conn_drop(conn);
3313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3314 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3318 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3321 hci_conn_drop(conn);
3325 cmd->cmd_complete = pairing_complete;
3327 /* For LE, just connecting isn't a proof that the pairing finished */
3328 if (cp->addr.type == BDADDR_BREDR) {
3329 conn->connect_cfm_cb = pairing_complete_cb;
3330 conn->security_cfm_cb = pairing_complete_cb;
3331 conn->disconn_cfm_cb = pairing_complete_cb;
3333 conn->connect_cfm_cb = le_pairing_complete_cb;
3334 conn->security_cfm_cb = le_pairing_complete_cb;
3335 conn->disconn_cfm_cb = le_pairing_complete_cb;
3338 conn->io_capability = cp->io_cap;
3339 cmd->user_data = hci_conn_get(conn);
3341 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3342 hci_conn_security(conn, sec_level, auth_type, true)) {
3343 cmd->cmd_complete(cmd, 0);
3344 mgmt_pending_remove(cmd);
3350 hci_dev_unlock(hdev);
3354 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3357 struct mgmt_addr_info *addr = data;
3358 struct mgmt_pending_cmd *cmd;
3359 struct hci_conn *conn;
3366 if (!hdev_is_powered(hdev)) {
3367 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3368 MGMT_STATUS_NOT_POWERED);
3372 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3374 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3375 MGMT_STATUS_INVALID_PARAMS);
3379 conn = cmd->user_data;
3381 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3382 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3383 MGMT_STATUS_INVALID_PARAMS);
3387 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3388 mgmt_pending_remove(cmd);
3390 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3391 addr, sizeof(*addr));
3393 hci_dev_unlock(hdev);
3397 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3398 struct mgmt_addr_info *addr, u16 mgmt_op,
3399 u16 hci_op, __le32 passkey)
3401 struct mgmt_pending_cmd *cmd;
3402 struct hci_conn *conn;
3407 if (!hdev_is_powered(hdev)) {
3408 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3409 MGMT_STATUS_NOT_POWERED, addr,
3414 if (addr->type == BDADDR_BREDR)
3415 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3417 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3420 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3421 MGMT_STATUS_NOT_CONNECTED, addr,
3426 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3427 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3429 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3430 MGMT_STATUS_SUCCESS, addr,
3433 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3434 MGMT_STATUS_FAILED, addr,
3440 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3446 cmd->cmd_complete = addr_cmd_complete;
3448 /* Continue with pairing via HCI */
3449 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3450 struct hci_cp_user_passkey_reply cp;
3452 bacpy(&cp.bdaddr, &addr->bdaddr);
3453 cp.passkey = passkey;
3454 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3456 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3460 mgmt_pending_remove(cmd);
3463 hci_dev_unlock(hdev);
3467 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3468 void *data, u16 len)
3470 struct mgmt_cp_pin_code_neg_reply *cp = data;
3474 return user_pairing_resp(sk, hdev, &cp->addr,
3475 MGMT_OP_PIN_CODE_NEG_REPLY,
3476 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3479 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3482 struct mgmt_cp_user_confirm_reply *cp = data;
3486 if (len != sizeof(*cp))
3487 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3488 MGMT_STATUS_INVALID_PARAMS);
3490 return user_pairing_resp(sk, hdev, &cp->addr,
3491 MGMT_OP_USER_CONFIRM_REPLY,
3492 HCI_OP_USER_CONFIRM_REPLY, 0);
3495 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3496 void *data, u16 len)
3498 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3502 return user_pairing_resp(sk, hdev, &cp->addr,
3503 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3504 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3507 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3510 struct mgmt_cp_user_passkey_reply *cp = data;
3514 return user_pairing_resp(sk, hdev, &cp->addr,
3515 MGMT_OP_USER_PASSKEY_REPLY,
3516 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3519 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3520 void *data, u16 len)
3522 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3526 return user_pairing_resp(sk, hdev, &cp->addr,
3527 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3528 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3531 static void update_name(struct hci_request *req)
3533 struct hci_dev *hdev = req->hdev;
3534 struct hci_cp_write_local_name cp;
3536 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3538 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3541 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3543 struct mgmt_cp_set_local_name *cp;
3544 struct mgmt_pending_cmd *cmd;
3546 BT_DBG("status 0x%02x", status);
3550 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3557 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3558 mgmt_status(status));
3560 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3563 mgmt_pending_remove(cmd);
3566 hci_dev_unlock(hdev);
3569 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3572 struct mgmt_cp_set_local_name *cp = data;
3573 struct mgmt_pending_cmd *cmd;
3574 struct hci_request req;
3581 /* If the old values are the same as the new ones just return a
3582 * direct command complete event.
3584 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3585 !memcmp(hdev->short_name, cp->short_name,
3586 sizeof(hdev->short_name))) {
3587 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3592 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3594 if (!hdev_is_powered(hdev)) {
3595 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3597 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3602 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3608 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3614 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3616 hci_req_init(&req, hdev);
3618 if (lmp_bredr_capable(hdev)) {
3623 /* The name is stored in the scan response data and so
3624 * no need to udpate the advertising data here.
3626 if (lmp_le_capable(hdev))
3627 update_scan_rsp_data(&req);
3629 err = hci_req_run(&req, set_name_complete);
3631 mgmt_pending_remove(cmd);
3634 hci_dev_unlock(hdev);
3638 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3639 void *data, u16 data_len)
3641 struct mgmt_pending_cmd *cmd;
3644 BT_DBG("%s", hdev->name);
3648 if (!hdev_is_powered(hdev)) {
3649 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3650 MGMT_STATUS_NOT_POWERED);
3654 if (!lmp_ssp_capable(hdev)) {
3655 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3656 MGMT_STATUS_NOT_SUPPORTED);
3660 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3661 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3666 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3672 if (bredr_sc_enabled(hdev))
3673 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3676 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3679 mgmt_pending_remove(cmd);
3682 hci_dev_unlock(hdev);
3686 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3687 void *data, u16 len)
3689 struct mgmt_addr_info *addr = data;
3692 BT_DBG("%s ", hdev->name);
3694 if (!bdaddr_type_is_valid(addr->type))
3695 return mgmt_cmd_complete(sk, hdev->id,
3696 MGMT_OP_ADD_REMOTE_OOB_DATA,
3697 MGMT_STATUS_INVALID_PARAMS,
3698 addr, sizeof(*addr));
3702 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3703 struct mgmt_cp_add_remote_oob_data *cp = data;
3706 if (cp->addr.type != BDADDR_BREDR) {
3707 err = mgmt_cmd_complete(sk, hdev->id,
3708 MGMT_OP_ADD_REMOTE_OOB_DATA,
3709 MGMT_STATUS_INVALID_PARAMS,
3710 &cp->addr, sizeof(cp->addr));
3714 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3715 cp->addr.type, cp->hash,
3716 cp->rand, NULL, NULL);
3718 status = MGMT_STATUS_FAILED;
3720 status = MGMT_STATUS_SUCCESS;
3722 err = mgmt_cmd_complete(sk, hdev->id,
3723 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3724 &cp->addr, sizeof(cp->addr));
3725 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3726 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3727 u8 *rand192, *hash192, *rand256, *hash256;
3730 if (bdaddr_type_is_le(cp->addr.type)) {
3731 /* Enforce zero-valued 192-bit parameters as
3732 * long as legacy SMP OOB isn't implemented.
3734 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3735 memcmp(cp->hash192, ZERO_KEY, 16)) {
3736 err = mgmt_cmd_complete(sk, hdev->id,
3737 MGMT_OP_ADD_REMOTE_OOB_DATA,
3738 MGMT_STATUS_INVALID_PARAMS,
3739 addr, sizeof(*addr));
3746 /* In case one of the P-192 values is set to zero,
3747 * then just disable OOB data for P-192.
3749 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3750 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3754 rand192 = cp->rand192;
3755 hash192 = cp->hash192;
3759 /* In case one of the P-256 values is set to zero, then just
3760 * disable OOB data for P-256.
3762 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3763 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3767 rand256 = cp->rand256;
3768 hash256 = cp->hash256;
3771 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3772 cp->addr.type, hash192, rand192,
3775 status = MGMT_STATUS_FAILED;
3777 status = MGMT_STATUS_SUCCESS;
3779 err = mgmt_cmd_complete(sk, hdev->id,
3780 MGMT_OP_ADD_REMOTE_OOB_DATA,
3781 status, &cp->addr, sizeof(cp->addr));
3783 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3785 MGMT_STATUS_INVALID_PARAMS);
3789 hci_dev_unlock(hdev);
3793 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3794 void *data, u16 len)
3796 struct mgmt_cp_remove_remote_oob_data *cp = data;
3800 BT_DBG("%s", hdev->name);
3802 if (cp->addr.type != BDADDR_BREDR)
3803 return mgmt_cmd_complete(sk, hdev->id,
3804 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3805 MGMT_STATUS_INVALID_PARAMS,
3806 &cp->addr, sizeof(cp->addr));
3810 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3811 hci_remote_oob_data_clear(hdev);
3812 status = MGMT_STATUS_SUCCESS;
3816 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3818 status = MGMT_STATUS_INVALID_PARAMS;
3820 status = MGMT_STATUS_SUCCESS;
3823 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3824 status, &cp->addr, sizeof(cp->addr));
3826 hci_dev_unlock(hdev);
3830 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
3832 struct hci_dev *hdev = req->hdev;
3833 struct hci_cp_inquiry cp;
3834 /* General inquiry access code (GIAC) */
3835 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3837 *status = mgmt_bredr_support(hdev);
3841 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
3842 *status = MGMT_STATUS_BUSY;
3846 hci_inquiry_cache_flush(hdev);
3848 memset(&cp, 0, sizeof(cp));
3849 memcpy(&cp.lap, lap, sizeof(cp.lap));
3850 cp.length = DISCOV_BREDR_INQUIRY_LEN;
3852 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3857 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
3859 struct hci_dev *hdev = req->hdev;
3860 struct hci_cp_le_set_scan_param param_cp;
3861 struct hci_cp_le_set_scan_enable enable_cp;
3865 *status = mgmt_le_support(hdev);
3869 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
3870 /* Don't let discovery abort an outgoing connection attempt
3871 * that's using directed advertising.
3873 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3874 *status = MGMT_STATUS_REJECTED;
3878 disable_advertising(req);
3881 /* If controller is scanning, it means the background scanning is
3882 * running. Thus, we should temporarily stop it in order to set the
3883 * discovery scanning parameters.
3885 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3886 hci_req_add_le_scan_disable(req);
3888 /* All active scans will be done with either a resolvable private
3889 * address (when privacy feature has been enabled) or non-resolvable
3892 err = hci_update_random_address(req, true, &own_addr_type);
3894 *status = MGMT_STATUS_FAILED;
3898 memset(¶m_cp, 0, sizeof(param_cp));
3899 param_cp.type = LE_SCAN_ACTIVE;
3900 param_cp.interval = cpu_to_le16(interval);
3901 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3902 param_cp.own_address_type = own_addr_type;
3904 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3907 memset(&enable_cp, 0, sizeof(enable_cp));
3908 enable_cp.enable = LE_SCAN_ENABLE;
3909 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3911 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3917 static bool trigger_discovery(struct hci_request *req, u8 *status)
3919 struct hci_dev *hdev = req->hdev;
3921 switch (hdev->discovery.type) {
3922 case DISCOV_TYPE_BREDR:
3923 if (!trigger_bredr_inquiry(req, status))
3927 case DISCOV_TYPE_INTERLEAVED:
3928 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3930 /* During simultaneous discovery, we double LE scan
3931 * interval. We must leave some time for the controller
3932 * to do BR/EDR inquiry.
3934 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
3938 if (!trigger_bredr_inquiry(req, status))
3944 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
3945 *status = MGMT_STATUS_NOT_SUPPORTED;
3950 case DISCOV_TYPE_LE:
3951 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
3956 *status = MGMT_STATUS_INVALID_PARAMS;
3963 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3966 struct mgmt_pending_cmd *cmd;
3967 unsigned long timeout;
3969 BT_DBG("status %d", status);
3973 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3975 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3978 cmd->cmd_complete(cmd, mgmt_status(status));
3979 mgmt_pending_remove(cmd);
3983 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3987 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3989 /* If the scan involves LE scan, pick proper timeout to schedule
3990 * hdev->le_scan_disable that will stop it.
3992 switch (hdev->discovery.type) {
3993 case DISCOV_TYPE_LE:
3994 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3996 case DISCOV_TYPE_INTERLEAVED:
3997 /* When running simultaneous discovery, the LE scanning time
3998 * should occupy the whole discovery time sine BR/EDR inquiry
3999 * and LE scanning are scheduled by the controller.
4001 * For interleaving discovery in comparison, BR/EDR inquiry
4002 * and LE scanning are done sequentially with separate
4005 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4006 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4008 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4010 case DISCOV_TYPE_BREDR:
4014 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4020 /* When service discovery is used and the controller has
4021 * a strict duplicate filter, it is important to remember
4022 * the start and duration of the scan. This is required
4023 * for restarting scanning during the discovery phase.
4025 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4027 hdev->discovery.result_filtering) {
4028 hdev->discovery.scan_start = jiffies;
4029 hdev->discovery.scan_duration = timeout;
4032 queue_delayed_work(hdev->workqueue,
4033 &hdev->le_scan_disable, timeout);
4037 hci_dev_unlock(hdev);
4040 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4041 void *data, u16 len)
4043 struct mgmt_cp_start_discovery *cp = data;
4044 struct mgmt_pending_cmd *cmd;
4045 struct hci_request req;
4049 BT_DBG("%s", hdev->name);
4053 if (!hdev_is_powered(hdev)) {
4054 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4055 MGMT_STATUS_NOT_POWERED,
4056 &cp->type, sizeof(cp->type));
4060 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4061 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4062 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4063 MGMT_STATUS_BUSY, &cp->type,
4068 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4074 cmd->cmd_complete = generic_cmd_complete;
4076 /* Clear the discovery filter first to free any previously
4077 * allocated memory for the UUID list.
4079 hci_discovery_filter_clear(hdev);
4081 hdev->discovery.type = cp->type;
4082 hdev->discovery.report_invalid_rssi = false;
4084 hci_req_init(&req, hdev);
4086 if (!trigger_discovery(&req, &status)) {
4087 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4088 status, &cp->type, sizeof(cp->type));
4089 mgmt_pending_remove(cmd);
4093 err = hci_req_run(&req, start_discovery_complete);
4095 mgmt_pending_remove(cmd);
4099 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4102 hci_dev_unlock(hdev);
4106 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4109 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4113 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4114 void *data, u16 len)
4116 struct mgmt_cp_start_service_discovery *cp = data;
4117 struct mgmt_pending_cmd *cmd;
4118 struct hci_request req;
4119 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4120 u16 uuid_count, expected_len;
4124 BT_DBG("%s", hdev->name);
4128 if (!hdev_is_powered(hdev)) {
4129 err = mgmt_cmd_complete(sk, hdev->id,
4130 MGMT_OP_START_SERVICE_DISCOVERY,
4131 MGMT_STATUS_NOT_POWERED,
4132 &cp->type, sizeof(cp->type));
4136 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4137 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4138 err = mgmt_cmd_complete(sk, hdev->id,
4139 MGMT_OP_START_SERVICE_DISCOVERY,
4140 MGMT_STATUS_BUSY, &cp->type,
4145 uuid_count = __le16_to_cpu(cp->uuid_count);
4146 if (uuid_count > max_uuid_count) {
4147 BT_ERR("service_discovery: too big uuid_count value %u",
4149 err = mgmt_cmd_complete(sk, hdev->id,
4150 MGMT_OP_START_SERVICE_DISCOVERY,
4151 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4156 expected_len = sizeof(*cp) + uuid_count * 16;
4157 if (expected_len != len) {
4158 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4160 err = mgmt_cmd_complete(sk, hdev->id,
4161 MGMT_OP_START_SERVICE_DISCOVERY,
4162 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4167 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4174 cmd->cmd_complete = service_discovery_cmd_complete;
4176 /* Clear the discovery filter first to free any previously
4177 * allocated memory for the UUID list.
4179 hci_discovery_filter_clear(hdev);
4181 hdev->discovery.result_filtering = true;
4182 hdev->discovery.type = cp->type;
4183 hdev->discovery.rssi = cp->rssi;
4184 hdev->discovery.uuid_count = uuid_count;
4186 if (uuid_count > 0) {
4187 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4189 if (!hdev->discovery.uuids) {
4190 err = mgmt_cmd_complete(sk, hdev->id,
4191 MGMT_OP_START_SERVICE_DISCOVERY,
4193 &cp->type, sizeof(cp->type));
4194 mgmt_pending_remove(cmd);
4199 hci_req_init(&req, hdev);
4201 if (!trigger_discovery(&req, &status)) {
4202 err = mgmt_cmd_complete(sk, hdev->id,
4203 MGMT_OP_START_SERVICE_DISCOVERY,
4204 status, &cp->type, sizeof(cp->type));
4205 mgmt_pending_remove(cmd);
4209 err = hci_req_run(&req, start_discovery_complete);
4211 mgmt_pending_remove(cmd);
4215 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4218 hci_dev_unlock(hdev);
4222 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4224 struct mgmt_pending_cmd *cmd;
4226 BT_DBG("status %d", status);
4230 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4232 cmd->cmd_complete(cmd, mgmt_status(status));
4233 mgmt_pending_remove(cmd);
4237 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4239 hci_dev_unlock(hdev);
4242 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4245 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4246 struct mgmt_pending_cmd *cmd;
4247 struct hci_request req;
4250 BT_DBG("%s", hdev->name);
4254 if (!hci_discovery_active(hdev)) {
4255 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4256 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4257 sizeof(mgmt_cp->type));
4261 if (hdev->discovery.type != mgmt_cp->type) {
4262 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4263 MGMT_STATUS_INVALID_PARAMS,
4264 &mgmt_cp->type, sizeof(mgmt_cp->type));
4268 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4274 cmd->cmd_complete = generic_cmd_complete;
4276 hci_req_init(&req, hdev);
4278 hci_stop_discovery(&req);
4280 err = hci_req_run(&req, stop_discovery_complete);
4282 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4286 mgmt_pending_remove(cmd);
4288 /* If no HCI commands were sent we're done */
4289 if (err == -ENODATA) {
4290 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4291 &mgmt_cp->type, sizeof(mgmt_cp->type));
4292 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4296 hci_dev_unlock(hdev);
4300 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4303 struct mgmt_cp_confirm_name *cp = data;
4304 struct inquiry_entry *e;
4307 BT_DBG("%s", hdev->name);
4311 if (!hci_discovery_active(hdev)) {
4312 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4313 MGMT_STATUS_FAILED, &cp->addr,
4318 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4320 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4321 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4326 if (cp->name_known) {
4327 e->name_state = NAME_KNOWN;
4330 e->name_state = NAME_NEEDED;
4331 hci_inquiry_cache_update_resolve(hdev, e);
4334 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4335 &cp->addr, sizeof(cp->addr));
4338 hci_dev_unlock(hdev);
4342 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4345 struct mgmt_cp_block_device *cp = data;
4349 BT_DBG("%s", hdev->name);
4351 if (!bdaddr_type_is_valid(cp->addr.type))
4352 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4353 MGMT_STATUS_INVALID_PARAMS,
4354 &cp->addr, sizeof(cp->addr));
4358 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4361 status = MGMT_STATUS_FAILED;
4365 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4367 status = MGMT_STATUS_SUCCESS;
4370 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4371 &cp->addr, sizeof(cp->addr));
4373 hci_dev_unlock(hdev);
4378 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4381 struct mgmt_cp_unblock_device *cp = data;
4385 BT_DBG("%s", hdev->name);
4387 if (!bdaddr_type_is_valid(cp->addr.type))
4388 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4389 MGMT_STATUS_INVALID_PARAMS,
4390 &cp->addr, sizeof(cp->addr));
4394 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4397 status = MGMT_STATUS_INVALID_PARAMS;
4401 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4403 status = MGMT_STATUS_SUCCESS;
4406 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4407 &cp->addr, sizeof(cp->addr));
4409 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: validate the Device ID source
 * (0x0000-0x0002 per the DI profile), store the source/vendor/product/
 * version fields on hdev, reply with success and run an HCI request
 * (presumably to refresh EIR with the new DI record — the request
 * contents are elided in this extraction; TODO confirm).
 */
4414 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4417 struct mgmt_cp_set_device_id *cp = data;
4418 struct hci_request req;
4422 BT_DBG("%s", hdev->name);
4424 source = __le16_to_cpu(cp->source);
/* Only 0x0000 (disabled), 0x0001 (SIG) and 0x0002 (USB IF) are valid. */
4426 if (source > 0x0002)
4427 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4428 MGMT_STATUS_INVALID_PARAMS);
4432 hdev->devid_source = source;
4433 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4434 hdev->devid_product = __le16_to_cpu(cp->product);
4435 hdev->devid_version = __le16_to_cpu(cp->version);
4437 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4440 hci_req_init(&req, hdev);
4442 hci_req_run(&req, NULL);
4444 hci_dev_unlock(hdev);
/* Completion callback for re-enabling instance advertising; only logs
 * the HCI status (no further processing visible here).
 */
4449 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4452 BT_DBG("status %d", status);
/* Completion handler for the Set Advertising HCI request. On HCI error,
 * answer all pending MGMT_OP_SET_ADVERTISING commands with the mapped
 * mgmt status. On success, mirror the controller's HCI_LE_ADV state into
 * the HCI_ADVERTISING setting flag, answer pending commands with the new
 * settings and emit New Settings. Finally, if Set Advertising was just
 * turned off while an advertising instance was configured, re-enable
 * that instance advertising.
 * NOTE(review): extraction gaps — error/exit branches are elided.
 */
4455 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4458 struct cmd_lookup match = { NULL, hdev };
4459 struct hci_request req;
4464 u8 mgmt_err = mgmt_status(status);
4466 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4467 cmd_status_rsp, &mgmt_err);
/* Sync the mgmt-visible flag with the actual controller state. */
4471 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4472 hci_dev_set_flag(hdev, HCI_ADVERTISING)
4474 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4476 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4479 new_settings(hdev, match.sk);
4484 /* If "Set Advertising" was just disabled and instance advertising was
4485 * set up earlier, then enable the advertising instance.
4487 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4488 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
4491 hci_req_init(&req, hdev);
4493 update_adv_data(&req);
4494 enable_advertising(&req);
4496 if (hci_req_run(&req, enable_advertising_instance) < 0)
4497 BT_ERR("Failed to re-configure advertising");
4500 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler. Valid values: 0x00 (off), 0x01 (on),
 * 0x02 (on + connectable). If no HCI traffic is needed (powered off, no
 * state change, LE links up, or an active LE scan is running) the flags
 * are toggled directly and the reply is sent immediately; otherwise a
 * pending command is queued and an HCI request is run to enable or
 * disable advertising, completing in set_advertising_complete().
 * NOTE(review): extraction gaps — several branches/labels are elided.
 */
4506 struct mgmt_mode *cp = data;
4507 struct mgmt_pending_cmd *cmd;
4508 struct hci_request req;
4512 BT_DBG("request for %s", hdev->name);
4514 status = mgmt_le_support(hdev);
4516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4519 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4520 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4521 MGMT_STATUS_INVALID_PARAMS);
4527 /* The following conditions are ones which mean that we should
4528 * not do any HCI communication but directly send a mgmt
4529 * response to user space (after toggling the flag if
4532 if (!hdev_is_powered(hdev) ||
4533 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4534 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4535 hci_conn_num(hdev, LE_LINK) > 0 ||
4536 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4537 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4541 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
/* 0x02 means advertising should also be connectable. */
4542 if (cp->val == 0x02)
4543 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4545 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4547 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4548 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4551 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4556 err = new_settings(hdev, sk);
/* Reject if another Set Advertising / Set LE is still in flight. */
4561 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4562 pending_find(MGMT_OP_SET_LE, hdev)) {
4563 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4568 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4574 hci_req_init(&req, hdev);
4576 if (cp->val == 0x02)
4577 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4579 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4582 /* Switch to instance "0" for the Set Advertising setting. */
4583 update_adv_data_for_instance(&req, 0);
4584 update_scan_rsp_data_for_instance(&req, 0);
4585 enable_advertising(&req);
4587 disable_advertising(&req);
4590 err = hci_req_run(&req, set_advertising_complete);
4592 mgmt_pending_remove(cmd);
4595 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only permitted on LE-capable,
 * powered-off controllers. A non-ANY address must not be BDADDR_NONE and
 * must have its two most significant bits set (static random address
 * format). On success the address is stored in hdev->static_addr and
 * New Settings is emitted.
 */
4599 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4600 void *data, u16 len)
4602 struct mgmt_cp_set_static_address *cp = data;
4605 BT_DBG("%s", hdev->name);
4607 if (!lmp_le_capable(hdev))
4608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4609 MGMT_STATUS_NOT_SUPPORTED);
/* Changing the static address while powered is rejected. */
4611 if (hdev_is_powered(hdev))
4612 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4613 MGMT_STATUS_REJECTED);
4615 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4616 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4617 return mgmt_cmd_status(sk, hdev->id,
4618 MGMT_OP_SET_STATIC_ADDRESS,
4619 MGMT_STATUS_INVALID_PARAMS);
4621 /* Two most significant bits shall be set */
4622 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4623 return mgmt_cmd_status(sk, hdev->id,
4624 MGMT_OP_SET_STATIC_ADDRESS,
4625 MGMT_STATUS_INVALID_PARAMS);
4630 bacpy(&hdev->static_addr, &cp->bdaddr);
4632 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4636 err = new_settings(hdev, sk);
4639 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate LE scan interval and window
 * (each in 0x0004..0x4000, window <= interval), store them on hdev, and
 * if a background (passive) LE scan is currently running while discovery
 * is stopped, restart it so the new parameters take effect.
 */
4643 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4644 void *data, u16 len)
4646 struct mgmt_cp_set_scan_params *cp = data;
4647 __u16 interval, window;
4650 BT_DBG("%s", hdev->name);
4652 if (!lmp_le_capable(hdev))
4653 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4654 MGMT_STATUS_NOT_SUPPORTED);
4656 interval = __le16_to_cpu(cp->interval);
4658 if (interval < 0x0004 || interval > 0x4000)
4659 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4660 MGMT_STATUS_INVALID_PARAMS);
4662 window = __le16_to_cpu(cp->window);
4664 if (window < 0x0004 || window > 0x4000)
4665 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4666 MGMT_STATUS_INVALID_PARAMS);
/* The scan window can never exceed the scan interval. */
4668 if (window > interval)
4669 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4670 MGMT_STATUS_INVALID_PARAMS);
4674 hdev->le_scan_interval = interval;
4675 hdev->le_scan_window = window;
4677 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4680 /* If background scan is running, restart it so new parameters are
4683 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4684 hdev->discovery.state == DISCOVERY_STOPPED) {
4685 struct hci_request req;
4687 hci_req_init(&req, hdev);
4689 hci_req_add_le_scan_disable(&req);
4690 hci_req_add_le_passive_scan(&req);
4692 hci_req_run(&req, NULL);
4695 hci_dev_unlock(hdev);
/* Completion handler for Set Fast Connectable: on HCI error report the
 * mapped status to the pending command's socket; on success toggle the
 * HCI_FAST_CONNECTABLE flag according to the stored command parameter,
 * send the settings reply and emit New Settings.
 */
4700 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4703 struct mgmt_pending_cmd *cmd;
4705 BT_DBG("status 0x%02x", status);
4709 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4714 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4715 mgmt_status(status));
4717 struct mgmt_mode *cp = cmd->param;
4720 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4722 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4724 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4725 new_settings(hdev, cmd->sk);
4728 mgmt_pending_remove(cmd);
4731 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2 (page-scan parameter support).
 * If the flag already matches, or the adapter is powered off, reply
 * directly; otherwise queue a pending command and run an HCI request
 * that writes the fast-connectable page-scan settings, completing in
 * fast_connectable_complete().
 * NOTE(review): extraction gaps — some branches/labels are elided.
 */
4734 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4735 void *data, u16 len)
4737 struct mgmt_mode *cp = data;
4738 struct mgmt_pending_cmd *cmd;
4739 struct hci_request req;
4742 BT_DBG("%s", hdev->name);
4744 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4745 hdev->hci_ver < BLUETOOTH_VER_1_2)
4746 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4747 MGMT_STATUS_NOT_SUPPORTED);
4749 if (cp->val != 0x00 && cp->val != 0x01)
4750 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4751 MGMT_STATUS_INVALID_PARAMS);
4755 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4756 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op request: current flag already matches the requested value. */
4761 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4762 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4767 if (!hdev_is_powered(hdev)) {
4768 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4769 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4771 new_settings(hdev, sk);
4775 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4782 hci_req_init(&req, hdev);
4784 write_fast_connectable(&req, cp->val);
4786 err = hci_req_run(&req, fast_connectable_complete);
4788 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4789 MGMT_STATUS_FAILED);
4790 mgmt_pending_remove(cmd);
4794 hci_dev_unlock(hdev);
/* Completion handler for Set BR/EDR: on HCI error restore the
 * HCI_BREDR_ENABLED flag (which set_bredr() flipped optimistically
 * before running the request) and report the error; on success send the
 * settings reply and emit New Settings.
 */
4799 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4801 struct mgmt_pending_cmd *cmd;
4803 BT_DBG("status 0x%02x", status);
4807 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4812 u8 mgmt_err = mgmt_status(status);
4814 /* We need to restore the flag if related HCI commands
4817 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4819 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4821 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4822 new_settings(hdev, cmd->sk);
4825 mgmt_pending_remove(cmd);
4828 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler for dual-mode controllers: requires both
 * BR/EDR and LE support and LE currently enabled. Powered off, the flag
 * is toggled directly (clearing BR/EDR-only settings when disabling).
 * Powered on, disabling is rejected outright, and re-enabling is
 * rejected when a static address or Secure Connections is in use (see
 * the in-code rationale). Otherwise the flag is flipped optimistically
 * and an HCI request updates page scan and advertising data, completing
 * in set_bredr_complete().
 * NOTE(review): extraction gaps — some branches/labels are elided.
 */
4831 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4833 struct mgmt_mode *cp = data;
4834 struct mgmt_pending_cmd *cmd;
4835 struct hci_request req;
4838 BT_DBG("request for %s", hdev->name);
4840 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4842 MGMT_STATUS_NOT_SUPPORTED);
4844 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4846 MGMT_STATUS_REJECTED);
4848 if (cp->val != 0x00 && cp->val != 0x01)
4849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4850 MGMT_STATUS_INVALID_PARAMS);
4854 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4855 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4859 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also clears all BR/EDR-specific settings. */
4861 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4862 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4863 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4864 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4865 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4868 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4870 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4874 err = new_settings(hdev, sk);
4878 /* Reject disabling when powered on */
4880 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4881 MGMT_STATUS_REJECTED);
4884 /* When configuring a dual-mode controller to operate
4885 * with LE only and using a static address, then switching
4886 * BR/EDR back on is not allowed.
4888 * Dual-mode controllers shall operate with the public
4889 * address as its identity address for BR/EDR and LE. So
4890 * reject the attempt to create an invalid configuration.
4892 * The same restrictions applies when secure connections
4893 * has been enabled. For BR/EDR this is a controller feature
4894 * while for LE it is a host stack feature. This means that
4895 * switching BR/EDR back on when secure connections has been
4896 * enabled is not a supported transaction.
4898 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4899 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4900 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4901 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4902 MGMT_STATUS_REJECTED);
4907 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4908 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4913 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4919 /* We need to flip the bit already here so that update_adv_data
4920 * generates the correct flags.
4922 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4924 hci_req_init(&req, hdev);
4926 write_fast_connectable(&req, false);
4927 __hci_update_page_scan(&req);
4929 /* Since only the advertising data flags will change, there
4930 * is no need to update the scan response data.
4932 update_adv_data(&req);
4934 err = hci_req_run(&req, set_bredr_complete);
4936 mgmt_pending_remove(cmd);
4939 hci_dev_unlock(hdev);
/* Completion handler for Write Secure Connections Support: on HCI error
 * report the mapped status; on success update HCI_SC_ENABLED and
 * HCI_SC_ONLY according to the requested mode (off / enabled /
 * SC-only), then reply and emit New Settings. The branch structure
 * selecting between the three flag combinations is partially elided in
 * this extraction.
 */
4943 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4945 struct mgmt_pending_cmd *cmd;
4946 struct mgmt_mode *cp;
4948 BT_DBG("%s status %u", hdev->name, status);
4952 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4957 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4958 mgmt_status(status));
4966 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4967 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4970 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4971 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4974 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4975 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4979 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4980 new_settings(hdev, cmd->sk);
4983 mgmt_pending_remove(cmd);
4985 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler. Valid values: 0x00 (off), 0x01
 * (enabled), 0x02 (SC-only mode). When no controller interaction is
 * required (powered off, no BR/EDR SC capability, or BR/EDR disabled)
 * the flags are toggled directly; otherwise a pending command is queued
 * and HCI_OP_WRITE_SC_SUPPORT is sent, completing in
 * sc_enable_complete().
 * NOTE(review): extraction gaps — some branches are elided.
 */
4988 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4989 void *data, u16 len)
4991 struct mgmt_mode *cp = data;
4992 struct mgmt_pending_cmd *cmd;
4993 struct hci_request req;
4997 BT_DBG("request for %s", hdev->name);
4999 if (!lmp_sc_capable(hdev) &&
5000 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5001 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5002 MGMT_STATUS_NOT_SUPPORTED);
/* SC on a BR/EDR-capable controller requires SSP to be enabled. */
5004 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5005 lmp_sc_capable(hdev) &&
5006 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5007 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5008 MGMT_STATUS_REJECTED);
5010 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5011 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5012 MGMT_STATUS_INVALID_PARAMS);
5016 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5017 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5021 changed = !hci_dev_test_and_set_flag(hdev,
5023 if (cp->val == 0x02)
5024 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5026 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5028 changed = hci_dev_test_and_clear_flag(hdev,
5030 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5033 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5038 err = new_settings(hdev, sk);
5043 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5044 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op: both SC and SC-only state already match the request. */
5051 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5052 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5053 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5057 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5063 hci_req_init(&req, hdev);
5064 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5065 err = hci_req_run(&req, sc_enable_complete);
5067 mgmt_pending_remove(cmd);
5072 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler. Valid values: 0x00 (off), 0x01 (keep
 * debug keys), 0x02 (keep and actively use debug keys). Toggles
 * HCI_KEEP_DEBUG_KEYS and HCI_USE_DEBUG_KEYS, and when the "use" state
 * changed on a powered, SSP-enabled controller, sends
 * HCI_OP_WRITE_SSP_DEBUG_MODE to match.
 */
5076 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5077 void *data, u16 len)
5079 struct mgmt_mode *cp = data;
5080 bool changed, use_changed;
5083 BT_DBG("request for %s", hdev->name);
5085 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5086 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5087 MGMT_STATUS_INVALID_PARAMS);
5092 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5094 changed = hci_dev_test_and_clear_flag(hdev,
5095 HCI_KEEP_DEBUG_KEYS);
5097 if (cp->val == 0x02)
5098 use_changed = !hci_dev_test_and_set_flag(hdev,
5099 HCI_USE_DEBUG_KEYS);
5101 use_changed = hci_dev_test_and_clear_flag(hdev,
5102 HCI_USE_DEBUG_KEYS);
/* Propagate the debug-mode change to the controller when relevant. */
5104 if (hdev_is_powered(hdev) && use_changed &&
5105 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5106 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5107 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5108 sizeof(mode), &mode);
5111 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5116 err = new_settings(hdev, sk);
5119 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: only valid on LE-capable, powered-off
 * controllers. Enabling stores the caller-supplied IRK, sets
 * HCI_PRIVACY and marks the RPA expired so a fresh one is generated;
 * disabling clears the IRK and flags. Support for this command implies
 * user space handles IRKs, so HCI_RPA_RESOLVING is set either way.
 */
5123 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5126 struct mgmt_cp_set_privacy *cp = cp_data;
5130 BT_DBG("request for %s", hdev->name);
5132 if (!lmp_le_capable(hdev))
5133 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5134 MGMT_STATUS_NOT_SUPPORTED);
5136 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5137 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5138 MGMT_STATUS_INVALID_PARAMS);
5140 if (hdev_is_powered(hdev))
5141 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5142 MGMT_STATUS_REJECTED);
5146 /* If user space supports this command it is also expected to
5147 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5149 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5152 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5153 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5154 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5156 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5157 memset(hdev->irk, 0, sizeof(hdev->irk));
5158 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5161 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5166 err = new_settings(hdev, sk);
5169 hci_dev_unlock(hdev);
/* Validate an IRK entry's address: LE public is always acceptable; an
 * LE random address must be in static random format (two most
 * significant bits set). The default-case handling is elided in this
 * extraction.
 */
5173 static bool irk_is_valid(struct mgmt_irk_info *irk)
5175 switch (irk->addr.type) {
5176 case BDADDR_LE_PUBLIC:
5179 case BDADDR_LE_RANDOM:
5180 /* Two most significant bits shall be set */
5181 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: bound-check irk_count against what fits in
 * a u16-sized payload, verify the exact expected length, validate every
 * entry with irk_is_valid(), then replace the SMP IRK store wholesale
 * (clear + re-add) and set HCI_RPA_RESOLVING.
 */
5189 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5192 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound so expected_len below cannot overflow u16. */
5193 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5194 sizeof(struct mgmt_irk_info));
5195 u16 irk_count, expected_len;
5198 BT_DBG("request for %s", hdev->name);
5200 if (!lmp_le_capable(hdev))
5201 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5202 MGMT_STATUS_NOT_SUPPORTED);
5204 irk_count = __le16_to_cpu(cp->irk_count);
5205 if (irk_count > max_irk_count) {
5206 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5207 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5208 MGMT_STATUS_INVALID_PARAMS);
5211 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5212 if (expected_len != len) {
5213 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5216 MGMT_STATUS_INVALID_PARAMS);
5219 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Reject the whole load if any single entry is malformed. */
5221 for (i = 0; i < irk_count; i++) {
5222 struct mgmt_irk_info *key = &cp->irks[i];
5224 if (!irk_is_valid(key))
5225 return mgmt_cmd_status(sk, hdev->id,
5227 MGMT_STATUS_INVALID_PARAMS);
5232 hci_smp_irks_clear(hdev);
5234 for (i = 0; i < irk_count; i++) {
5235 struct mgmt_irk_info *irk = &cp->irks[i];
5238 if (irk->addr.type == BDADDR_LE_PUBLIC)
5239 addr_type = ADDR_LE_DEV_PUBLIC;
5241 addr_type = ADDR_LE_DEV_RANDOM;
5243 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5247 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5249 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5251 hci_dev_unlock(hdev);
/* Validate an LTK entry: the master field must be 0x00 or 0x01, and the
 * address must be LE public or a static random LE address (two most
 * significant bits set). Default-case handling is elided in this
 * extraction.
 */
5256 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5258 if (key->master != 0x00 && key->master != 0x01)
5261 switch (key->addr.type) {
5262 case BDADDR_LE_PUBLIC:
5265 case BDADDR_LE_RANDOM:
5266 /* Two most significant bits shall be set */
5267 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: bound-check key_count, verify
 * exact payload length, validate every entry via ltk_is_valid(), then
 * replace the SMP LTK store wholesale: clear it and re-add each key,
 * mapping the mgmt key type to the SMP key type / authenticated flag.
 * NOTE(review): extraction gaps — `break;` statements between the
 * switch cases and an unrecognized-type branch are elided here.
 */
5275 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5276 void *cp_data, u16 len)
5278 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound so expected_len below cannot overflow u16. */
5279 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5280 sizeof(struct mgmt_ltk_info));
5281 u16 key_count, expected_len;
5284 BT_DBG("request for %s", hdev->name);
5286 if (!lmp_le_capable(hdev))
5287 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5288 MGMT_STATUS_NOT_SUPPORTED);
5290 key_count = __le16_to_cpu(cp->key_count);
5291 if (key_count > max_key_count) {
5292 BT_ERR("load_ltks: too big key_count value %u", key_count);
5293 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5294 MGMT_STATUS_INVALID_PARAMS);
5297 expected_len = sizeof(*cp) + key_count *
5298 sizeof(struct mgmt_ltk_info);
5299 if (expected_len != len) {
5300 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5302 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5303 MGMT_STATUS_INVALID_PARAMS);
5306 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Reject the whole load if any single entry is malformed. */
5308 for (i = 0; i < key_count; i++) {
5309 struct mgmt_ltk_info *key = &cp->keys[i];
5311 if (!ltk_is_valid(key))
5312 return mgmt_cmd_status(sk, hdev->id,
5313 MGMT_OP_LOAD_LONG_TERM_KEYS,
5314 MGMT_STATUS_INVALID_PARAMS);
5319 hci_smp_ltks_clear(hdev);
5321 for (i = 0; i < key_count; i++) {
5322 struct mgmt_ltk_info *key = &cp->keys[i];
5323 u8 type, addr_type, authenticated;
5325 if (key->addr.type == BDADDR_LE_PUBLIC)
5326 addr_type = ADDR_LE_DEV_PUBLIC;
5328 addr_type = ADDR_LE_DEV_RANDOM;
/* Map mgmt LTK type to SMP key type and authentication level. */
5330 switch (key->type) {
5331 case MGMT_LTK_UNAUTHENTICATED:
5332 authenticated = 0x00;
5333 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5335 case MGMT_LTK_AUTHENTICATED:
5336 authenticated = 0x01;
5337 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5339 case MGMT_LTK_P256_UNAUTH:
5340 authenticated = 0x00;
5341 type = SMP_LTK_P256;
5343 case MGMT_LTK_P256_AUTH:
5344 authenticated = 0x01;
5345 type = SMP_LTK_P256;
5347 case MGMT_LTK_P256_DEBUG:
5348 authenticated = 0x00;
5349 type = SMP_LTK_P256_DEBUG;
5354 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5355 authenticated, key->val, key->enc_size, key->ediv,
5359 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5362 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Connection Information: build the reply
 * from the cached connection values on success, or HCI_RSSI_INVALID /
 * HCI_TX_POWER_INVALID sentinels on failure, send it, and drop the
 * connection reference taken when the command was queued.
 */
5367 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5369 struct hci_conn *conn = cmd->user_data;
5370 struct mgmt_rp_get_conn_info rp;
5373 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5375 if (status == MGMT_STATUS_SUCCESS) {
5376 rp.rssi = conn->rssi;
5377 rp.tx_power = conn->tx_power;
5378 rp.max_tx_power = conn->max_tx_power;
5380 rp.rssi = HCI_RSSI_INVALID;
5381 rp.tx_power = HCI_TX_POWER_INVALID;
5382 rp.max_tx_power = HCI_TX_POWER_INVALID;
5385 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5386 status, &rp, sizeof(rp));
5388 hci_conn_drop(conn);
/* Completion handler for the Read RSSI / Read TX Power request issued
 * by get_conn_info(): recover the connection handle from whichever
 * command was sent last, find the matching pending mgmt command and
 * complete it. Errors from Read TX Power are deliberately overridden to
 * success (see the in-code rationale).
 */
5394 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5397 struct hci_cp_read_rssi *cp;
5398 struct mgmt_pending_cmd *cmd;
5399 struct hci_conn *conn;
5403 BT_DBG("status 0x%02x", hci_status);
5407 /* Commands sent in request are either Read RSSI or Read Transmit Power
5408 * Level so we check which one was last sent to retrieve connection
5409 * handle. Both commands have handle as first parameter so it's safe to
5410 * cast data on the same command struct.
5412 * First command sent is always Read RSSI and we fail only if it fails.
5413 * In other case we simply override error to indicate success as we
5414 * already remembered if TX power value is actually valid.
5416 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5418 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5419 status = MGMT_STATUS_SUCCESS;
5421 status = mgmt_status(hci_status);
5425 BT_ERR("invalid sent_cmd in conn_info response");
5429 handle = __le16_to_cpu(cp->handle);
5430 conn = hci_conn_hash_lookup_handle(hdev, handle);
5432 BT_ERR("unknown handle (%d) in conn_info response", handle);
5436 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5440 cmd->cmd_complete(cmd, status);
5441 mgmt_pending_remove(cmd);
5444 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: return RSSI / TX power / max TX power
 * for an existing connection. If the cached values are younger than a
 * randomized age window (conn_info_min_age..conn_info_max_age) they are
 * returned directly; otherwise an HCI request refreshes them (Read RSSI
 * always, Read TX Power only when still unknown) and a pending command
 * completes asynchronously via conn_info_refresh_complete().
 * NOTE(review): extraction gaps — some error/exit branches are elided.
 */
5447 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5450 struct mgmt_cp_get_conn_info *cp = data;
5451 struct mgmt_rp_get_conn_info rp;
5452 struct hci_conn *conn;
5453 unsigned long conn_info_age;
5456 BT_DBG("%s", hdev->name);
5458 memset(&rp, 0, sizeof(rp));
5459 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5460 rp.addr.type = cp->addr.type;
5462 if (!bdaddr_type_is_valid(cp->addr.type))
5463 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5464 MGMT_STATUS_INVALID_PARAMS,
5469 if (!hdev_is_powered(hdev)) {
5470 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5471 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the transport matching the address type. */
5476 if (cp->addr.type == BDADDR_BREDR)
5477 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5480 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5482 if (!conn || conn->state != BT_CONNECTED) {
5483 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5484 MGMT_STATUS_NOT_CONNECTED, &rp,
5489 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5490 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5491 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5495 /* To avoid client trying to guess when to poll again for information we
5496 * calculate conn info age as random value between min/max set in hdev.
5498 conn_info_age = hdev->conn_info_min_age +
5499 prandom_u32_max(hdev->conn_info_max_age -
5500 hdev->conn_info_min_age);
5502 /* Query controller to refresh cached values if they are too old or were
5505 if (time_after(jiffies, conn->conn_info_timestamp +
5506 msecs_to_jiffies(conn_info_age)) ||
5507 !conn->conn_info_timestamp) {
5508 struct hci_request req;
5509 struct hci_cp_read_tx_power req_txp_cp;
5510 struct hci_cp_read_rssi req_rssi_cp;
5511 struct mgmt_pending_cmd *cmd;
5513 hci_req_init(&req, hdev);
5514 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5515 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5518 /* For LE links TX power does not change thus we don't need to
5519 * query for it once value is known.
5521 if (!bdaddr_type_is_le(cp->addr.type) ||
5522 conn->tx_power == HCI_TX_POWER_INVALID) {
5523 req_txp_cp.handle = cpu_to_le16(conn->handle);
5524 req_txp_cp.type = 0x00;
5525 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5526 sizeof(req_txp_cp), &req_txp_cp);
5529 /* Max TX power needs to be read only once per connection */
5530 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5531 req_txp_cp.handle = cpu_to_le16(conn->handle);
5532 req_txp_cp.type = 0x01;
5533 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5534 sizeof(req_txp_cp), &req_txp_cp);
5537 err = hci_req_run(&req, conn_info_refresh_complete);
5541 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the pending command completes. */
5548 hci_conn_hold(conn);
5549 cmd->user_data = hci_conn_get(conn);
5550 cmd->cmd_complete = conn_info_cmd_complete;
5552 conn->conn_info_timestamp = jiffies;
5554 /* Cache is valid, just reply with values cached in hci_conn */
5555 rp.rssi = conn->rssi;
5556 rp.tx_power = conn->tx_power;
5557 rp.max_tx_power = conn->max_tx_power;
5559 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5560 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5564 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Information: fill in the local
 * clock from hdev and, when a connection was involved, the piconet
 * clock and accuracy from the hci_conn, then send the reply and drop
 * the connection reference.
 * NOTE(review): extraction gaps — NULL checks and hci_dev_put() around
 * the hci_dev_get() call appear elided in this view.
 */
5568 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5570 struct hci_conn *conn = cmd->user_data;
5571 struct mgmt_rp_get_clock_info rp;
5572 struct hci_dev *hdev;
5575 memset(&rp, 0, sizeof(rp));
5576 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5581 hdev = hci_dev_get(cmd->index);
5583 rp.local_clock = cpu_to_le32(hdev->clock);
5588 rp.piconet_clock = cpu_to_le32(conn->clock);
5589 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5593 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5597 hci_conn_drop(conn);
/* Completion handler for the Read Clock request issued by
 * get_clock_info(): recover the connection (when the piconet clock was
 * read, i.e. hci_cp->which is set) from the sent command's handle, find
 * the matching pending mgmt command and complete it with the mapped
 * status.
 */
5604 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5606 struct hci_cp_read_clock *hci_cp;
5607 struct mgmt_pending_cmd *cmd;
5608 struct hci_conn *conn;
5610 BT_DBG("%s status %u", hdev->name, status);
5614 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5618 if (hci_cp->which) {
5619 u16 handle = __le16_to_cpu(hci_cp->handle);
5620 conn = hci_conn_hash_lookup_handle(hdev, handle);
5625 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5629 cmd->cmd_complete(cmd, mgmt_status(status));
5630 mgmt_pending_remove(cmd);
5633 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): queue a pending command
 * and issue HCI Read Clock for the local clock, plus a second Read
 * Clock (which = 0x01) for the piconet clock when a specific connected
 * peer address was given. Completes via get_clock_info_complete() and
 * clock_info_cmd_complete().
 * NOTE(review): extraction gaps — some error/exit branches are elided.
 */
5636 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5639 struct mgmt_cp_get_clock_info *cp = data;
5640 struct mgmt_rp_get_clock_info rp;
5641 struct hci_cp_read_clock hci_cp;
5642 struct mgmt_pending_cmd *cmd;
5643 struct hci_request req;
5644 struct hci_conn *conn;
5647 BT_DBG("%s", hdev->name);
5649 memset(&rp, 0, sizeof(rp));
5650 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5651 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5653 if (cp->addr.type != BDADDR_BREDR)
5654 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5655 MGMT_STATUS_INVALID_PARAMS,
5660 if (!hdev_is_powered(hdev)) {
5661 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5662 MGMT_STATUS_NOT_POWERED, &rp,
/* A non-ANY address selects a specific connected peer. */
5667 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5668 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5670 if (!conn || conn->state != BT_CONNECTED) {
5671 err = mgmt_cmd_complete(sk, hdev->id,
5672 MGMT_OP_GET_CLOCK_INFO,
5673 MGMT_STATUS_NOT_CONNECTED,
5681 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5687 cmd->cmd_complete = clock_info_cmd_complete;
5689 hci_req_init(&req, hdev);
/* First Read Clock: local clock (hci_cp zeroed => which = 0x00). */
5691 memset(&hci_cp, 0, sizeof(hci_cp));
5692 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5695 hci_conn_hold(conn);
5696 cmd->user_data = hci_conn_get(conn);
5698 hci_cp.handle = cpu_to_le16(conn->handle);
5699 hci_cp.which = 0x01; /* Piconet clock */
5700 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5703 err = hci_req_run(&req, get_clock_info_complete);
5705 mgmt_pending_remove(cmd);
5708 hci_dev_unlock(hdev);
/* Return whether there is an established LE connection to the given
 * address with the matching destination address type. The intermediate
 * return statements are elided in this extraction.
 */
5712 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5714 struct hci_conn *conn;
5716 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5720 if (conn->dst_type != type)
5723 if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Create (or fetch) the connection parameters for an address and set
 * its auto-connect policy: remove it from any action list, then enqueue
 * it on pend_le_reports or pend_le_conns as appropriate and refresh the
 * background scan. DIRECT/ALWAYS entries are only queued when not
 * already connected.
 * NOTE(review): extraction gaps — `break;` statements between the
 * switch cases and early-return paths are elided in this view.
 */
5730 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5731 u8 addr_type, u8 auto_connect)
5733 struct hci_dev *hdev = req->hdev;
5734 struct hci_conn_params *params;
5736 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
5740 if (params->auto_connect == auto_connect)
5743 list_del_init(&params->action);
5745 switch (auto_connect) {
5746 case HCI_AUTO_CONN_DISABLED:
5747 case HCI_AUTO_CONN_LINK_LOSS:
5748 __hci_update_background_scan(req);
5750 case HCI_AUTO_CONN_REPORT:
5751 list_add(&params->action, &hdev->pend_le_reports);
5752 __hci_update_background_scan(req);
5754 case HCI_AUTO_CONN_DIRECT:
5755 case HCI_AUTO_CONN_ALWAYS:
5756 if (!is_connected(hdev, addr, addr_type)) {
5757 list_add(&params->action, &hdev->pend_le_conns);
5758 __hci_update_background_scan(req);
5763 params->auto_connect = auto_connect;
5765 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Broadcast MGMT_EV_DEVICE_ADDED (address, type, action) to mgmt
 * sockets, skipping the originating socket sk.
 */
5771 static void device_added(struct sock *sk, struct hci_dev *hdev,
5772 bdaddr_t *bdaddr, u8 type, u8 action)
5774 struct mgmt_ev_device_added ev;
5776 bacpy(&ev.addr.bdaddr, bdaddr);
5777 ev.addr.type = type;
5780 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Completion handler for the Add Device HCI request: find the pending
 * MGMT_OP_ADD_DEVICE command and complete it with the mapped status.
 */
5783 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5785 struct mgmt_pending_cmd *cmd;
5787 BT_DBG("status 0x%02x", status);
5791 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5795 cmd->cmd_complete(cmd, mgmt_status(status));
5796 mgmt_pending_remove(cmd);
5799 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_DEVICE handler. For BR/EDR only action 0x01 (allow
 * incoming connections) is supported: the address goes on the whitelist
 * and page scan is updated. For LE the action maps to an auto-connect
 * policy (0x02 = always, 0x01 = direct, else report) applied through
 * hci_conn_params_set(). Emits MGMT_EV_DEVICE_ADDED and completes via
 * add_device_complete(); -ENODATA from hci_req_run means no HCI traffic
 * was needed and is treated as immediate success.
 * NOTE(review): extraction gaps — some branches/labels are elided.
 */
5802 static int add_device(struct sock *sk, struct hci_dev *hdev,
5803 void *data, u16 len)
5805 struct mgmt_cp_add_device *cp = data;
5806 struct mgmt_pending_cmd *cmd;
5807 struct hci_request req;
5808 u8 auto_conn, addr_type;
5811 BT_DBG("%s", hdev->name);
5813 if (!bdaddr_type_is_valid(cp->addr.type) ||
5814 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5815 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5816 MGMT_STATUS_INVALID_PARAMS,
5817 &cp->addr, sizeof(cp->addr));
5819 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5820 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5821 MGMT_STATUS_INVALID_PARAMS,
5822 &cp->addr, sizeof(cp->addr));
5824 hci_req_init(&req, hdev);
5828 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5834 cmd->cmd_complete = addr_cmd_complete;
5836 if (cp->addr.type == BDADDR_BREDR) {
5837 /* Only incoming connections action is supported for now */
5838 if (cp->action != 0x01) {
5839 err = cmd->cmd_complete(cmd,
5840 MGMT_STATUS_INVALID_PARAMS);
5841 mgmt_pending_remove(cmd);
5845 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5850 __hci_update_page_scan(&req);
5855 if (cp->addr.type == BDADDR_LE_PUBLIC)
5856 addr_type = ADDR_LE_DEV_PUBLIC;
5858 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt action onto an LE auto-connect policy. */
5860 if (cp->action == 0x02)
5861 auto_conn = HCI_AUTO_CONN_ALWAYS;
5862 else if (cp->action == 0x01)
5863 auto_conn = HCI_AUTO_CONN_DIRECT;
5865 auto_conn = HCI_AUTO_CONN_REPORT;
5867 /* If the connection parameters don't exist for this device,
5868 * they will be created and configured with defaults.
5870 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5872 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5873 mgmt_pending_remove(cmd);
5878 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5880 err = hci_req_run(&req, add_device_complete);
5882 /* ENODATA means no HCI commands were needed (e.g. if
5883 * the adapter is powered off).
5885 if (err == -ENODATA)
5886 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5887 mgmt_pending_remove(cmd);
5891 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_REMOVED to all mgmt sockets except the
 * originating socket @sk.
 */
5895 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5896 bdaddr_t *bdaddr, u8 type)
5898 struct mgmt_ev_device_removed ev;
5900 bacpy(&ev.addr.bdaddr, bdaddr);
5901 ev.addr.type = type;
5903 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Remove Device: finish the pending
 * mgmt command (if still present) with the translated HCI status.
 */
5906 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5908 struct mgmt_pending_cmd *cmd;
5910 BT_DBG("status 0x%02x", status);
5914 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5918 cmd->cmd_complete(cmd, mgmt_status(status));
5919 mgmt_pending_remove(cmd);
5922 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_REMOVE_DEVICE command: remove a single device from
 * the whitelist / LE connection parameter list, or - when the address is
 * BDADDR_ANY - remove all devices at once.
 *
 * Fix: restored "&params->action" / "&params->list" which had been
 * corrupted to the mojibake sequence "¶ms->..." (an HTML-entity
 * mangling of "&para"); list_del() requires the address of the
 * list_head members.
 */
5925 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5926 void *data, u16 len)
5928 struct mgmt_cp_remove_device *cp = data;
5929 struct mgmt_pending_cmd *cmd;
5930 struct hci_request req;
5933 BT_DBG("%s", hdev->name);
5935 hci_req_init(&req, hdev);
5939 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5945 cmd->cmd_complete = addr_cmd_complete;
/* Non-zero address: remove this one specific device. */
5947 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5948 struct hci_conn_params *params;
5951 if (!bdaddr_type_is_valid(cp->addr.type)) {
5952 err = cmd->cmd_complete(cmd,
5953 MGMT_STATUS_INVALID_PARAMS);
5954 mgmt_pending_remove(cmd);
5958 if (cp->addr.type == BDADDR_BREDR) {
5959 err = hci_bdaddr_list_del(&hdev->whitelist,
5963 err = cmd->cmd_complete(cmd,
5964 MGMT_STATUS_INVALID_PARAMS);
5965 mgmt_pending_remove(cmd);
5969 __hci_update_page_scan(&req);
5971 device_removed(sk, hdev, &cp->addr.bdaddr,
5976 if (cp->addr.type == BDADDR_LE_PUBLIC)
5977 addr_type = ADDR_LE_DEV_PUBLIC;
5979 addr_type = ADDR_LE_DEV_RANDOM;
5981 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5984 err = cmd->cmd_complete(cmd,
5985 MGMT_STATUS_INVALID_PARAMS);
5986 mgmt_pending_remove(cmd);
/* Disabled params were not added via Add Device, so refuse. */
5990 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5991 err = cmd->cmd_complete(cmd,
5992 MGMT_STATUS_INVALID_PARAMS);
5993 mgmt_pending_remove(cmd);
5997 list_del(&params->action);
5998 list_del(&params->list);
6000 __hci_update_background_scan(&req);
6002 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: clear everything; addr.type must be 0 here. */
6004 struct hci_conn_params *p, *tmp;
6005 struct bdaddr_list *b, *btmp;
6007 if (cp->addr.type) {
6008 err = cmd->cmd_complete(cmd,
6009 MGMT_STATUS_INVALID_PARAMS);
6010 mgmt_pending_remove(cmd);
6014 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6015 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6020 __hci_update_page_scan(&req);
6022 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6023 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6025 device_removed(sk, hdev, &p->addr, p->addr_type);
6026 list_del(&p->action);
6031 BT_DBG("All LE connection parameters were removed");
6033 __hci_update_background_scan(&req);
6037 err = hci_req_run(&req, remove_device_complete);
6039 /* ENODATA means no HCI commands were needed (e.g. if
6040 * the adapter is powered off).
6042 if (err == -ENODATA)
6043 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6044 mgmt_pending_remove(cmd);
6048 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_LOAD_CONN_PARAM command: replace the stored LE
 * connection parameters with the list supplied by userspace. Invalid
 * entries are skipped (logged), not treated as a command failure.
 *
 * Fix: restored "&param->addr.bdaddr" in the BT_DBG() and
 * hci_conn_params_add() calls, which had been corrupted to the mojibake
 * sequence "¶m->..." (an HTML-entity mangling of "&para").
 */
6052 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6055 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on param_count so expected_len below cannot overflow u16. */
6056 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6057 sizeof(struct mgmt_conn_param));
6058 u16 param_count, expected_len;
6061 if (!lmp_le_capable(hdev))
6062 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6063 MGMT_STATUS_NOT_SUPPORTED);
6065 param_count = __le16_to_cpu(cp->param_count);
6066 if (param_count > max_param_count) {
6067 BT_ERR("load_conn_param: too big param_count value %u",
6069 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6070 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared entry count exactly. */
6073 expected_len = sizeof(*cp) + param_count *
6074 sizeof(struct mgmt_conn_param);
6075 if (expected_len != len) {
6076 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6078 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6079 MGMT_STATUS_INVALID_PARAMS);
6082 BT_DBG("%s param_count %u", hdev->name, param_count);
6086 hci_conn_params_clear_disabled(hdev);
6088 for (i = 0; i < param_count; i++) {
6089 struct mgmt_conn_param *param = &cp->params[i];
6090 struct hci_conn_params *hci_param;
6091 u16 min, max, latency, timeout;
6094 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
6097 if (param->addr.type == BDADDR_LE_PUBLIC) {
6098 addr_type = ADDR_LE_DEV_PUBLIC;
6099 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6100 addr_type = ADDR_LE_DEV_RANDOM;
6102 BT_ERR("Ignoring invalid connection parameters");
6106 min = le16_to_cpu(param->min_interval);
6107 max = le16_to_cpu(param->max_interval);
6108 latency = le16_to_cpu(param->latency);
6109 timeout = le16_to_cpu(param->timeout);
6111 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6112 min, max, latency, timeout);
/* Range-check against the Bluetooth Core Spec limits before storing. */
6114 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6115 BT_ERR("Ignoring invalid connection parameters");
6119 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6122 BT_ERR("Failed to add connection parameters");
6126 hci_param->conn_min_interval = min;
6127 hci_param->conn_max_interval = max;
6128 hci_param->conn_latency = latency;
6129 hci_param->supervision_timeout = timeout;
6132 hci_dev_unlock(hdev);
6134 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handle the MGMT_OP_SET_EXTERNAL_CONFIG command: toggle the
 * HCI_EXT_CONFIGURED flag on controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. Only allowed while powered off.
 */
6138 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6139 void *data, u16 len)
6141 struct mgmt_cp_set_external_config *cp = data;
6145 BT_DBG("%s", hdev->name);
6147 if (hdev_is_powered(hdev))
6148 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6149 MGMT_STATUS_REJECTED);
6151 if (cp->config != 0x00 && cp->config != 0x01)
6152 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6153 MGMT_STATUS_INVALID_PARAMS);
6155 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6156 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6157 MGMT_STATUS_NOT_SUPPORTED);
6162 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6164 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6166 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6173 err = new_options(hdev, sk);
/* When the configured state changes, re-register the index under the
 * appropriate (configured/unconfigured) identity.
 */
6175 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6176 mgmt_index_removed(hdev);
6178 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6179 hci_dev_set_flag(hdev, HCI_CONFIG);
6180 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6182 queue_work(hdev->req_workqueue, &hdev->power_on);
6184 set_bit(HCI_RAW, &hdev->flags);
6185 mgmt_index_added(hdev);
6190 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_SET_PUBLIC_ADDRESS command: store a public address
 * for controllers that support programming it via a set_bdaddr driver
 * callback. Only allowed while powered off.
 */
6194 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6195 void *data, u16 len)
6197 struct mgmt_cp_set_public_address *cp = data;
6201 BT_DBG("%s", hdev->name);
6203 if (hdev_is_powered(hdev))
6204 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6205 MGMT_STATUS_REJECTED);
6207 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6208 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6209 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook to program the address there is nothing we can do. */
6211 if (!hdev->set_bdaddr)
6212 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6213 MGMT_STATUS_NOT_SUPPORTED);
6217 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6218 bacpy(&hdev->public_addr, &cp->bdaddr);
6220 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6227 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6228 err = new_options(hdev, sk);
/* If this made the controller fully configured, re-register the index
 * as a configured one and power it on for the AUTO_OFF setup phase.
 */
6230 if (is_configured(hdev)) {
6231 mgmt_index_removed(hdev);
6233 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6235 hci_dev_set_flag(hdev, HCI_CONFIG);
6236 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6238 queue_work(hdev->req_workqueue, &hdev->power_on);
6242 hci_dev_unlock(hdev);
/* Append one length-prefixed EIR field (type + data) to @eir at offset
 * @eir_len and return the new length. Caller must ensure the buffer is
 * large enough.
 */
6246 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* Field length byte counts the type octet plus the payload. */
6249 eir[eir_len++] = sizeof(type) + data_len;
6250 eir[eir_len++] = type;
6251 memcpy(&eir[eir_len], data, data_len);
6252 eir_len += data_len;
/* NOTE(review): the trailing "return eir_len;" is not visible in this excerpt. */
/* Handle the MGMT_OP_READ_LOCAL_OOB_EXT_DATA command: build an
 * EIR-formatted out-of-band data blob for either BR/EDR or LE
 * (depending on cp->type bitmask) and return it, also broadcasting it
 * via MGMT_EV_LOCAL_OOB_DATA_UPDATED to interested sockets.
 */
6257 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6258 void *data, u16 data_len)
6260 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6261 struct mgmt_rp_read_local_oob_ext_data *rp;
6264 u8 status, flags, role, addr[7], hash[16], rand[16];
6267 BT_DBG("%s", hdev->name);
6269 if (!hdev_is_powered(hdev))
6270 return mgmt_cmd_complete(sk, hdev->id,
6271 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6272 MGMT_STATUS_NOT_POWERED,
6273 &cp->type, sizeof(cp->type));
/* Only the exact BR/EDR bit or the exact LE (public|random) pair are
 * valid type values; anything else is rejected below.
 */
6276 case BIT(BDADDR_BREDR):
6277 status = mgmt_bredr_support(hdev);
6279 return mgmt_cmd_complete(sk, hdev->id,
6280 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6285 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6286 status = mgmt_le_support(hdev);
6288 return mgmt_cmd_complete(sk, hdev->id,
6289 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
/* LE worst case: bdaddr(9) + role(3) + confirm(18) + random(18) + flags(3). */
6292 eir_len = 9 + 3 + 18 + 18 + 3;
6295 return mgmt_cmd_complete(sk, hdev->id,
6296 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6297 MGMT_STATUS_INVALID_PARAMS,
6298 &cp->type, sizeof(cp->type));
6303 rp_len = sizeof(*rp) + eir_len;
6304 rp = kmalloc(rp_len, GFP_ATOMIC);
6306 hci_dev_unlock(hdev);
6312 case BIT(BDADDR_BREDR):
6313 eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
6314 hdev->dev_class, 3);
6316 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
/* With Secure Connections enabled, generate the SC OOB confirm/random
 * values; failure aborts the command.
 */
6317 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6318 smp_generate_oob(hdev, hash, rand) < 0) {
6319 hci_dev_unlock(hdev);
6320 err = mgmt_cmd_complete(sk, hdev->id,
6321 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6323 &cp->type, sizeof(cp->type));
/* Choose the address to advertise: RPA if privacy is on, otherwise
 * static or public depending on configuration.
 */
6327 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6328 memcpy(addr, &hdev->rpa, 6);
6330 } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6331 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6332 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6333 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6334 memcpy(addr, &hdev->static_addr, 6);
6337 memcpy(addr, &hdev->bdaddr, 6);
6341 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6342 addr, sizeof(addr));
6344 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6349 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6350 &role, sizeof(role));
6352 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6353 eir_len = eir_append_data(rp->eir, eir_len,
6355 hash, sizeof(hash));
6357 eir_len = eir_append_data(rp->eir, eir_len,
6359 rand, sizeof(rand));
6362 flags = get_adv_discov_flags(hdev);
6364 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6365 flags |= LE_AD_NO_BREDR;
6367 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6368 &flags, sizeof(flags));
6372 rp->type = cp->type;
6373 rp->eir_len = cpu_to_le16(eir_len);
6375 hci_dev_unlock(hdev);
/* Mark this socket as wanting OOB data update events from now on. */
6377 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6379 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6380 MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
6384 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6385 rp, sizeof(*rp) + eir_len,
6386 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Handle the MGMT_OP_READ_ADV_FEATURES command: report advertising
 * capabilities. Only a single advertising instance is supported at this
 * revision, so the instance list contains at most one entry.
 */
6394 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6395 void *data, u16 data_len)
6397 struct mgmt_rp_read_adv_features *rp;
6402 BT_DBG("%s", hdev->name);
6406 rp_len = sizeof(*rp);
6408 /* Currently only one instance is supported, so just add 1 to the
6411 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6415 rp = kmalloc(rp_len, GFP_ATOMIC);
6417 hci_dev_unlock(hdev);
/* No advertising flags are supported yet, hence supported_flags = 0. */
6421 rp->supported_flags = cpu_to_le32(0);
6422 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6423 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6424 rp->max_instances = 1;
6426 /* Currently only one instance is supported, so simply return the
6427 * current instance number.
6430 rp->num_instances = 1;
6431 rp->instance[0] = 1;
6433 rp->num_instances = 0;
6436 hci_dev_unlock(hdev);
6438 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6439 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Validate that @data of @len bytes is a well-formed sequence of
 * length-prefixed advertising (TLV) fields that fits within the maximum
 * AD length.
 */
6446 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6449 u8 max_len = HCI_MAX_AD_LENGTH;
6452 /* TODO: Correctly reduce len based on adv_flags. */
6457 /* Make sure that the data is correctly formatted. */
6458 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6461 /* If the current field length would exceed the total data
6462 * length, then it's invalid.
6464 if (i + cur_len >= len)
/* Broadcast MGMT_EV_ADVERTISING_ADDED for @instance to all mgmt sockets
 * except the originating socket @sk.
 */
6471 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
6474 struct mgmt_ev_advertising_added ev;
6476 ev.instance = instance;
6478 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADVERTISING_REMOVED for @instance to all mgmt
 * sockets except the originating socket @sk (may be NULL).
 */
6481 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
6484 struct mgmt_ev_advertising_removed ev;
6486 ev.instance = instance;
6488 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Add Advertising. On failure the
 * instance is torn down and an Advertising Removed event is emitted;
 * otherwise the pending command is completed with the instance number.
 */
6491 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6494 struct mgmt_pending_cmd *cmd;
6495 struct mgmt_rp_add_advertising rp;
6497 BT_DBG("status %d", status);
6501 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
/* Failure path: drop the (single) instance and notify listeners.
 * cmd may be NULL here, hence the conditional socket.
 */
6504 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6505 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6506 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6515 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6516 mgmt_status(status));
6518 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6519 mgmt_status(status), &rp, sizeof(rp));
6521 mgmt_pending_remove(cmd);
6524 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_ADD_ADVERTISING command: store instance 1's
 * advertising and scan response data and, when powered and not using
 * the legacy Set Advertising mode, program the controller.
 */
6527 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6528 void *data, u16 data_len)
6530 struct mgmt_cp_add_advertising *cp = data;
6531 struct mgmt_rp_add_advertising rp;
6535 struct mgmt_pending_cmd *cmd;
6536 struct hci_request req;
6538 BT_DBG("%s", hdev->name);
6540 status = mgmt_le_support(hdev);
6542 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6545 flags = __le32_to_cpu(cp->flags);
6547 /* The current implementation only supports adding one instance and
6548 * doesn't support flags.
6550 if (cp->instance != 0x01 || flags)
6551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6552 MGMT_STATUS_INVALID_PARAMS);
/* Refuse while another advertising/LE state change is in flight. */
6556 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6557 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6558 pending_find(MGMT_OP_SET_LE, hdev)) {
6559 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Validate both TLV blobs: adv data first, scan rsp following it. */
6564 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len) ||
6565 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6566 cp->scan_rsp_len)) {
6567 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6568 MGMT_STATUS_INVALID_PARAMS);
6572 hdev->adv_instance.flags = flags;
6573 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6574 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6576 if (cp->adv_data_len)
6577 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
6579 if (cp->scan_rsp_len)
6580 memcpy(hdev->adv_instance.scan_rsp_data,
6581 cp->data + cp->adv_data_len, cp->scan_rsp_len);
/* Only announce Advertising Added the first time the flag is set. */
6583 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6584 advertising_added(sk, hdev, 1);
6586 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6587 * we have no HCI communication to make. Simply return.
6589 if (!hdev_is_powered(hdev) ||
6590 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6593 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6597 /* We're good to go, update advertising data, parameters, and start
6600 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6607 hci_req_init(&req, hdev);
6609 update_adv_data(&req);
6610 update_scan_rsp_data(&req);
6611 enable_advertising(&req);
6613 err = hci_req_run(&req, add_advertising_complete);
6615 mgmt_pending_remove(cmd);
6618 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising. The instance
 * was already removed before the request ran, so this always completes
 * the pending command with success regardless of @status.
 */
6623 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6626 struct mgmt_pending_cmd *cmd;
6627 struct mgmt_rp_remove_advertising rp;
6629 BT_DBG("status %d", status);
6633 /* A failure status here only means that we failed to disable
6634 * advertising. Otherwise, the advertising instance has been removed,
6635 * so report success.
6637 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6643 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6645 mgmt_pending_remove(cmd);
6648 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_REMOVE_ADVERTISING command: clear instance 1 (or
 * all instances when cp->instance == 0) and disable advertising on the
 * controller when necessary.
 */
6651 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6652 void *data, u16 data_len)
6654 struct mgmt_cp_remove_advertising *cp = data;
6655 struct mgmt_rp_remove_advertising rp;
6657 struct mgmt_pending_cmd *cmd;
6658 struct hci_request req;
6660 BT_DBG("%s", hdev->name);
6662 /* The current implementation only allows modifying instance no 1. A
6663 * value of 0 indicates that all instances should be cleared.
6665 if (cp->instance > 1)
6666 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6667 MGMT_STATUS_INVALID_PARAMS);
/* Refuse while another advertising/LE state change is in flight. */
6671 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6672 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6673 pending_find(MGMT_OP_SET_LE, hdev)) {
6674 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
/* Cannot remove an instance that was never added. */
6679 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
6680 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6681 MGMT_STATUS_INVALID_PARAMS);
6685 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6687 advertising_removed(sk, hdev, 1);
6689 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6691 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6692 * we have no HCI communication to make. Simply return.
6694 if (!hdev_is_powered(hdev) ||
6695 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6697 err = mgmt_cmd_complete(sk, hdev->id,
6698 MGMT_OP_REMOVE_ADVERTISING,
6699 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6703 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6710 hci_req_init(&req, hdev);
6711 disable_advertising(&req);
6713 err = hci_req_run(&req, remove_advertising_complete);
6715 mgmt_pending_remove(cmd);
6718 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands. Entries MUST stay in MGMT opcode
 * order because the opcode is used as a direct index into this array.
 * Each entry carries the handler, the expected (minimum) parameter size
 * and optional HCI_MGMT_* flags (variable length, untrusted access,
 * allowed while unconfigured, no hdev required).
 */
6723 static const struct hci_mgmt_handler mgmt_handlers[] = {
6724 { NULL }, /* 0x0000 (no command) */
6725 { read_version, MGMT_READ_VERSION_SIZE,
6727 HCI_MGMT_UNTRUSTED },
6728 { read_commands, MGMT_READ_COMMANDS_SIZE,
6730 HCI_MGMT_UNTRUSTED },
6731 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6733 HCI_MGMT_UNTRUSTED },
6734 { read_controller_info, MGMT_READ_INFO_SIZE,
6735 HCI_MGMT_UNTRUSTED },
6736 { set_powered, MGMT_SETTING_SIZE },
6737 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6738 { set_connectable, MGMT_SETTING_SIZE },
6739 { set_fast_connectable, MGMT_SETTING_SIZE },
6740 { set_bondable, MGMT_SETTING_SIZE },
6741 { set_link_security, MGMT_SETTING_SIZE },
6742 { set_ssp, MGMT_SETTING_SIZE },
6743 { set_hs, MGMT_SETTING_SIZE },
6744 { set_le, MGMT_SETTING_SIZE },
6745 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6746 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6747 { add_uuid, MGMT_ADD_UUID_SIZE },
6748 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6749 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6751 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6753 { disconnect, MGMT_DISCONNECT_SIZE },
6754 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6755 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6756 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6757 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6758 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6759 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6760 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6761 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6762 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6763 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6764 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6765 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6766 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6768 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6769 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6770 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6771 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6772 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6773 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6774 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6775 { set_advertising, MGMT_SETTING_SIZE },
6776 { set_bredr, MGMT_SETTING_SIZE },
6777 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6778 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6779 { set_secure_conn, MGMT_SETTING_SIZE },
6780 { set_debug_keys, MGMT_SETTING_SIZE },
6781 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6782 { load_irks, MGMT_LOAD_IRKS_SIZE,
6784 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6785 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6786 { add_device, MGMT_ADD_DEVICE_SIZE },
6787 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6788 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6790 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6792 HCI_MGMT_UNTRUSTED },
6793 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6794 HCI_MGMT_UNCONFIGURED |
6795 HCI_MGMT_UNTRUSTED },
6796 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6797 HCI_MGMT_UNCONFIGURED },
6798 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6799 HCI_MGMT_UNCONFIGURED },
6800 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6802 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6803 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6805 HCI_MGMT_UNTRUSTED },
6806 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
6807 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
6809 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Notify mgmt users that a new controller index appeared. Depending on
 * configuration state, emit either Unconfigured Index Added or Index
 * Added, plus the Extended Index Added event for sockets using it.
 * Raw-only (HCI_QUIRK_RAW_DEVICE) controllers are never announced.
 */
6812 void mgmt_index_added(struct hci_dev *hdev)
6814 struct mgmt_ev_ext_index ev;
6816 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6819 switch (hdev->dev_type) {
6821 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6822 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6823 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6826 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6827 HCI_MGMT_INDEX_EVENTS);
6840 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6841 HCI_MGMT_EXT_INDEX_EVENTS);
/* Notify mgmt users that a controller index disappeared. All pending
 * commands for the index are completed with INVALID_INDEX first, then
 * the matching (Unconfigured) Index Removed and Extended Index Removed
 * events are emitted. Raw-only controllers are never announced.
 */
6844 void mgmt_index_removed(struct hci_dev *hdev)
6846 struct mgmt_ev_ext_index ev;
6847 u8 status = MGMT_STATUS_INVALID_INDEX;
6849 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6852 switch (hdev->dev_type) {
/* opcode 0 matches every pending command on this hdev. */
6854 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6856 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6857 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6858 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6861 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6862 HCI_MGMT_INDEX_EVENTS);
6875 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6876 HCI_MGMT_EXT_INDEX_EVENTS);
6879 /* This function requires the caller holds hdev->lock */
/* Re-sort every stored LE connection parameter entry back onto the
 * pend_le_conns / pend_le_reports action list that matches its
 * auto_connect policy, then refresh background scanning. Used when
 * powering on, since an AUTO_OFF "power off" leaves these lists stale.
 */
6880 static void restart_le_actions(struct hci_request *req)
6882 struct hci_dev *hdev = req->hdev;
6883 struct hci_conn_params *p;
6885 list_for_each_entry(p, &hdev->le_conn_params, list) {
6886 /* Needed for AUTO_OFF case where might not "really"
6887 * have been powered off.
6889 list_del_init(&p->action);
6891 switch (p->auto_connect) {
6892 case HCI_AUTO_CONN_DIRECT:
6893 case HCI_AUTO_CONN_ALWAYS:
6894 list_add(&p->action, &hdev->pend_le_conns);
6896 case HCI_AUTO_CONN_REPORT:
6897 list_add(&p->action, &hdev->pend_le_reports);
6904 __hci_update_background_scan(req);
/* Completion callback for the power-on HCI request: answer any pending
 * Set Powered commands and broadcast the new settings.
 */
6907 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6909 struct cmd_lookup match = { NULL, hdev };
6911 BT_DBG("status 0x%02x", status);
6914 /* Register the available SMP channels (BR/EDR and LE) only
6915 * when successfully powering on the controller. This late
6916 * registration is required so that LE SMP can clearly
6917 * decide if the public address or static address is used.
6924 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6926 new_settings(hdev, match.sk);
6928 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after power-on: SSP/SC host support,
 * LE host support, advertising data, auth enable and page scan.
 * Returns the hci_req_run() result (0 when commands were queued).
 */
6934 static int powered_update_hci(struct hci_dev *hdev)
6936 struct hci_request req;
6939 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt has it on but the host feature
 * bit is not yet set.
 */
6941 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
6942 !lmp_host_ssp_capable(hdev)) {
6945 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6947 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6950 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6951 sizeof(support), &support);
6955 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
6956 lmp_bredr_capable(hdev)) {
6957 struct hci_cp_write_le_host_supported cp;
6962 /* Check first if we already have the right
6963 * host state (host features set)
6965 if (cp.le != lmp_host_le_capable(hdev) ||
6966 cp.simul != lmp_host_le_br_capable(hdev))
6967 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6971 if (lmp_le_capable(hdev)) {
6972 /* Make sure the controller has a good default for
6973 * advertising data. This also applies to the case
6974 * where BR/EDR was toggled during the AUTO_OFF phase.
6976 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
6977 update_adv_data(&req);
6978 update_scan_rsp_data(&req);
6981 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6982 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
6983 enable_advertising(&req);
6985 restart_le_actions(&req);
/* Sync link-level authentication with the mgmt Link Security setting. */
6988 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
6989 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6990 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6991 sizeof(link_sec), &link_sec);
6993 if (lmp_bredr_capable(hdev)) {
6994 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
6995 write_fast_connectable(&req, true);
6997 write_fast_connectable(&req, false);
6998 __hci_update_page_scan(&req);
7004 return hci_req_run(&req, powered_complete);
/* Called by the core when the controller's power state changes. On
 * power-on, push the mgmt settings to the controller; on power-off,
 * fail all pending commands and broadcast the new settings.
 */
7007 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7009 struct cmd_lookup match = { NULL, hdev };
7010 u8 status, zero_cod[] = { 0, 0, 0 };
7013 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* Power-on: if commands were queued, powered_complete() answers the
 * pending Set Powered commands later; otherwise answer them now.
 */
7017 if (powered_update_hci(hdev) == 0)
7020 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
7025 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7027 /* If the power off is because of hdev unregistration let
7028 * use the appropriate INVALID_INDEX status. Otherwise use
7029 * NOT_POWERED. We cover both scenarios here since later in
7030 * mgmt_index_removed() any hci_conn callbacks will have already
7031 * been triggered, potentially causing misleading DISCONNECTED
7034 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7035 status = MGMT_STATUS_INVALID_INDEX;
7037 status = MGMT_STATUS_NOT_POWERED;
7039 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* A non-zero class of device resets to zero when powered off; tell
 * listeners about the change.
 */
7041 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7042 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7043 zero_cod, sizeof(zero_cod), NULL);
7046 err = new_settings(hdev, match.sk);
/* Called when powering on the controller failed: fail the pending Set
 * Powered command with RFKILLED (when blocked by rfkill) or FAILED.
 */
7054 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7056 struct mgmt_pending_cmd *cmd;
7059 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7063 if (err == -ERFKILL)
7064 status = MGMT_STATUS_RFKILLED;
7066 status = MGMT_STATUS_FAILED;
7068 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7070 mgmt_pending_remove(cmd);
/* Discoverable timeout expired: clear the discoverable flags, stop
 * inquiry scan (keeping page scan), refresh advertising data when
 * needed and broadcast the new settings.
 */
7073 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7075 struct hci_request req;
7079 /* When discoverable timeout triggers, then just make sure
7080 * the limited discoverable flag is cleared. Even in the case
7081 * of a timeout triggered from general discoverable, it is
7082 * safe to unconditionally clear the flag.
7084 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7085 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7087 hci_req_init(&req, hdev);
7088 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* SCAN_PAGE alone keeps us connectable but no longer discoverable. */
7089 u8 scan = SCAN_PAGE;
7090 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7091 sizeof(scan), &scan);
7095 /* Advertising instances don't use the global discoverable setting, so
7096 * only update AD if advertising was enabled using Set Advertising.
7098 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7099 update_adv_data(&req);
7101 hci_req_run(&req, NULL);
7103 hdev->discov_timeout = 0;
7105 new_settings(hdev, NULL);
7107 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key,
 * passing @persistent through as the store hint for userspace.
 */
7110 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7113 struct mgmt_ev_new_link_key ev;
7115 memset(&ev, 0, sizeof(ev));
7117 ev.store_hint = persistent;
7118 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7119 ev.key.addr.type = BDADDR_BREDR;
7120 ev.key.type = key->type;
7121 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7122 ev.key.pin_len = key->pin_len;
7124 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Translate an SMP long-term-key type plus its authenticated flag into
 * the corresponding MGMT_LTK_* wire value.
 */
7127 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7129 switch (ltk->type) {
7132 if (ltk->authenticated)
7133 return MGMT_LTK_AUTHENTICATED;
7134 return MGMT_LTK_UNAUTHENTICATED;
7136 if (ltk->authenticated)
7137 return MGMT_LTK_P256_AUTH;
7138 return MGMT_LTK_P256_UNAUTH;
7139 case SMP_LTK_P256_DEBUG:
7140 return MGMT_LTK_P256_DEBUG;
/* Unknown types fall back to the weakest classification. */
7143 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term key. The store
 * hint is forced to 0 for non-identity random addresses since such keys
 * become useless when the peer's address changes.
 */
7146 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7148 struct mgmt_ev_new_long_term_key ev;
7150 memset(&ev, 0, sizeof(ev));
7152 /* Devices using resolvable or non-resolvable random addresses
7153 * without providing an indentity resolving key don't require
7154 * to store long term keys. Their addresses will change the
7157 * Only when a remote device provides an identity address
7158 * make sure the long term key is stored. If the remote
7159 * identity is known, the long term keys are internally
7160 * mapped to the identity address. So allow static random
7161 * and public addresses here.
7163 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7164 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7165 ev.store_hint = 0x00;
7167 ev.store_hint = persistent;
7169 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7170 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7171 ev.key.type = mgmt_ltk_type(key);
7172 ev.key.enc_size = key->enc_size;
7173 ev.key.ediv = key->ediv;
7174 ev.key.rand = key->rand;
7176 if (key->type == SMP_LTK)
7179 memcpy(ev.key.val, key->val, sizeof(key->val));
7181 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key. Storage is
 * only suggested when the peer actually uses a resolvable private
 * address (non-zero rpa).
 */
7184 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7186 struct mgmt_ev_new_irk ev;
7188 memset(&ev, 0, sizeof(ev));
7190 /* For identity resolving keys from devices that are already
7191 * using a public address or static random address, do not
7192 * ask for storing this key. The identity resolving key really
7193 * is only mandatory for devices using resovlable random
7196 * Storing all identity resolving keys has the downside that
7197 * they will be also loaded on next boot of they system. More
7198 * identity resolving keys, means more time during scanning is
7199 * needed to actually resolve these addresses.
7201 if (bacmp(&irk->rpa, BDADDR_ANY))
7202 ev.store_hint = 0x01;
7204 ev.store_hint = 0x00;
7206 bacpy(&ev.rpa, &irk->rpa);
7207 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7208 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7209 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7211 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key. As with
 * LTKs, the store hint is forced to 0 for non-identity random
 * addresses.
 */
7214 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7217 struct mgmt_ev_new_csrk ev;
7219 memset(&ev, 0, sizeof(ev));
7221 /* Devices using resolvable or non-resolvable random addresses
7222 * without providing an indentity resolving key don't require
7223 * to store signature resolving keys. Their addresses will change
7224 * the next time around.
7226 * Only when a remote device provides an identity address
7227 * make sure the signature resolving key is stored. So allow
7228 * static random and public addresses here.
7230 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7231 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7232 ev.store_hint = 0x00;
7234 ev.store_hint = persistent;
7236 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7237 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7238 ev.key.type = csrk->type;
7239 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7241 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with connection parameters learned for a
 * remote LE device so userspace can decide whether to store them.
 * Parameters are only reported for identity addresses.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
	struct mgmt_ev_new_conn_param ev;

	/* NOTE(review): the 'return;' for this failing check appears to be
	 * missing from this excerpt.
	 */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* mgmt wire format is little endian */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED for a new connection, packing either the
 * cached LE advertising data or (for BR/EDR) the remote name and class
 * of device into the event's EIR field.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
	struct mgmt_ev_device_connected *ev = (void *) buf;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		/* Reuse the advertising data cached on the connection */
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
		/* BR/EDR path: append remote name, if known */
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,

	/* Append class of device only when it is known (non-zero) */
	if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  conn->dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback for MGMT_OP_DISCONNECT: complete the
 * pending command with success and record its socket via *sk so the
 * caller can attribute the disconnect event to that socket.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	/* NOTE(review): the lines assigning/holding *sk from cmd->sk appear
	 * to be missing from this excerpt.
	 */
	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback for MGMT_OP_UNPAIR_DEVICE: send the
 * Device Unpaired event for the command's address and complete the
 * pending command with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
/* Return whether a Set Powered (off) command is currently pending for
 * this controller. NOTE(review): the lines inspecting cp->val and the
 * return statements appear to be missing from this excerpt.
 */
bool mgmt_powering_down(struct hci_dev *hdev)
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED for a closed connection, completing
 * any pending Disconnect commands first, and kick a queued power-off
 * once the last connection is gone.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	/* NOTE(review): the early-out bodies for these two guards appear to
	 * be missing from this excerpt.
	 */
	if (!mgmt_connected)

	if (link_type != ACL_LINK && link_type != LE_LINK)

	/* Complete pending Disconnect commands; sk picks up the requester */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Handle a failed HCI disconnect: flush pending Unpair Device commands
 * and, if a pending Disconnect command matches this address, complete
 * it with the mapped failure status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);

	/* NOTE(review): the NULL check on cmd, the cp assignment and the
	 * early-return bodies for these guards appear to be missing from
	 * this excerpt.
	 */
	if (bacmp(bdaddr, &cp->addr.bdaddr))

	if (cp->addr.type != bdaddr_type)

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the mapped HCI status, kicking a
 * queued power-off once the last connection attempt is gone.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST for a BR/EDR PIN code request.
 * NOTE(review): the line copying 'secure' into the event appears to be
 * missing from this excerpt.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;	/* PIN codes exist only on BR/EDR */

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the mapped HCI status.
 * NOTE(review): the NULL check on cmd appears to be missing from this
 * excerpt.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the mapped
 * HCI status. NOTE(review): the NULL check on cmd appears to be missing
 * from this excerpt.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm the
 * numeric comparison value during pairing. Returns the mgmt_event()
 * result.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);	/* mgmt wire format is little endian */

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to supply a
 * passkey during pairing. Returns the mgmt_event() result.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common helper for the four user-pairing reply completions below: find
 * the pending command for 'opcode' and complete it with the mapped HCI
 * status. NOTE(review): the NULL check on cmd and the return statements
 * appear to be missing from this excerpt.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Negative Reply command.
 * NOTE(review): the 'status,' continuation line of the call appears to
 * be missing from this excerpt.
 */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Negative Reply command.
 * NOTE(review): the 'status,' continuation line of the call appears to
 * be missing from this excerpt.
 */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey and
 * how many digits were entered so far. Returns the mgmt_event() result.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);	/* mgmt wire format is little endian */
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: emit MGMT_EV_AUTH_FAILED (addressed
 * to the pairing requester's socket when one exists) and complete any
 * matching pending pairing command with the mapped status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	/* NOTE(review): a guard ensuring cmd is non-NULL before these two
	 * calls appears to be missing from this excerpt.
	 */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);
/* Completion handler for enabling/disabling link-level authentication:
 * on error, fail all pending Set Link Security commands; otherwise sync
 * HCI_LINK_SECURITY with the controller's HCI_AUTH flag, answer pending
 * commands and broadcast new settings when something changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
	struct cmd_lookup match = { NULL, hdev };

	u8 mgmt_err = mgmt_status(status);
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
			     cmd_status_rsp, &mgmt_err);

	/* Mirror the controller's auth state into the dev flag; 'changed'
	 * is true only when the flag actually flipped.
	 * NOTE(review): an 'else' line appears to be missing between the
	 * two assignments in this excerpt.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,

	new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command that clears the controller's extended
 * inquiry response data, and wipe the cached copy in hdev->eir. Only
 * applicable when the controller supports extended inquiry.
 */
static void clear_eir(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	/* NOTE(review): the 'return;' for this capability check appears to
	 * be missing from this excerpt.
	 */
	if (!lmp_ext_inq_capable(hdev))

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for enabling/disabling Simple Secure Pairing: on
 * error, roll back HCI_SSP_ENABLED/HCI_HS_ENABLED and fail pending Set
 * SSP commands; on success, sync the flags, answer pending commands,
 * broadcast settings, and update SSP debug mode via an HCI request.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	u8 mgmt_err = mgmt_status(status);

	/* Error path: an enable that raced with a flag change must also
	 * clear high-speed support, which depends on SSP.
	 */
	if (enable && hci_dev_test_and_clear_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		new_settings(hdev, NULL);

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,

	/* Success path: mirror the requested SSP state into the dev flags.
	 * NOTE(review): the if/else lines around these three assignments
	 * appear to be missing from this excerpt.
	 */
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		changed = hci_dev_test_and_clear_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_HS_ENABLED);

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);

	hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: remember the first pending command's
 * socket in the cmd_lookup, taking a reference on it.
 */
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);	/* dropped by the caller via sock_put() */
/* Completion handler for class-of-device updates: find the socket of
 * whichever pending command triggered the write (Set Dev Class, Add or
 * Remove UUID) and broadcast the Class Of Dev Changed event to everyone
 * else.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   dev_class, 3, NULL);
/* Completion handler for local name changes: cache the new name in
 * hdev->dev_name and broadcast Local Name Changed, except while the
 * controller is being powered on (no mgmt signals during power-up).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

	/* If this is a HCI command related to powering on the
	 * HCI dev don't send any mgmt signals.
	 * NOTE(review): the early-out body for this guard appears to be
	 * missing from this excerpt.
	 */
	if (pending_find(MGMT_OP_SET_POWERED, hdev))

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
/* Completion handler for Read Local OOB Data: answer the pending
 * command with an error status, or with the 192-bit (and, when BR/EDR
 * Secure Connections is enabled, 256-bit) hash/randomizer values. The
 * reply is truncated when the 256-bit values are unavailable.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			mgmt_status(status));
		struct mgmt_rp_read_local_oob_data rp;
		size_t rp_size = sizeof(rp);

		memcpy(rp.hash192, hash192, sizeof(rp.hash192));
		memcpy(rp.rand192, rand192, sizeof(rp.rand192));

		/* NOTE(review): the else branch shrinking rp_size appears to
		 * have lost its 'else' line in this excerpt.
		 */
		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);

		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_READ_LOCAL_OOB_DATA, 0,

	mgmt_pending_remove(cmd);
/* Return whether the 128-bit 'uuid' appears in the 'uuids' array of
 * 'uuid_count' entries (byte-wise comparison).
 */
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/AD structures in 'eir' and return whether any contained
 * service UUID (16-, 32- or 128-bit) matches one of the 128-bit UUIDs
 * in 'uuids'. Shorter UUIDs are expanded onto the Bluetooth Base UUID
 * before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + payload */

		/* Reject a structure that would run past the buffer */
		if (eir_len - parsed < field_len + 1)

		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte little-endian UUIDs start at eir[2];
			 * 'i + 3 <= field_len' keeps both bytes in bounds.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))

		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte little-endian UUIDs, placed into bytes
			 * 12..15 of the base UUID.
			 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))

		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 16-byte UUIDs, compared directly */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))

		/* Advance to the next EIR structure */
		parsed += field_len + 1;
		eir += field_len + 1;
/* Schedule a delayed restart of LE scanning, used with controllers
 * whose strict duplicate filtering would otherwise hide RSSI updates.
 * No-op when not scanning or when the current scan window would end
 * before the restart would take effect.
 */
static void restart_le_scan(struct hci_dev *hdev)
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))

	/* Don't restart when the scan would expire before the delay */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
/* Apply the Start Service Discovery filter (RSSI threshold and UUID
 * list) to a found device's EIR and scan response data, returning
 * whether the result should be reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
/* Build and emit MGMT_EV_DEVICE_FOUND for an inquiry/scan result,
 * applying discovery filtering first and packing EIR, class of device
 * and scan response data into the event's variable-length tail.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
	struct mgmt_ev_device_found *ev = (void *)buf;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 * NOTE(review): the early-return bodies of these guards appear to
	 * be missing from this excerpt.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = cpu_to_le32(flags);

	/* Copy EIR or advertising data into event */
	memcpy(ev->eir, eir, eir_len);

	/* Append class of device unless the EIR already carries one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit MGMT_EV_DEVICE_FOUND carrying only a resolved remote name as an
 * EIR Complete Local Name field (used after remote name resolution).
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];	/* +2 for EIR len/type bytes */

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast MGMT_EV_DISCOVERING with the current discovery type and the
 * new discovering state (started/stopped).
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* HCI request completion callback for re-enabling advertising; only
 * logs the status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising after it was implicitly stopped (e.g. by a
 * connection), but only when advertising or an advertising instance is
 * configured to be active.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
	struct hci_request req;

	/* NOTE(review): the early-out body for this guard appears to be
	 * missing from this excerpt.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
/* Registration descriptor for the mgmt control channel, handed to
 * hci_mgmt_chan_register() at init time.
 */
static struct hci_mgmt_chan chan = {
	.channel = HCI_CHANNEL_CONTROL,
	.handler_count = ARRAY_SIZE(mgmt_handlers),
	.handlers = mgmt_handlers,
	.hdev_init = mgmt_init_hdev,
8019 return hci_mgmt_chan_register(&chan);
8022 void mgmt_exit(void)
8024 hci_mgmt_chan_unregister(&chan);