2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
/* Management interface version reported via MGMT_OP_READ_VERSION. */
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
/* Opcodes accepted on trusted (privileged) management sockets; the list
 * is reported back to userspace by MGMT_OP_READ_COMMANDS.
 * NOTE(review): the extraction shows gaps in the original numbering, so
 * some entries are missing from this view.
 */
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
/* Events delivered to trusted management sockets; reported back to
 * userspace by MGMT_OP_READ_COMMANDS alongside mgmt_commands[].
 */
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
/* Read-only subset of opcodes allowed on untrusted management sockets
 * (see the HCI_SOCK_TRUSTED checks in read_commands()).
 */
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
/* Subset of events that untrusted management sockets may receive. */
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
/* How long the service/class cache stays hot before service_cache_off()
 * runs (2 seconds, in jiffies).
 */
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16 zero bytes; usage not visible in this chunk — presumably compared
 * against all-zero link/LTK key values. TODO confirm against callers.
 */
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
/* Broadcast an index-related event on the MGMT control channel.
 * NOTE(review): the remaining parameters and the trailing arguments of
 * the mgmt_send_event() call are not visible in this extraction.
 */
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Send an event on the control channel limited to sockets carrying the
 * given flag, skipping skip_sk. The tail of the mgmt_send_event() call
 * is not visible in this extraction.
 */
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Send a generic MGMT event to control-channel sockets that opted in to
 * HCI_MGMT_GENERIC_EVENTS, skipping skip_sk.
 */
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
/* Send an MGMT event to all trusted control-channel sockets except
 * skip_sk.
 */
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
/* MGMT_OP_READ_VERSION handler: reply with MGMT_VERSION/MGMT_REVISION.
 * NOTE(review): the final arguments of mgmt_cmd_complete() (the reply
 * buffer and its size) are not visible in this extraction.
 */
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
/* Wire format is little-endian. */
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: report supported command and event
 * opcodes. Trusted sockets get the full lists; untrusted sockets only
 * the read-only subsets. Opcodes are serialized little-endian into the
 * flexible rp->opcodes array, commands first, then events.
 */
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Reply is the fixed header plus one __le16 per command and event. */
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
/* NOTE(review): the kmalloc() NULL check and the kfree()/return tail
 * are not visible in this extraction.
 */
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: list ids of configured BR/EDR
 * controllers. Runs under hci_dev_list_lock; a first pass sizes the
 * reply, a second pass fills in the ids while additionally skipping
 * devices in SETUP/CONFIG/USER_CHANNEL state and raw-only devices.
 */
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_index_list *rp;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
/* First pass: count eligible controllers to size the reply. */
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
/* Two bytes (one __le16 index) per controller. */
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
360 read_unlock(&hci_dev_list_lock);
/* Second pass: record the ids. count may end up smaller than the
 * first pass because more device states are filtered out here.
 */
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
 */
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
/* Recompute rp_len with the final count before replying. */
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass scheme as
 * read_index_list(), but listing only unconfigured BR/EDR controllers
 * (HCI_UNCONFIGURED set instead of clear).
 */
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured BR/EDR controllers. */
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
420 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in ids, skipping transitional/raw devices. */
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
 */
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list all BR/EDR and AMP
 * controllers with a per-entry type (0x00 configured BR/EDR, 0x01
 * unconfigured BR/EDR, 0x02 AMP), bus and index. Also switches the
 * calling socket over to extended index events.
 */
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
/* First pass: count BR/EDR and AMP controllers. */
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
479 read_unlock(&hci_dev_list_lock);
/* Second pass: fill entries, skipping transitional/raw devices. */
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
 */
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
 */
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
/* Whether hdev has completed all required configuration: external
 * config done if the quirk demands it, and a usable public address if
 * the default bdaddr is invalid. NOTE(review): the return statements
 * are not visible in this extraction.
 */
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options still
 * missing for hdev: external config not yet done, and/or a public
 * address still unset while the default bdaddr is invalid.
 */
546 static __le32 get_missing_options(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
/* Notify generic-event listeners that the set of missing configuration
 * options for hdev has changed.
 */
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
/* Complete a config-related command with the current missing-options
 * mask as reply. The final sizeof argument is not visible here.
 */
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus the
 * supported and currently missing configuration options.
 * NOTE(review): the hci_dev_lock() call and the options declaration
 * are not visible in this extraction.
 */
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
583 BT_DBG("sock %p %s", sk, hdev->name);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
/* External configuration is supported when the quirk says so. */
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Public address can be set only if the driver provides a hook. */
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the bitmask of MGMT settings this controller can support, based
 * on its BR/EDR, SSP, SC and LE capabilities plus configuration quirks.
 */
605 static u32 get_supported_settings(struct hci_dev *hdev)
/* Settings available on every controller. */
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
/* Fast connectable requires at least Bluetooth 1.2. */
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
/* NOTE(review): the second half of this condition (line 639 of the
 * original) is not visible in this extraction.
 */
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
640 settings |= MGMT_SETTING_CONFIGURATION;
/* Snapshot the currently active MGMT settings from the hdev flag bits. */
645 static u32 get_current_settings(struct hci_dev *hdev)
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
 */
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
/* 16-bit service class UUID for PnP Information; filtered out of the
 * advertised UUID list below.
 */
713 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the 16-bit service UUIDs to data, marking
 * the field EIR_UUID16_SOME and stopping when len would be exceeded.
 * Returns a pointer past the bytes written. NOTE(review): several lines
 * (local declarations, field-header setup, loop tail) are missing from
 * this extraction.
 */
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 16)
/* Bytes 12..13 of the 128-bit form hold the 16-bit short UUID. */
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
739 uuids_start[1] = EIR_UUID16_ALL;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
/* Emit the UUID little-endian, byte by byte. */
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the 32-bit service UUIDs to data,
 * downgrading to EIR_UUID32_SOME and stopping when len would be
 * exceeded. Returns a pointer past the bytes written. NOTE(review):
 * several lines are missing from this extraction.
 */
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
772 uuids_start[1] = EIR_UUID32_ALL;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
/* Bytes 12..15 of the 128-bit form hold the 32-bit UUID. */
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
784 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the full 128-bit service UUIDs to data,
 * downgrading to EIR_UUID128_SOME and stopping when len would be
 * exceeded. Returns a pointer past the bytes written. NOTE(review):
 * several lines are missing from this extraction.
 */
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
805 uuids_start[1] = EIR_UUID128_ALL;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
815 memcpy(ptr, uuid->uuid, 16);
817 uuids_start[0] += 16;
/* Look up a pending MGMT command by opcode on the control channel. */
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* Look up a pending MGMT command by opcode and matching user data on
 * the control channel.
 */
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Build the default (instance 0) scan response payload into ptr: the
 * local device name as an EIR field, shortened when it does not fit in
 * HCI_MAX_AD_LENGTH. Returns the total ad length. NOTE(review): the
 * ad_len initialization and the shortened-name branch body are not
 * visible in this extraction.
 */
835 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
840 name_len = strlen(hdev->dev_name);
/* 2 bytes of EIR header (length + type) per field. */
842 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
844 if (name_len > max_len) {
846 ptr[1] = EIR_NAME_SHORT;
848 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte counts the type byte plus the name bytes. */
850 ptr[0] = name_len + 1;
852 memcpy(ptr + 2, hdev->dev_name, name_len);
854 ad_len += (name_len + 2);
855 ptr += (name_len + 2);
/* Copy the configured advertising instance's scan response data into
 * ptr and return its length.
 */
861 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
863 /* TODO: Set the appropriate entries based on advertising instance flags
864 * here once flags other than 0 are supported.
 */
866 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
867 hdev->adv_instance.scan_rsp_len);
869 return hdev->adv_instance.scan_rsp_len;
/* Queue an HCI Set Scan Response Data command for the given advertising
 * instance, skipping the command when LE is disabled or the data is
 * unchanged. NOTE(review): the instance-selection branch between the
 * memset and the two create_* calls is not visible in this extraction.
 */
872 static void update_scan_rsp_data_for_instance(struct hci_request *req,
875 struct hci_dev *hdev = req->hdev;
876 struct hci_cp_le_set_scan_rsp_data cp;
879 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 memset(&cp, 0, sizeof(cp));
885 len = create_instance_scan_rsp_data(hdev, cp.data);
887 len = create_default_scan_rsp_data(hdev, cp.data);
/* Avoid an HCI round-trip when nothing changed. */
889 if (hdev->scan_rsp_data_len == len &&
890 !memcmp(cp.data, hdev->scan_rsp_data, len))
/* Cache the full (zero-padded) buffer and its logical length. */
893 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
894 hdev->scan_rsp_data_len = len;
898 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Refresh scan response data for whichever advertising instance is
 * currently in effect. NOTE(review): the instance assignment lines are
 * not visible in this extraction.
 */
901 static void update_scan_rsp_data(struct hci_request *req)
903 struct hci_dev *hdev = req->hdev;
906 /* The "Set Advertising" setting supersedes the "Add Advertising"
907 * setting. Here we set the scan response data based on which
908 * setting was set. When neither apply, default to the global settings,
909 * represented by instance "0".
 */
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
917 update_scan_rsp_data_for_instance(req, instance);
/* Return the LE advertising discoverability flag (general/limited/none)
 * currently in effect for hdev. NOTE(review): the cmd NULL check and
 * the cp->val == 0x01 test are not visible in this extraction.
 */
920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
922 struct mgmt_pending_cmd *cmd;
924 /* If there's a pending mgmt command the flags will not yet have
925 * their final values, so check for this first.
 */
927 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
929 struct mgmt_mode *cp = cmd->param;
931 return LE_AD_GENERAL;
932 else if (cp->val == 0x02)
933 return LE_AD_LIMITED;
935 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 return LE_AD_LIMITED;
937 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 return LE_AD_GENERAL;
/* Select the advertising instance currently in effect. NOTE(review):
 * the return statements are not visible in this extraction.
 */
944 static u8 get_current_adv_instance(struct hci_dev *hdev)
946 /* The "Set Advertising" setting supersedes the "Add Advertising"
947 * setting. Here we set the advertising data based on which
948 * setting was set. When neither apply, default to the global settings,
949 * represented by instance "0".
 */
951 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
952 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
/* Return the connectable setting currently in effect, preferring the
 * value from a pending MGMT_OP_SET_CONNECTABLE command over the flag.
 * NOTE(review): the cmd NULL check and the cp->val return are not
 * visible in this extraction.
 */
958 static bool get_connectable(struct hci_dev *hdev)
960 struct mgmt_pending_cmd *cmd;
962 /* If there's a pending mgmt command the flag will not yet have
963 * its final value, so check for this first.
 */
965 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
967 struct mgmt_mode *cp = cmd->param;
972 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Return the advertising flags for the given instance: instance 0x01
 * uses the flags configured via Add Advertising, instance 0 derives
 * them from the global advertising settings.
 */
975 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
982 if (instance == 0x01)
983 return hdev->adv_instance.flags;
985 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
986 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
988 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
989 * to the "connectable" instance flag.
 */
991 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
992 flags |= MGMT_ADV_FLAG_CONNECTABLE;
/* Return the configured scan response length for instance 0x01; other
 * instances report no scan response data. NOTE(review): the early
 * return for instance != 0x01 is not visible in this extraction.
 */
997 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
999 /* Ignore instance 0 and other unsupported instances */
1000 if (instance != 0x01)
1003 /* TODO: Take into account the "appearance" and "local-name" flags here.
1004 * These are currently being ignored as they are not supported.
 */
1006 return hdev->adv_instance.scan_rsp_len;
/* Build the advertising data payload for the given instance into ptr:
 * optional "Flags" field, optional Tx Power field, then the instance's
 * own advertising data. Returns the total ad length. NOTE(review):
 * several lines (flag-field emission, ad_len/ptr bookkeeping) are not
 * visible in this extraction.
 */
1009 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1011 u8 ad_len = 0, flags = 0;
1012 u32 instance_flags = get_adv_instance_flags(hdev, instance);
1014 /* The Add Advertising command allows userspace to set both the general
1015 * and limited discoverable flags.
 */
1017 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1018 flags |= LE_AD_GENERAL;
1020 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1021 flags |= LE_AD_LIMITED;
1023 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1024 /* If a discovery flag wasn't provided, simply use the global
 * settings.
 */
1028 flags |= get_adv_discov_flags(hdev);
1030 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1031 flags |= LE_AD_NO_BREDR;
1033 /* If flags would still be empty, then there is no need to
1034 * include the "Flags" AD field".
 */
1046 /* Provide Tx Power only if we can provide a valid value for it */
1047 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1048 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1050 ptr[1] = EIR_TX_POWER;
1051 ptr[2] = (u8)hdev->adv_tx_power;
1058 memcpy(ptr, hdev->adv_instance.adv_data,
1059 hdev->adv_instance.adv_data_len);
1060 ad_len += hdev->adv_instance.adv_data_len;
/* Queue an HCI Set Advertising Data command for the given instance,
 * skipping it when LE is disabled or the data is unchanged.
 */
1066 static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
1068 struct hci_dev *hdev = req->hdev;
1069 struct hci_cp_le_set_adv_data cp;
1072 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1075 memset(&cp, 0, sizeof(cp));
1077 len = create_instance_adv_data(hdev, instance, cp.data);
1079 /* There's nothing to do if the data hasn't changed */
1080 if (hdev->adv_data_len == len &&
1081 memcmp(cp.data, hdev->adv_data, len) == 0)
/* Cache the full (zero-padded) buffer and its logical length. */
1084 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1085 hdev->adv_data_len = len;
1089 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Refresh advertising data for the instance currently in effect. */
1092 static void update_adv_data(struct hci_request *req)
1094 struct hci_dev *hdev = req->hdev;
1095 u8 instance = get_current_adv_instance(hdev);
1097 update_adv_data_for_instance(req, instance);
/* Exported entry point: build and run an HCI request that refreshes the
 * advertising data. Returns the hci_req_run() result.
 */
1100 int mgmt_update_adv_data(struct hci_dev *hdev)
1102 struct hci_request req;
1104 hci_req_init(&req, hdev);
1105 update_adv_data(&req);
1107 return hci_req_run(&req, NULL);
/* Build the Extended Inquiry Response payload into data: local name
 * (shortened beyond 48 bytes), inquiry Tx power, device ID, then the
 * 16/32/128-bit UUID lists up to HCI_MAX_EIR_LENGTH. NOTE(review):
 * several lines (local declarations, field length/ptr bookkeeping,
 * devid vendor-source handling) are not visible in this extraction.
 */
1110 static void create_eir(struct hci_dev *hdev, u8 *data)
1115 name_len = strlen(hdev->dev_name);
1119 if (name_len > 48) {
1121 ptr[1] = EIR_NAME_SHORT;
1123 ptr[1] = EIR_NAME_COMPLETE;
1125 /* EIR Data length */
1126 ptr[0] = name_len + 1;
1128 memcpy(ptr + 2, hdev->dev_name, name_len);
1130 ptr += (name_len + 2);
1133 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1135 ptr[1] = EIR_TX_POWER;
1136 ptr[2] = (u8) hdev->inq_tx_power;
1141 if (hdev->devid_source > 0) {
1143 ptr[1] = EIR_DEVICE_ID;
/* Device ID fields are serialized little-endian per the spec. */
1145 put_unaligned_le16(hdev->devid_source, ptr + 2);
1146 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1147 put_unaligned_le16(hdev->devid_product, ptr + 6);
1148 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Each list builder consumes from the space remaining in data. */
1153 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1154 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1155 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command when the device is powered, supports
 * extended inquiry, has SSP enabled, is not in service-cache mode, and
 * the freshly built EIR actually differs from the cached one.
 */
1158 static void update_eir(struct hci_request *req)
1160 struct hci_dev *hdev = req->hdev;
1161 struct hci_cp_write_eir cp;
1163 if (!hdev_is_powered(hdev))
1166 if (!lmp_ext_inq_capable(hdev))
1169 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1172 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1175 memset(&cp, 0, sizeof(cp));
1177 create_eir(hdev, cp.data);
/* Skip the HCI round-trip when the EIR is unchanged. */
1179 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1182 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1184 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; used as
 * the top byte of the class of device.
 */
1187 static u8 get_service_classes(struct hci_dev *hdev)
1189 struct bt_uuid *uuid;
1192 list_for_each_entry(uuid, &hdev->uuids, list)
1193 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command built from minor/major
 * class and service-class hints, skipping it when the device is not
 * powered, BR/EDR is disabled, the service cache is active, or the
 * class is unchanged. NOTE(review): the limited-discoverable bit
 * adjustment after line 1218 is not visible in this extraction.
 */
1198 static void update_class(struct hci_request *req)
1200 struct hci_dev *hdev = req->hdev;
1203 BT_DBG("%s", hdev->name);
1205 if (!hdev_is_powered(hdev))
1208 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1211 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1214 cod[0] = hdev->minor_class;
1215 cod[1] = hdev->major_class;
1216 cod[2] = get_service_classes(hdev);
1218 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1221 if (memcmp(cod, hdev->dev_class, 3) == 0)
1224 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue an LE Set Advertise Enable command turning advertising off.
 * NOTE(review): the enable variable declaration (value 0x00) is not
 * visible in this extraction.
 */
1227 static void disable_advertising(struct hci_request *req)
1231 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI commands that (re)enable LE advertising: pick the
 * instance and its flags, resolve connectability and own address type,
 * set the advertising parameters, then enable advertising.
 * NOTE(review): some lines (early-return bodies, the connectable test
 * before cp.type selection) are not visible in this extraction.
 */
1234 static void enable_advertising(struct hci_request *req)
1236 struct hci_dev *hdev = req->hdev;
1237 struct hci_cp_le_set_adv_param cp;
1238 u8 own_addr_type, enable = 0x01;
/* Bail out while LE connections exist. */
1243 if (hci_conn_num(hdev, LE_LINK) > 0)
/* Restart cleanly if advertising is already on. */
1246 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1247 disable_advertising(req);
1249 /* Clear the HCI_LE_ADV bit temporarily so that the
1250 * hci_update_random_address knows that it's safe to go ahead
1251 * and write a new random address. The flag will be set back on
1252 * as soon as the SET_ADV_ENABLE HCI command completes.
 */
1254 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1256 instance = get_current_adv_instance(hdev);
1257 flags = get_adv_instance_flags(hdev, instance);
1259 /* If the "connectable" instance flag was not set, then choose between
1260 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
 */
1262 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1263 get_connectable(hdev);
1265 /* Set require_privacy to true only when non-connectable
1266 * advertising is used. In that case it is fine to use a
1267 * non-resolvable private address.
 */
1269 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1272 memset(&cp, 0, sizeof(cp));
1273 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1274 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1277 cp.type = LE_ADV_IND;
/* Scannable-only advertising when scan response data exists. */
1278 else if (get_adv_instance_scan_rsp_len(hdev, instance))
1279 cp.type = LE_ADV_SCAN_IND;
1281 cp.type = LE_ADV_NONCONN_IND;
1283 cp.own_address_type = own_addr_type;
1284 cp.channel_map = hdev->le_adv_channel_map;
1286 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1288 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler that expires the service cache (after
 * CACHE_TIMEOUT) and flushes the resulting EIR/class state to the
 * controller. NOTE(review): the hci_dev_lock() and the update calls
 * between lines 1300 and 1307 are not visible in this extraction.
 */
1291 static void service_cache_off(struct work_struct *work)
1293 struct hci_dev *hdev = container_of(work, struct hci_dev,
1294 service_cache.work);
1295 struct hci_request req;
/* Nothing to do if the cache flag was already clear. */
1297 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1300 hci_req_init(&req, hdev);
1307 hci_dev_unlock(hdev);
1309 hci_req_run(&req, NULL);
/* Delayed-work handler for resolvable private address expiry: mark the
 * RPA expired and, if advertising, re-enable advertising so a fresh RPA
 * gets generated and programmed.
 */
1312 static void rpa_expired(struct work_struct *work)
1314 struct hci_dev *hdev = container_of(work, struct hci_dev,
1316 struct hci_request req;
1320 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1322 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1325 /* The generation of a new RPA and programming it into the
1326 * controller happens in the enable_advertising() function.
 */
1328 hci_req_init(&req, hdev);
1329 enable_advertising(&req);
1330 hci_req_run(&req, NULL);
/* One-time per-device MGMT initialization, guarded by the HCI_MGMT
 * flag: set up the service-cache and RPA-expiry workers and clear the
 * implicit bondable default.
 */
1333 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
/* Already initialized if HCI_MGMT was set before. */
1335 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1338 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1339 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1341 /* Non-mgmt controlled devices get this bit set
1342 * implicitly so that pairing works for them, however
1343 * for mgmt we require user-space to explicitly enable
 */
1346 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: fill a mgmt_rp_read_info reply with the
 * controller's address, HCI version, manufacturer, supported/current
 * settings, class of device and names, then complete the command.
 * NOTE(review): the matching hci_dev_lock() is elided in this excerpt.
 */
1349 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1350 void *data, u16 data_len)
1352 struct mgmt_rp_read_info rp;
1354 BT_DBG("sock %p %s", sk, hdev->name);
1358 memset(&rp, 0, sizeof(rp));
1360 bacpy(&rp.bdaddr, &hdev->bdaddr);
1362 rp.version = hdev->hci_ver;
1363 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1365 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1366 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1368 memcpy(rp.dev_class, hdev->dev_class, 3);
1370 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1371 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1373 hci_dev_unlock(hdev);
1375 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Complete @opcode for @sk with the current settings bitmask as the
 * reply payload (little-endian, as required by the mgmt protocol).
 */
1379 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1381 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1383 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for the clean_up_hci_state() request: once no
 * connections remain, cancel the delayed power-off and run it
 * immediately on the request workqueue.
 */
1387 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1389 BT_DBG("%s status 0x%02x", hdev->name, status);
1391 if (hci_conn_count(hdev) == 0) {
1392 cancel_delayed_work(&hdev->power_off);
1393 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Append the HCI commands needed to stop any ongoing discovery to
 * @req: cancel inquiry and/or disable LE scanning while FINDING,
 * cancel the remote name request while RESOLVING, and stop passive
 * LE scanning otherwise.  Returns whether any stop command was
 * queued (return statements are elided in this excerpt).
 */
1397 static bool hci_stop_discovery(struct hci_request *req)
1399 struct hci_dev *hdev = req->hdev;
1400 struct hci_cp_remote_name_req_cancel cp;
1401 struct inquiry_entry *e;
1403 switch (hdev->discovery.state) {
1404 case DISCOVERY_FINDING:
1405 if (test_bit(HCI_INQUIRY, &hdev->flags))
1406 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1408 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1409 cancel_delayed_work(&hdev->le_scan_disable);
1410 hci_req_add_le_scan_disable(req);
1415 case DISCOVERY_RESOLVING:
1416 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1421 bacpy(&cp.bdaddr, &e->data.bdaddr);
1422 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1428 /* Passive scanning */
1429 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1430 hci_req_add_le_scan_disable(req);
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance, skipping @sk (the
 * socket that issued the originating command).
 */
1440 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1443 struct mgmt_ev_advertising_added ev;
1445 ev.instance = instance;
1447 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance, skipping @sk. */
1450 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1453 struct mgmt_ev_advertising_removed ev;
1455 ev.instance = instance;
1457 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Tear down the (single, at this kernel revision) advertising
 * instance: cancel its timeout, wipe its state, notify user-space,
 * and — unless the device is off or software advertising is active —
 * disable advertising in the controller.
 */
1460 static void clear_adv_instance(struct hci_dev *hdev)
1462 struct hci_request req;
1464 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1467 if (hdev->adv_instance.timeout)
1468 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
1470 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
1471 advertising_removed(NULL, hdev, 1);
1472 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
1474 if (!hdev_is_powered(hdev) ||
1475 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1478 hci_req_init(&req, hdev);
1479 disable_advertising(&req);
1480 hci_req_run(&req, NULL);
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, clear the advertising
 * instance, stop advertising and discovery, then disconnect / cancel
 * / reject every connection depending on its state.  Returns the
 * hci_req_run() result; on success with discovery stopped, moves the
 * discovery state machine to STOPPING.  NOTE(review): case labels in
 * the switch (e.g. BT_CONNECTED/BT_CONNECT/BT_CONNECT2) are elided
 * in this excerpt.
 */
1483 static int clean_up_hci_state(struct hci_dev *hdev)
1485 struct hci_request req;
1486 struct hci_conn *conn;
1487 bool discov_stopped;
1490 hci_req_init(&req, hdev);
1492 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1493 test_bit(HCI_PSCAN, &hdev->flags)) {
1495 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1498 if (hdev->adv_instance.timeout)
1499 clear_adv_instance(hdev);
1501 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1502 disable_advertising(&req);
1504 discov_stopped = hci_stop_discovery(&req);
1506 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1507 struct hci_cp_disconnect dc;
1508 struct hci_cp_reject_conn_req rej;
1510 switch (conn->state) {
1513 dc.handle = cpu_to_le16(conn->handle);
1514 dc.reason = 0x15; /* Terminated due to Power Off */
1515 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1518 if (conn->type == LE_LINK)
1519 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1521 else if (conn->type == ACL_LINK)
1522 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1526 bacpy(&rej.bdaddr, &conn->dst);
1527 rej.reason = 0x15; /* Terminated due to Power Off */
1528 if (conn->type == ACL_LINK)
1529 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1531 else if (conn->type == SCO_LINK)
1532 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1538 err = hci_req_run(&req, clean_up_hci_complete);
1539 if (!err && discov_stopped)
1540 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler.  Validates val is 0/1, rejects a
 * second concurrent SET_POWERED, short-circuits the AUTO_OFF case by
 * powering on via mgmt_powered(), answers immediately when the state
 * already matches, and otherwise queues power-on work or runs a
 * clean-up request followed by a delayed power-off.  -ENODATA from
 * clean_up_hci_state() means nothing was queued, so power off at
 * once.
 */
1545 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1548 struct mgmt_mode *cp = data;
1549 struct mgmt_pending_cmd *cmd;
1552 BT_DBG("request for %s", hdev->name);
1554 if (cp->val != 0x00 && cp->val != 0x01)
1555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1556 MGMT_STATUS_INVALID_PARAMS);
1560 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1561 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1566 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1567 cancel_delayed_work(&hdev->power_off);
1570 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1572 err = mgmt_powered(hdev, 1);
1577 if (!!cp->val == hdev_is_powered(hdev)) {
1578 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1582 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1589 queue_work(hdev->req_workqueue, &hdev->power_on);
1592 /* Disconnect connections, stop scans, etc */
1593 err = clean_up_hci_state(hdev);
1595 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1596 HCI_POWER_OFF_TIMEOUT);
1598 /* ENODATA means there were no HCI commands queued */
1599 if (err == -ENODATA) {
1600 cancel_delayed_work(&hdev->power_off);
1601 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1607 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask,
 * skipping the @skip socket (usually the command originator).
 */
1611 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1613 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1615 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
/* Public wrapper: broadcast a settings-changed event to everyone. */
1619 int mgmt_new_settings(struct hci_dev *hdev)
1621 return new_settings(hdev, NULL);
1626 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings, unlink it, remember the first socket in the
 * cmd_lookup match (with a hold) so the caller can skip it when
 * broadcasting, then free the command.
 */
1630 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1632 struct cmd_lookup *match = data;
1634 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1636 list_del(&cmd->list);
1638 if (match->sk == NULL) {
1639 match->sk = cmd->sk;
1640 sock_hold(match->sk);
1643 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * u8 status pointed to by @data and remove it.
 */
1646 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1650 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1651 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: prefer the command's own
 * cmd_complete handler when set; otherwise fall back to a plain
 * status response via cmd_status_rsp().
 */
1654 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1656 if (cmd->cmd_complete) {
1659 cmd->cmd_complete(cmd, *status);
1660 mgmt_pending_remove(cmd);
1665 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's full parameter
 * buffer back as the reply payload.
 */
1668 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1670 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1671 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only
 * the leading mgmt_addr_info portion of the stored parameters.
 */
1674 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1676 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1677 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled,
 * SUCCESS otherwise.
 */
1680 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1682 if (!lmp_bredr_capable(hdev))
1683 return MGMT_STATUS_NOT_SUPPORTED;
1684 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1685 return MGMT_STATUS_REJECTED;
1687 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, SUCCESS
 * otherwise.
 */
1690 static u8 mgmt_le_support(struct hci_dev *hdev)
1692 if (!lmp_le_capable(hdev))
1693 return MGMT_STATUS_NOT_SUPPORTED;
1694 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1695 return MGMT_STATUS_REJECTED;
1697 return MGMT_STATUS_SUCCESS;
/* HCI request completion for SET_DISCOVERABLE.  On failure, report
 * the status and clear HCI_LIMITED_DISCOVERABLE.  On success, flip
 * HCI_DISCOVERABLE according to the requested mode, arm the
 * discoverable timeout when one was stored, answer the pending
 * command, broadcast new settings when something changed, and
 * refresh class-of-device / page scan via a follow-up request.
 * NOTE(review): the branch reading cp->val and the update_class()
 * call are partly elided in this excerpt.
 */
1700 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1703 struct mgmt_pending_cmd *cmd;
1704 struct mgmt_mode *cp;
1705 struct hci_request req;
1708 BT_DBG("status 0x%02x", status);
1712 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1717 u8 mgmt_err = mgmt_status(status);
1718 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1719 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1725 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1727 if (hdev->discov_timeout > 0) {
1728 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1729 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1733 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1736 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1739 new_settings(hdev, cmd->sk);
1741 /* When the discoverable mode gets changed, make sure
1742 * that class of device has the limited discoverable
1743 * bit correctly set. Also update page scan based on whitelist
1746 hci_req_init(&req, hdev);
1747 __hci_update_page_scan(&req);
1749 hci_req_run(&req, NULL);
1752 mgmt_pending_remove(cmd);
1755 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable.  Validates the val/timeout combination
 * (off requires no timeout, limited requires one), rejects when
 * neither BR/EDR nor LE is enabled, when a timeout is given while
 * powered off, when a SET_DISCOVERABLE/SET_CONNECTABLE is already
 * pending, or when the device is not connectable.  Powered-off
 * requests just toggle the flag; a same-mode request only refreshes
 * the timeout.  Otherwise builds an HCI request (IAC + scan enable
 * for BR/EDR, advertising data update for LE-only) completed by
 * set_discoverable_complete().
 */
1758 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1761 struct mgmt_cp_set_discoverable *cp = data;
1762 struct mgmt_pending_cmd *cmd;
1763 struct hci_request req;
1768 BT_DBG("request for %s", hdev->name);
1770 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1771 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1772 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1773 MGMT_STATUS_REJECTED);
1775 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1776 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1777 MGMT_STATUS_INVALID_PARAMS);
1779 timeout = __le16_to_cpu(cp->timeout);
1781 /* Disabling discoverable requires that no timeout is set,
1782 * and enabling limited discoverable requires a timeout.
1784 if ((cp->val == 0x00 && timeout > 0) ||
1785 (cp->val == 0x02 && timeout == 0))
1786 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1787 MGMT_STATUS_INVALID_PARAMS);
1791 if (!hdev_is_powered(hdev) && timeout > 0) {
1792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1793 MGMT_STATUS_NOT_POWERED);
1797 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1798 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1799 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1804 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1806 MGMT_STATUS_REJECTED);
1810 if (!hdev_is_powered(hdev)) {
1811 bool changed = false;
1813 /* Setting limited discoverable when powered off is
1814 * not a valid operation since it requires a timeout
1815 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1817 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1818 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1822 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1827 err = new_settings(hdev, sk);
1832 /* If the current mode is the same, then just update the timeout
1833 * value with the new value. And if only the timeout gets updated,
1834 * then no need for any HCI transactions.
1836 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1837 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1838 HCI_LIMITED_DISCOVERABLE)) {
1839 cancel_delayed_work(&hdev->discov_off);
1840 hdev->discov_timeout = timeout;
1842 if (cp->val && hdev->discov_timeout > 0) {
1843 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1844 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1848 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1852 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1858 /* Cancel any potential discoverable timeout that might be
1859 * still active and store new timeout value. The arming of
1860 * the timeout happens in the complete handler.
1862 cancel_delayed_work(&hdev->discov_off);
1863 hdev->discov_timeout = timeout;
1865 /* Limited discoverable mode */
1866 if (cp->val == 0x02)
1867 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1869 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1871 hci_req_init(&req, hdev);
1873 /* The procedure for LE-only controllers is much simpler - just
1874 * update the advertising data.
1876 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* BR/EDR path: program the Inquiry Access Codes.  The LIAC+GIAC
 * (limited) or GIAC-only (general) LAPs below are the Bluetooth
 * assigned values 0x9e8b00 / 0x9e8b33, written LSB first.
 */
1882 struct hci_cp_write_current_iac_lap hci_cp;
1884 if (cp->val == 0x02) {
1885 /* Limited discoverable mode */
1886 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1887 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1888 hci_cp.iac_lap[1] = 0x8b;
1889 hci_cp.iac_lap[2] = 0x9e;
1890 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1891 hci_cp.iac_lap[4] = 0x8b;
1892 hci_cp.iac_lap[5] = 0x9e;
1894 /* General discoverable mode */
1896 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1897 hci_cp.iac_lap[1] = 0x8b;
1898 hci_cp.iac_lap[2] = 0x9e;
1901 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1902 (hci_cp.num_iac * 3) + 1, &hci_cp);
1904 scan |= SCAN_INQUIRY;
1906 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1909 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1912 update_adv_data(&req);
1914 err = hci_req_run(&req, set_discoverable_complete);
1916 mgmt_pending_remove(cmd);
1919 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing "fast
 * connectable": interlaced scan with a 160 ms interval when @enable,
 * standard scan with the default 1.28 s interval otherwise.  Skips
 * controllers without BR/EDR enabled or older than HCI 1.2, and only
 * sends commands whose values would actually change.
 */
1923 static void write_fast_connectable(struct hci_request *req, bool enable)
1925 struct hci_dev *hdev = req->hdev;
1926 struct hci_cp_write_page_scan_activity acp;
1929 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1932 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1936 type = PAGE_SCAN_TYPE_INTERLACED;
1938 /* 160 msec page scan interval */
1939 acp.interval = cpu_to_le16(0x0100);
1941 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1943 /* default 1.28 sec page scan */
1944 acp.interval = cpu_to_le16(0x0800);
1947 acp.window = cpu_to_le16(0x0012);
1949 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1950 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1951 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1954 if (hdev->page_scan_type != type)
1955 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion for SET_CONNECTABLE.  On failure just
 * report the status.  On success, set or clear HCI_CONNECTABLE
 * (disabling also clears discoverable), answer the pending command,
 * and when flags changed broadcast new settings, refresh page scan
 * and — on disable — advertising data and background scanning.
 */
1958 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1961 struct mgmt_pending_cmd *cmd;
1962 struct mgmt_mode *cp;
1963 bool conn_changed, discov_changed;
1965 BT_DBG("status 0x%02x", status);
1969 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1974 u8 mgmt_err = mgmt_status(status);
1975 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1981 conn_changed = !hci_dev_test_and_set_flag(hdev,
1983 discov_changed = false;
1985 conn_changed = hci_dev_test_and_clear_flag(hdev,
1987 discov_changed = hci_dev_test_and_clear_flag(hdev,
1991 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1993 if (conn_changed || discov_changed) {
1994 new_settings(hdev, cmd->sk);
1995 hci_update_page_scan(hdev);
1997 mgmt_update_adv_data(hdev);
1998 hci_update_background_scan(hdev);
2002 mgmt_pending_remove(cmd);
2005 hci_dev_unlock(hdev);
/* Flag-only SET_CONNECTABLE path (used when powered off or when the
 * HCI request produced no commands): toggle HCI_CONNECTABLE, clear
 * HCI_DISCOVERABLE when disabling, reply with current settings and,
 * if anything changed, refresh page/background scan and broadcast
 * new settings.
 */
2008 static int set_connectable_update_settings(struct hci_dev *hdev,
2009 struct sock *sk, u8 val)
2011 bool changed = false;
2014 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2018 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2020 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2021 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2024 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2029 hci_update_page_scan(hdev);
2030 hci_update_background_scan(hdev);
2031 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  Validates val is 0/1 and that
 * BR/EDR or LE is enabled; powered-off requests take the
 * settings-only path.  Otherwise builds an HCI request: LE-only
 * controllers just refresh advertising data/flags, BR/EDR
 * controllers rewrite scan enable (falling back to page-scan-only
 * when the whitelist is non-empty) and cancel the discoverable
 * timeout when inquiry scan goes away.  -ENODATA from hci_req_run()
 * means no commands were needed, so fall back to
 * set_connectable_update_settings().
 */
2037 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2040 struct mgmt_mode *cp = data;
2041 struct mgmt_pending_cmd *cmd;
2042 struct hci_request req;
2046 BT_DBG("request for %s", hdev->name);
2048 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2049 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2050 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2051 MGMT_STATUS_REJECTED);
2053 if (cp->val != 0x00 && cp->val != 0x01)
2054 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2055 MGMT_STATUS_INVALID_PARAMS);
2059 if (!hdev_is_powered(hdev)) {
2060 err = set_connectable_update_settings(hdev, sk, cp->val);
2064 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2065 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2066 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2071 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2077 hci_req_init(&req, hdev);
2079 /* If BR/EDR is not enabled and we disable advertising as a
2080 * by-product of disabling connectable, we need to update the
2081 * advertising flags.
2083 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2085 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2086 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2088 update_adv_data(&req);
2089 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2093 /* If we don't have any whitelist entries just
2094 * disable all scanning. If there are entries
2095 * and we had both page and inquiry scanning
2096 * enabled then fall back to only page scanning.
2097 * Otherwise no changes are needed.
2099 if (list_empty(&hdev->whitelist))
2100 scan = SCAN_DISABLED;
2101 else if (test_bit(HCI_ISCAN, &hdev->flags))
2104 goto no_scan_update;
2106 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2107 hdev->discov_timeout > 0)
2108 cancel_delayed_work(&hdev->discov_off);
2111 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2115 /* Update the advertising parameters if necessary */
2116 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2117 enable_advertising(&req);
2119 err = hci_req_run(&req, set_connectable_complete);
2121 mgmt_pending_remove(cmd);
2122 if (err == -ENODATA)
2123 err = set_connectable_update_settings(hdev, sk,
2129 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: pure flag toggle of HCI_BONDABLE —
 * no HCI traffic needed.  Replies with current settings and
 * broadcasts new settings when the flag actually changed.
 */
2133 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2136 struct mgmt_mode *cp = data;
2140 BT_DBG("request for %s", hdev->name);
2142 if (cp->val != 0x00 && cp->val != 0x01)
2143 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2144 MGMT_STATUS_INVALID_PARAMS);
2149 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2151 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2153 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2158 err = new_settings(hdev, sk);
2161 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler.  Requires BR/EDR support and
 * val 0/1.  Powered off: toggle HCI_LINK_SECURITY only.  Powered
 * on: reject a duplicate pending command, short-circuit when the
 * HCI_AUTH flag already matches, otherwise send
 * HCI_OP_WRITE_AUTH_ENABLE and leave the pending command for the
 * event handler.
 */
2165 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2168 struct mgmt_mode *cp = data;
2169 struct mgmt_pending_cmd *cmd;
2173 BT_DBG("request for %s", hdev->name);
2175 status = mgmt_bredr_support(hdev);
2177 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2180 if (cp->val != 0x00 && cp->val != 0x01)
2181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2182 MGMT_STATUS_INVALID_PARAMS);
2186 if (!hdev_is_powered(hdev)) {
2187 bool changed = false;
2189 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2190 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2194 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2199 err = new_settings(hdev, sk);
2204 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2205 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2212 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2213 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2217 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2223 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2225 mgmt_pending_remove(cmd);
2230 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler.  Requires BR/EDR and SSP-capable
 * hardware, val 0/1.  Powered off: toggle the SSP flags (disabling
 * also clears HS).  Powered on: reject a duplicate pending command,
 * short-circuit when HCI_SSP_ENABLED already matches; otherwise
 * first turn off SSP debug mode if it was in use, then send
 * HCI_OP_WRITE_SSP_MODE.
 */
2234 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2236 struct mgmt_mode *cp = data;
2237 struct mgmt_pending_cmd *cmd;
2241 BT_DBG("request for %s", hdev->name);
2243 status = mgmt_bredr_support(hdev);
2245 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2247 if (!lmp_ssp_capable(hdev))
2248 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2249 MGMT_STATUS_NOT_SUPPORTED);
2251 if (cp->val != 0x00 && cp->val != 0x01)
2252 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2253 MGMT_STATUS_INVALID_PARAMS);
2257 if (!hdev_is_powered(hdev)) {
2261 changed = !hci_dev_test_and_set_flag(hdev,
2264 changed = hci_dev_test_and_clear_flag(hdev,
2267 changed = hci_dev_test_and_clear_flag(hdev,
2270 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2273 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2278 err = new_settings(hdev, sk);
2283 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2284 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2289 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2290 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2294 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2300 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2301 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2302 sizeof(cp->val), &cp->val);
2304 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2306 mgmt_pending_remove(cmd);
2311 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed / AMP) handler: flag-only toggle of
 * HCI_HS_ENABLED.  Requires BR/EDR + SSP support with SSP enabled,
 * val 0/1, and no pending SET_SSP.  Disabling while powered is
 * rejected.  Broadcasts new settings when the flag changed.
 */
2315 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2317 struct mgmt_mode *cp = data;
2322 BT_DBG("request for %s", hdev->name);
2324 status = mgmt_bredr_support(hdev);
2326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2328 if (!lmp_ssp_capable(hdev))
2329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2330 MGMT_STATUS_NOT_SUPPORTED);
2332 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2334 MGMT_STATUS_REJECTED);
2336 if (cp->val != 0x00 && cp->val != 0x01)
2337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2338 MGMT_STATUS_INVALID_PARAMS);
2342 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2343 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2349 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2351 if (hdev_is_powered(hdev)) {
2352 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2353 MGMT_STATUS_REJECTED);
2357 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2360 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2365 err = new_settings(hdev, sk);
2368 hci_dev_unlock(hdev);
/* HCI request completion for SET_LE.  On failure, fail all pending
 * SET_LE commands with the mapped status; on success answer them,
 * broadcast new settings, and — when LE ended up enabled — refresh
 * advertising data, scan response data and background scanning in a
 * fresh request (powering on handles this path separately in
 * powered_update_hci).
 */
2372 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2374 struct cmd_lookup match = { NULL, hdev };
2379 u8 mgmt_err = mgmt_status(status);
2381 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2386 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2388 new_settings(hdev, match.sk);
2393 /* Make sure the controller has a good default for
2394 * advertising data. Restrict the update to when LE
2395 * has actually been enabled. During power on, the
2396 * update in powered_update_hci will take care of it.
2398 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2399 struct hci_request req;
2401 hci_req_init(&req, hdev);
2402 update_adv_data(&req);
2403 update_scan_rsp_data(&req);
2404 __hci_update_background_scan(&req);
2405 hci_req_run(&req, NULL);
2409 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler.  LE-only (or LE-configured dual-mode)
 * controllers may not switch LE off: enabling again succeeds
 * gracefully, disabling is rejected.  When powered off or the host
 * LE support already matches, only the flags change (disabling also
 * drops HCI_ADVERTISING).  Otherwise sends
 * HCI_OP_WRITE_LE_HOST_SUPPORTED — first disabling advertising when
 * turning LE off — completed by le_enable_complete().
 */
2412 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2414 struct mgmt_mode *cp = data;
2415 struct hci_cp_write_le_host_supported hci_cp;
2416 struct mgmt_pending_cmd *cmd;
2417 struct hci_request req;
2421 BT_DBG("request for %s", hdev->name);
2423 if (!lmp_le_capable(hdev))
2424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2425 MGMT_STATUS_NOT_SUPPORTED);
2427 if (cp->val != 0x00 && cp->val != 0x01)
2428 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2429 MGMT_STATUS_INVALID_PARAMS);
2431 /* Bluetooth single mode LE only controllers or dual-mode
2432 * controllers configured as LE only devices, do not allow
2433 * switching LE off. These have either LE enabled explicitly
2434 * or BR/EDR has been previously switched off.
2436 * When trying to enable an already enabled LE, then gracefully
2437 * send a positive response. Trying to disable it however will
2438 * result into rejection.
2440 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2441 if (cp->val == 0x01)
2442 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2444 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2445 MGMT_STATUS_REJECTED);
2451 enabled = lmp_host_le_capable(hdev);
2453 if (!hdev_is_powered(hdev) || val == enabled) {
2454 bool changed = false;
2456 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2457 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2461 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2462 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2466 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2471 err = new_settings(hdev, sk);
2476 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2477 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2478 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2483 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2489 hci_req_init(&req, hdev);
2491 memset(&hci_cp, 0, sizeof(hci_cp));
2495 hci_cp.simul = 0x00;
2497 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2498 disable_advertising(&req);
2501 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2504 err = hci_req_run(&req, le_enable_complete);
2506 mgmt_pending_remove(cmd);
2509 hci_dev_unlock(hdev);
2513 /* This is a helper function to test for pending mgmt commands that can
2514 * cause CoD or EIR HCI commands. We can only allow one such pending
2515 * mgmt command at a time since otherwise we cannot easily track what
2516 * the current values are, will be, and based on that calculate if a new
2517 * HCI command needs to be sent and if yes with what value.
2519 static bool pending_eir_or_class(struct hci_dev *hdev)
2521 struct mgmt_pending_cmd *cmd;
/* Any of these opcodes in flight may rewrite class-of-device or
 * EIR data, so a new one must wait (returns true in the elided
 * case body).
 */
2523 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2524 switch (cmd->opcode) {
2525 case MGMT_OP_ADD_UUID:
2526 case MGMT_OP_REMOVE_UUID:
2527 case MGMT_OP_SET_DEV_CLASS:
2528 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID 00000000-0000-1000-8000-00805F9B34FB in
 * little-endian byte order; 16/32-bit UUIDs are those matching this
 * base in their trailing 12 bytes (see get_uuid_size()).
 */
2536 static const u8 bluetooth_base_uuid[] = {
2537 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2538 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID as 128, 32 or 16 bits wide:
 * anything not built on the Bluetooth Base UUID is 128-bit; otherwise
 * the top 4 bytes decide (return statements elided in this excerpt).
 */
2541 static u8 get_uuid_size(const u8 *uuid)
2545 if (memcmp(uuid, bluetooth_base_uuid, 12))
2548 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the class/EIR-affecting commands
 * (ADD_UUID, REMOVE_UUID, SET_DEV_CLASS): complete the pending
 * @mgmt_op with the mapped status and the current 3-byte class of
 * device as payload.
 */
2555 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2557 struct mgmt_pending_cmd *cmd;
2561 cmd = pending_find(mgmt_op, hdev);
2565 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2566 mgmt_status(status), hdev->dev_class, 3);
2568 mgmt_pending_remove(cmd);
2571 hci_dev_unlock(hdev);
/* HCI request completion for ADD_UUID — delegate to the shared
 * class-complete helper.
 */
2574 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2576 BT_DBG("status 0x%02x", status);
2578 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append the UUID to hdev->uuids and run
 * an HCI request to refresh class of device / EIR.  Rejected while
 * another class/EIR command is pending.  -ENODATA from hci_req_run()
 * means nothing needed updating, so complete immediately; otherwise
 * a pending command is added for add_uuid_complete().  NOTE(review):
 * the kmalloc failure branch is elided in this excerpt.
 */
2581 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2583 struct mgmt_cp_add_uuid *cp = data;
2584 struct mgmt_pending_cmd *cmd;
2585 struct hci_request req;
2586 struct bt_uuid *uuid;
2589 BT_DBG("request for %s", hdev->name);
2593 if (pending_eir_or_class(hdev)) {
2594 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2599 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2605 memcpy(uuid->uuid, cp->uuid, 16);
2606 uuid->svc_hint = cp->svc_hint;
2607 uuid->size = get_uuid_size(cp->uuid);
2609 list_add_tail(&uuid->list, &hdev->uuids);
2611 hci_req_init(&req, hdev);
2616 err = hci_req_run(&req, add_uuid_complete);
2618 if (err != -ENODATA)
2621 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2622 hdev->dev_class, 3);
2626 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2635 hci_dev_unlock(hdev);
/* Arm the service cache: when powered and not already cached, set
 * HCI_SERVICE_CACHE and schedule service_cache_off().  Returns
 * whether caching is in effect (returns elided in this excerpt).
 */
2639 static bool enable_service_cache(struct hci_dev *hdev)
2641 if (!hdev_is_powered(hdev))
2644 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2645 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for REMOVE_UUID — delegate to the shared
 * class-complete helper.
 */
2653 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2655 BT_DBG("status 0x%02x", status);
2657 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler.  An all-zero UUID clears the whole
 * list (answering immediately when the service cache absorbs the
 * update); otherwise every matching entry is unlinked, with
 * INVALID_PARAMS when none matched.  Then an HCI request refreshes
 * class/EIR, with the same -ENODATA immediate-complete pattern as
 * add_uuid().
 */
2660 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2663 struct mgmt_cp_remove_uuid *cp = data;
2664 struct mgmt_pending_cmd *cmd;
2665 struct bt_uuid *match, *tmp;
2666 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2667 struct hci_request req;
2670 BT_DBG("request for %s", hdev->name);
2674 if (pending_eir_or_class(hdev)) {
2675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2680 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2681 hci_uuids_clear(hdev);
2683 if (enable_service_cache(hdev)) {
2684 err = mgmt_cmd_complete(sk, hdev->id,
2685 MGMT_OP_REMOVE_UUID,
2686 0, hdev->dev_class, 3);
2695 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2696 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2699 list_del(&match->list);
2705 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2706 MGMT_STATUS_INVALID_PARAMS);
2711 hci_req_init(&req, hdev);
2716 err = hci_req_run(&req, remove_uuid_complete);
2718 if (err != -ENODATA)
2721 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2722 hdev->dev_class, 3);
2726 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2735 hci_dev_unlock(hdev);
/* HCI request completion for SET_DEV_CLASS — delegate to the shared
 * class-complete helper.
 */
2739 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2741 BT_DBG("status 0x%02x", status);
2743 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler.  Requires BR/EDR, no pending
 * class/EIR command, and valid major/minor bits (low 2 bits of
 * minor and high 3 bits of major must be zero per the CoD format).
 * Stores the new class; powered-off devices complete immediately.
 * Otherwise flushes the service cache if armed (dropping the lock
 * for cancel_delayed_work_sync) and runs a class/EIR update request
 * with the usual -ENODATA immediate-complete fallback.
 */
2746 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2749 struct mgmt_cp_set_dev_class *cp = data;
2750 struct mgmt_pending_cmd *cmd;
2751 struct hci_request req;
2754 BT_DBG("request for %s", hdev->name);
2756 if (!lmp_bredr_capable(hdev))
2757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2758 MGMT_STATUS_NOT_SUPPORTED);
2762 if (pending_eir_or_class(hdev)) {
2763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2768 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2770 MGMT_STATUS_INVALID_PARAMS);
2774 hdev->major_class = cp->major;
2775 hdev->minor_class = cp->minor;
2777 if (!hdev_is_powered(hdev)) {
2778 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2779 hdev->dev_class, 3);
2783 hci_req_init(&req, hdev);
2785 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2786 hci_dev_unlock(hdev);
2787 cancel_delayed_work_sync(&hdev->service_cache);
2794 err = hci_req_run(&req, set_class_complete);
2796 if (err != -ENODATA)
2799 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2800 hdev->dev_class, 3);
2804 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2813 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler.  Validates: BR/EDR support,
 * key_count against the u16-overflow-safe max, exact payload length,
 * debug_keys 0/1, and each key's address type / key type.  Then
 * replaces the stored link keys wholesale: clear, toggle
 * HCI_KEEP_DEBUG_KEYS (broadcasting settings when it changed), and
 * add every key except debug-combination keys, which are always
 * dropped so such pairings must be redone.
 */
2817 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2820 struct mgmt_cp_load_link_keys *cp = data;
2821 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2822 sizeof(struct mgmt_link_key_info));
2823 u16 key_count, expected_len;
2827 BT_DBG("request for %s", hdev->name);
2829 if (!lmp_bredr_capable(hdev))
2830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2831 MGMT_STATUS_NOT_SUPPORTED);
2833 key_count = __le16_to_cpu(cp->key_count);
2834 if (key_count > max_key_count) {
2835 BT_ERR("load_link_keys: too big key_count value %u",
2837 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2838 MGMT_STATUS_INVALID_PARAMS);
2841 expected_len = sizeof(*cp) + key_count *
2842 sizeof(struct mgmt_link_key_info);
2843 if (expected_len != len) {
2844 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2847 MGMT_STATUS_INVALID_PARAMS);
2850 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2852 MGMT_STATUS_INVALID_PARAMS);
2854 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2857 for (i = 0; i < key_count; i++) {
2858 struct mgmt_link_key_info *key = &cp->keys[i];
2860 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2861 return mgmt_cmd_status(sk, hdev->id,
2862 MGMT_OP_LOAD_LINK_KEYS,
2863 MGMT_STATUS_INVALID_PARAMS);
2868 hci_link_keys_clear(hdev);
2871 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2873 changed = hci_dev_test_and_clear_flag(hdev,
2874 HCI_KEEP_DEBUG_KEYS);
2877 new_settings(hdev, NULL);
2879 for (i = 0; i < key_count; i++) {
2880 struct mgmt_link_key_info *key = &cp->keys[i];
2882 /* Always ignore debug keys and require a new pairing if
2883 * the user wants to use them.
2885 if (key->type == HCI_LK_DEBUG_COMBINATION)
2888 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2889 key->type, key->pin_len, NULL);
2892 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2894 hci_dev_unlock(hdev);
/* Emit a Device Unpaired management event for @bdaddr/@addr_type,
 * skipping delivery to @skip_sk (normally the socket that issued the
 * Unpair Device command, since it gets a command reply instead).
 */
2899 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2900 u8 addr_type, struct sock *skip_sk)
2902 struct mgmt_ev_device_unpaired ev;
2904 bacpy(&ev.addr.bdaddr, bdaddr);
2905 ev.addr.type = addr_type;
2907 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing data (link key for
 * BR/EDR, IRK and LTKs for LE) for the given address and optionally
 * disconnect the remote device.  If a disconnection is needed, the
 * command stays pending until the HCI Disconnect completes.
 * NOTE(review): this excerpt elides lines (locking, goto labels,
 * closing braces) — verify against the full source before editing.
 */
2911 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct mgmt_cp_unpair_device *cp = data;
2915 struct mgmt_rp_unpair_device rp;
2916 struct hci_cp_disconnect dc;
2917 struct mgmt_pending_cmd *cmd;
2918 struct hci_conn *conn;
/* The reply always echoes the address being unpaired. */
2921 memset(&rp, 0, sizeof(rp));
2922 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2923 rp.addr.type = cp->addr.type;
2925 if (!bdaddr_type_is_valid(cp->addr.type))
2926 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2927 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a boolean on the wire; anything else is invalid. */
2930 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2931 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2932 MGMT_STATUS_INVALID_PARAMS,
2937 if (!hdev_is_powered(hdev)) {
2938 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2939 MGMT_STATUS_NOT_POWERED, &rp,
2944 if (cp->addr.type == BDADDR_BREDR) {
2945 /* If disconnection is requested, then look up the
2946 * connection. If the remote device is connected, it
2947 * will be later used to terminate the link.
2949 * Setting it to NULL explicitly will cause no
2950 * termination of the link.
2953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2958 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE branch: look up the LE link and remove IRK + LTKs. */
2962 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2965 /* Defer clearing up the connection parameters
2966 * until closing to give a chance of keeping
2967 * them if a repairing happens.
2969 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2971 /* If disconnection is not requested, then
2972 * clear the connection variable so that the
2973 * link is not terminated.
2975 if (!cp->disconnect)
/* Map the MGMT LE address type onto the HCI address type. */
2979 if (cp->addr.type == BDADDR_LE_PUBLIC)
2980 addr_type = ADDR_LE_DEV_PUBLIC;
2982 addr_type = ADDR_LE_DEV_RANDOM;
2984 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2986 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No stored keys were found for this address. */
2990 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2991 MGMT_STATUS_NOT_PAIRED, &rp,
2996 /* If the connection variable is set, then termination of the
2997 * link is requested.
3000 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3002 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Keys removed but a disconnect is pending: defer the reply until the
 * HCI Disconnect command completes.
 */
3006 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3013 cmd->cmd_complete = addr_cmd_complete;
3015 dc.handle = cpu_to_le16(conn->handle);
3016 dc.reason = 0x13; /* Remote User Terminated Connection */
3017 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
3019 mgmt_pending_remove(cmd);
3022 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address.  Rejects the request when the adapter is down, a
 * disconnect is already pending, or no usable connection exists; the
 * reply is deferred via a pending command until hci_disconnect() runs.
 * NOTE(review): excerpt elides lines (locking, goto labels) — verify
 * against the full source before editing.
 */
3026 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3029 struct mgmt_cp_disconnect *cp = data;
3030 struct mgmt_rp_disconnect rp;
3031 struct mgmt_pending_cmd *cmd;
3032 struct hci_conn *conn;
/* The reply always echoes the target address. */
3037 memset(&rp, 0, sizeof(rp));
3038 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3039 rp.addr.type = cp->addr.type;
3041 if (!bdaddr_type_is_valid(cp->addr.type))
3042 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3043 MGMT_STATUS_INVALID_PARAMS,
3048 if (!test_bit(HCI_UP, &hdev->flags)) {
3049 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3050 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be in flight per adapter. */
3055 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3056 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3057 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3061 if (cp->addr.type == BDADDR_BREDR)
3062 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3065 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED states carry no live link to tear down. */
3067 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3068 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3069 MGMT_STATUS_NOT_CONNECTED, &rp,
3074 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3080 cmd->cmd_complete = generic_cmd_complete;
3082 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3084 mgmt_pending_remove(cmd);
3087 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into the single MGMT
 * BDADDR_* address-type value used on the management interface.
 * NOTE(review): the case labels for LE_LINK and the defaults are elided
 * in this excerpt; only the fallback comments remain visible.
 */
3091 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3093 switch (link_type) {
3095 switch (addr_type) {
3096 case ADDR_LE_DEV_PUBLIC:
3097 return BDADDR_LE_PUBLIC;
3100 /* Fallback to LE Random address type */
3101 return BDADDR_LE_RANDOM;
3105 /* Fallback to BR/EDR type */
3106 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: build and return the list of
 * currently connected devices.  SCO/eSCO links are filtered out since
 * MGMT only reports ACL/LE connections; the reply buffer is sized from
 * a first counting pass and the length recomputed after filtering.
 * NOTE(review): excerpt elides lines (lock, kmalloc NULL check, free,
 * unlock) — verify against the full source before editing.
 */
3110 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3113 struct mgmt_rp_get_connections *rp;
3123 if (!hdev_is_powered(hdev)) {
3124 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3125 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections that MGMT has reported as connected. */
3130 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3131 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3135 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3136 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
3143 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3144 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3146 bacpy(&rp->addr[i].bdaddr, &c->dst);
3147 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3148 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3153 rp->conn_count = cpu_to_le16(i);
3155 /* Recalculate length in case of filtered SCO connections, etc */
3156 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3158 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3164 hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply for @cp->addr and register a
 * pending MGMT command so the reply can be completed asynchronously.
 * The pending entry is removed again if sending the HCI command fails.
 */
3168 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3169 struct mgmt_cp_pin_code_neg_reply *cp)
3171 struct mgmt_pending_cmd *cmd;
3174 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3179 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3180 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3182 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for the ACL connection to @cp->addr.  If the link is
 * pending high security, a 16-byte PIN is mandatory — shorter PINs are
 * converted into a negative reply and the command fails with
 * INVALID_PARAMS.
 * NOTE(review): excerpt elides lines (lock, goto labels) — verify
 * against the full source before editing.
 */
3187 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3190 struct hci_conn *conn;
3191 struct mgmt_cp_pin_code_reply *cp = data;
3192 struct hci_cp_pin_code_reply reply;
3193 struct mgmt_pending_cmd *cmd;
3200 if (!hdev_is_powered(hdev)) {
3201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3202 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only exists on BR/EDR, so look up the ACL link. */
3206 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3208 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3209 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; reject anything less. */
3213 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3214 struct mgmt_cp_pin_code_neg_reply ncp;
3216 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3218 BT_ERR("PIN code is not 16 bytes long");
3220 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3223 MGMT_STATUS_INVALID_PARAMS);
3228 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3234 cmd->cmd_complete = addr_cmd_complete;
3236 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3237 reply.pin_len = cp->pin_len;
3238 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3240 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3242 mgmt_pending_remove(cmd);
3245 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairing attempts.  Values above SMP_IO_KEYBOARD_DISPLAY are
 * rejected; the setting takes effect immediately (synchronous reply).
 */
3249 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3252 struct mgmt_cp_set_io_capability *cp = data;
3256 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3257 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3258 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3262 hdev->io_capability = cp->io_capability;
3264 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3265 hdev->io_capability);
3267 hci_dev_unlock(hdev);
3269 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at @conn, or NULL when no pairing is in progress for this connection.
 */
3273 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3275 struct hci_dev *hdev = conn->hdev;
3276 struct mgmt_pending_cmd *cmd;
3278 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3279 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3282 if (cmd->user_data != conn)
/* Complete a pending Pair Device command with @status: send the reply
 * carrying the peer address, detach all pairing callbacks from the
 * connection so no further events fire, and drop the reference taken
 * when pairing started.
 */
3291 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3293 struct mgmt_rp_pair_device rp;
3294 struct hci_conn *conn = cmd->user_data;
3297 bacpy(&rp.addr.bdaddr, &conn->dst);
3298 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3300 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3301 status, &rp, sizeof(rp));
3303 /* So we don't get further callbacks for this connection */
3304 conn->connect_cfm_cb = NULL;
3305 conn->security_cfm_cb = NULL;
3306 conn->disconn_cfm_cb = NULL;
3308 hci_conn_drop(conn);
3310 /* The device is paired so there is no need to remove
3311 * its connection parameters anymore.
3313 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when an LE pairing procedure finishes.
 * Completes the matching pending Pair Device command with SUCCESS or
 * FAILED depending on @complete; no-op if no pairing is pending.
 */
3320 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3322 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3323 struct mgmt_pending_cmd *cmd;
3325 cmd = find_pairing(conn);
3327 cmd->cmd_complete(cmd, status);
3328 mgmt_pending_remove(cmd);
/* Connection callback used for BR/EDR pairing: translate the HCI
 * status and complete the pending Pair Device command for @conn.
 */
3332 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3334 struct mgmt_pending_cmd *cmd;
3336 BT_DBG("status %u", status);
3338 cmd = find_pairing(conn);
3340 BT_DBG("Unable to find a pending command");
3344 cmd->cmd_complete(cmd, mgmt_status(status));
3345 mgmt_pending_remove(cmd);
/* Connection callback used for LE pairing.  For LE a successful
 * connect alone does not mean pairing finished, so (in the elided
 * lines) success is ignored here and only failures complete the
 * pending Pair Device command — TODO confirm against full source.
 */
3348 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3350 struct mgmt_pending_cmd *cmd;
3352 BT_DBG("status %u", status);
3357 cmd = find_pairing(conn);
3359 BT_DBG("Unable to find a pending command");
3363 cmd->cmd_complete(cmd, mgmt_status(status));
3364 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with the
 * given device.  Creates (or reuses) an ACL or LE connection, installs
 * the per-transport pairing callbacks, and leaves a pending command
 * that is completed from those callbacks once pairing finishes.
 * NOTE(review): excerpt elides lines (lock, goto labels, IS_ERR check
 * around the connect calls) — verify against the full source.
 */
3367 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3370 struct mgmt_cp_pair_device *cp = data;
3371 struct mgmt_rp_pair_device rp;
3372 struct mgmt_pending_cmd *cmd;
3373 u8 sec_level, auth_type;
3374 struct hci_conn *conn;
/* The reply always echoes the peer address. */
3379 memset(&rp, 0, sizeof(rp));
3380 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3381 rp.addr.type = cp->addr.type;
3383 if (!bdaddr_type_is_valid(cp->addr.type))
3384 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3385 MGMT_STATUS_INVALID_PARAMS,
3388 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3389 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3390 MGMT_STATUS_INVALID_PARAMS,
3395 if (!hdev_is_powered(hdev)) {
3396 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3397 MGMT_STATUS_NOT_POWERED, &rp,
/* Re-pairing an already paired device requires unpairing first. */
3402 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3403 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3404 MGMT_STATUS_ALREADY_PAIRED, &rp,
3409 sec_level = BT_SECURITY_MEDIUM;
3410 auth_type = HCI_AT_DEDICATED_BONDING;
3412 if (cp->addr.type == BDADDR_BREDR) {
3413 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3418 /* Convert from L2CAP channel address type to HCI address type
3420 if (cp->addr.type == BDADDR_LE_PUBLIC)
3421 addr_type = ADDR_LE_DEV_PUBLIC;
3423 addr_type = ADDR_LE_DEV_RANDOM;
3425 /* When pairing a new device, it is expected to remember
3426 * this device for future connections. Adding the connection
3427 * parameter information ahead of time allows tracking
3428 * of the slave preferred values and will speed up any
3429 * further connection establishment.
3431 * If connection parameters already exist, then they
3432 * will be kept and this function does nothing.
3434 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3436 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3437 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map connect-failure errno values onto MGMT status codes. */
3444 if (PTR_ERR(conn) == -EBUSY)
3445 status = MGMT_STATUS_BUSY;
3446 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3447 status = MGMT_STATUS_NOT_SUPPORTED;
3448 else if (PTR_ERR(conn) == -ECONNREFUSED)
3449 status = MGMT_STATUS_REJECTED;
3451 status = MGMT_STATUS_CONNECT_FAILED;
3453 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3454 status, &rp, sizeof(rp));
/* A connection with callbacks installed already has a pairing going. */
3458 if (conn->connect_cfm_cb) {
3459 hci_conn_drop(conn);
3460 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3461 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3465 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3468 hci_conn_drop(conn);
3472 cmd->cmd_complete = pairing_complete;
3474 /* For LE, just connecting isn't a proof that the pairing finished */
3475 if (cp->addr.type == BDADDR_BREDR) {
3476 conn->connect_cfm_cb = pairing_complete_cb;
3477 conn->security_cfm_cb = pairing_complete_cb;
3478 conn->disconn_cfm_cb = pairing_complete_cb;
3480 conn->connect_cfm_cb = le_pairing_complete_cb;
3481 conn->security_cfm_cb = le_pairing_complete_cb;
3482 conn->disconn_cfm_cb = le_pairing_complete_cb;
3485 conn->io_capability = cp->io_cap;
/* Hold a reference to the connection for the pending command. */
3486 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure enough, finish immediately. */
3488 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3489 hci_conn_security(conn, sec_level, auth_type, true)) {
3490 cmd->cmd_complete(cmd, 0);
3491 mgmt_pending_remove(cmd);
3497 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command.  The address supplied must match the connection the pending
 * pairing refers to; the pairing is completed with CANCELLED and this
 * command replies with the address on success.
 */
3501 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3504 struct mgmt_addr_info *addr = data;
3505 struct mgmt_pending_cmd *cmd;
3506 struct hci_conn *conn;
3513 if (!hdev_is_powered(hdev)) {
3514 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3515 MGMT_STATUS_NOT_POWERED);
/* There must be a pairing in progress to cancel. */
3519 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3521 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3522 MGMT_STATUS_INVALID_PARAMS);
3526 conn = cmd->user_data;
/* The given address must be the one being paired. */
3528 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3529 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3530 MGMT_STATUS_INVALID_PARAMS);
3534 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3535 mgmt_pending_remove(cmd);
3537 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3538 addr, sizeof(*addr));
3540 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN negative reply,
 * user confirm pos/neg, passkey pos/neg).  For LE addresses the reply
 * is routed to SMP and completed synchronously; for BR/EDR the
 * corresponding HCI command @hci_op is sent and the MGMT reply is
 * deferred via a pending command.
 * NOTE(review): excerpt elides lines (lock, goto labels) — verify
 * against the full source before editing.
 */
3544 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3545 struct mgmt_addr_info *addr, u16 mgmt_op,
3546 u16 hci_op, __le32 passkey)
3548 struct mgmt_pending_cmd *cmd;
3549 struct hci_conn *conn;
3554 if (!hdev_is_powered(hdev)) {
3555 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3556 MGMT_STATUS_NOT_POWERED, addr,
3561 if (addr->type == BDADDR_BREDR)
3562 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3564 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3567 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3568 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: hand the response straight to SMP and reply synchronously. */
3573 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3574 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3576 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3577 MGMT_STATUS_SUCCESS, addr,
3580 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3581 MGMT_STATUS_FAILED, addr,
3587 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3593 cmd->cmd_complete = addr_cmd_complete;
3595 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey in the HCI parameters; every other
 * response carries only the peer address.
 */
3596 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3597 struct hci_cp_user_passkey_reply cp;
3599 bacpy(&cp.bdaddr, &addr->bdaddr);
3600 cp.passkey = passkey;
3601 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3603 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3607 mgmt_pending_remove(cmd);
3610 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3614 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3615 void *data, u16 len)
3617 struct mgmt_cp_pin_code_neg_reply *cp = data;
3621 return user_pairing_resp(sk, hdev, &cp->addr,
3622 MGMT_OP_PIN_CODE_NEG_REPLY,
3623 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the exact command size
 * (this opcode takes no variable-length payload) and delegate to
 * user_pairing_resp().
 */
3626 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3629 struct mgmt_cp_user_confirm_reply *cp = data;
3633 if (len != sizeof(*cp))
3634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3635 MGMT_STATUS_INVALID_PARAMS);
3637 return user_pairing_resp(sk, hdev, &cp->addr,
3638 MGMT_OP_USER_CONFIRM_REPLY,
3639 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3642 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3643 void *data, u16 len)
3645 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3649 return user_pairing_resp(sk, hdev, &cp->addr,
3650 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3651 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forward the user-entered passkey
 * via user_pairing_resp(), which attaches it to the HCI command.
 */
3654 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3657 struct mgmt_cp_user_passkey_reply *cp = data;
3661 return user_pairing_resp(sk, hdev, &cp->addr,
3662 MGMT_OP_USER_PASSKEY_REPLY,
3663 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3666 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3667 void *data, u16 len)
3669 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3673 return user_pairing_resp(sk, hdev, &cp->addr,
3674 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3675 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command on @req carrying the name
 * currently stored in hdev->dev_name.
 */
3678 static void update_name(struct hci_request *req)
3680 struct hci_dev *hdev = req->hdev;
3681 struct hci_cp_write_local_name cp;
3683 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3685 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: finish the
 * pending MGMT command with the translated HCI status (failure → cmd
 * status, success → cmd complete carrying the name data).
 */
3688 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3690 struct mgmt_cp_set_local_name *cp;
3691 struct mgmt_pending_cmd *cmd;
3693 BT_DBG("status 0x%02x", status);
3697 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3704 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3705 mgmt_status(status));
3707 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3710 mgmt_pending_remove(cmd);
3713 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's long and short
 * names.  Unchanged names and powered-off adapters are answered
 * immediately; otherwise the name is pushed to the controller (and,
 * when LE capable, into the scan response data) via an HCI request
 * completed by set_name_complete().
 * NOTE(review): excerpt elides lines (lock, goto labels, EIR update
 * call in the BR/EDR branch) — verify against the full source.
 */
3716 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3719 struct mgmt_cp_set_local_name *cp = data;
3720 struct mgmt_pending_cmd *cmd;
3721 struct hci_request req;
3728 /* If the old values are the same as the new ones just return a
3729 * direct command complete event.
3731 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3732 !memcmp(hdev->short_name, cp->short_name,
3733 sizeof(hdev->short_name))) {
3734 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name has no controller-side state, store it right away. */
3739 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: store the name and notify other sockets only. */
3741 if (!hdev_is_powered(hdev)) {
3742 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3744 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3749 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3755 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3761 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3763 hci_req_init(&req, hdev);
3765 if (lmp_bredr_capable(hdev)) {
3770 /* The name is stored in the scan response data and so
3771 * no need to udpate the advertising data here.
3773 if (lmp_le_capable(hdev))
3774 update_scan_rsp_data(&req);
3776 err = hci_req_run(&req, set_name_complete);
3778 mgmt_pending_remove(cmd);
3781 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the controller's local
 * OOB pairing data.  Requires the adapter powered and SSP capable, and
 * allows only one outstanding request.  Uses the extended HCI variant
 * when BR/EDR Secure Connections is enabled.
 */
3785 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3786 void *data, u16 data_len)
3788 struct mgmt_pending_cmd *cmd;
3791 BT_DBG("%s", hdev->name);
3795 if (!hdev_is_powered(hdev)) {
3796 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3797 MGMT_STATUS_NOT_POWERED);
3801 if (!lmp_ssp_capable(hdev)) {
3802 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3803 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be in flight at a time. */
3807 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3808 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3813 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
/* Secure Connections controllers also return the P-256 hash/rand. */
3819 if (bredr_sc_enabled(hdev))
3820 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3823 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3826 mgmt_pending_remove(cmd);
3829 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store OOB pairing data received
 * out-of-band for a remote device.  The command comes in two sizes —
 * the legacy form with only P-192 hash/rand (BR/EDR only) and the
 * extended form that additionally carries P-256 values.  All-zero
 * hash/rand pairs disable that curve's OOB data; LE addresses must
 * have zero P-192 values since legacy SMP OOB is not implemented.
 * NOTE(review): excerpt elides lines (lock, goto labels, else
 * branches) — verify against the full source before editing.
 */
3833 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3834 void *data, u16 len)
3836 struct mgmt_addr_info *addr = data;
3839 BT_DBG("%s ", hdev->name);
3841 if (!bdaddr_type_is_valid(addr->type))
3842 return mgmt_cmd_complete(sk, hdev->id,
3843 MGMT_OP_ADD_REMOTE_OOB_DATA,
3844 MGMT_STATUS_INVALID_PARAMS,
3845 addr, sizeof(*addr));
/* Legacy form: P-192 hash/rand only, valid for BR/EDR addresses. */
3849 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3850 struct mgmt_cp_add_remote_oob_data *cp = data;
3853 if (cp->addr.type != BDADDR_BREDR) {
3854 err = mgmt_cmd_complete(sk, hdev->id,
3855 MGMT_OP_ADD_REMOTE_OOB_DATA,
3856 MGMT_STATUS_INVALID_PARAMS,
3857 &cp->addr, sizeof(cp->addr));
3861 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3862 cp->addr.type, cp->hash,
3863 cp->rand, NULL, NULL);
3865 status = MGMT_STATUS_FAILED;
3867 status = MGMT_STATUS_SUCCESS;
3869 err = mgmt_cmd_complete(sk, hdev->id,
3870 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3871 &cp->addr, sizeof(cp->addr));
/* Extended form: carries both P-192 and P-256 hash/rand values. */
3872 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3873 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3874 u8 *rand192, *hash192, *rand256, *hash256;
3877 if (bdaddr_type_is_le(cp->addr.type)) {
3878 /* Enforce zero-valued 192-bit parameters as
3879 * long as legacy SMP OOB isn't implemented.
3881 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3882 memcmp(cp->hash192, ZERO_KEY, 16)) {
3883 err = mgmt_cmd_complete(sk, hdev->id,
3884 MGMT_OP_ADD_REMOTE_OOB_DATA,
3885 MGMT_STATUS_INVALID_PARAMS,
3886 addr, sizeof(*addr));
3893 /* In case one of the P-192 values is set to zero,
3894 * then just disable OOB data for P-192.
3896 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3897 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3901 rand192 = cp->rand192;
3902 hash192 = cp->hash192;
3906 /* In case one of the P-256 values is set to zero, then just
3907 * disable OOB data for P-256.
3909 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3910 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3914 rand256 = cp->rand256;
3915 hash256 = cp->hash256;
3918 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3919 cp->addr.type, hash192, rand192,
3922 status = MGMT_STATUS_FAILED;
3924 status = MGMT_STATUS_SUCCESS;
3926 err = mgmt_cmd_complete(sk, hdev->id,
3927 MGMT_OP_ADD_REMOTE_OOB_DATA,
3928 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command. */
3930 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3931 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3932 MGMT_STATUS_INVALID_PARAMS);
3936 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for
 * one BR/EDR address, or for all devices when BDADDR_ANY is given.
 */
3940 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3941 void *data, u16 len)
3943 struct mgmt_cp_remove_remote_oob_data *cp = data;
3947 BT_DBG("%s", hdev->name);
3949 if (cp->addr.type != BDADDR_BREDR)
3950 return mgmt_cmd_complete(sk, hdev->id,
3951 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3952 MGMT_STATUS_INVALID_PARAMS,
3953 &cp->addr, sizeof(cp->addr));
/* The any-address wildcard clears every stored OOB entry. */
3957 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3958 hci_remote_oob_data_clear(hdev);
3959 status = MGMT_STATUS_SUCCESS;
3963 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3965 status = MGMT_STATUS_INVALID_PARAMS;
3967 status = MGMT_STATUS_SUCCESS;
3970 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3971 status, &cp->addr, sizeof(cp->addr));
3973 hci_dev_unlock(hdev);
/* Queue a BR/EDR general inquiry (GIAC) on @req.  Returns false with
 * *status set when BR/EDR is unsupported or an inquiry is already
 * running; flushes the inquiry cache before starting.
 */
3977 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
3979 struct hci_dev *hdev = req->hdev;
3980 struct hci_cp_inquiry cp;
3981 /* General inquiry access code (GIAC) */
3982 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3984 *status = mgmt_bredr_support(hdev);
3988 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
3989 *status = MGMT_STATUS_BUSY;
3993 hci_inquiry_cache_flush(hdev);
3995 memset(&cp, 0, sizeof(cp));
3996 memcpy(&cp.lap, lap, sizeof(cp.lap));
3997 cp.length = DISCOV_BREDR_INQUIRY_LEN;
3999 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Queue the HCI commands that start an active LE scan with the given
 * @interval on @req.  Handles the preconditions: LE support, stopping
 * directed advertising (unless a connection attempt depends on it),
 * pausing background scanning, and picking an own-address type.
 * Returns false with *status set on any precondition failure.
 */
4004 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
4006 struct hci_dev *hdev = req->hdev;
4007 struct hci_cp_le_set_scan_param param_cp;
4008 struct hci_cp_le_set_scan_enable enable_cp;
4012 *status = mgmt_le_support(hdev);
4016 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4017 /* Don't let discovery abort an outgoing connection attempt
4018 * that's using directed advertising.
4020 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
4021 *status = MGMT_STATUS_REJECTED;
4025 disable_advertising(req);
4028 /* If controller is scanning, it means the background scanning is
4029 * running. Thus, we should temporarily stop it in order to set the
4030 * discovery scanning parameters.
4032 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4033 hci_req_add_le_scan_disable(req);
4035 /* All active scans will be done with either a resolvable private
4036 * address (when privacy feature has been enabled) or non-resolvable
4039 err = hci_update_random_address(req, true, &own_addr_type);
4041 *status = MGMT_STATUS_FAILED;
4045 memset(&param_cp, 0, sizeof(param_cp));
4046 param_cp.type = LE_SCAN_ACTIVE;
4047 param_cp.interval = cpu_to_le16(interval);
4048 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4049 param_cp.own_address_type = own_addr_type;
4051 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4054 memset(&enable_cp, 0, sizeof(enable_cp));
4055 enable_cp.enable = LE_SCAN_ENABLE;
4056 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4058 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Queue the HCI commands for the adapter's current discovery type on
 * @req: BR/EDR inquiry, LE scan, or interleaved.  With the
 * simultaneous-discovery quirk both are started together (LE scan at
 * double interval to leave room for the inquiry).  Returns false with
 * *status set when the type cannot be started.
 */
4064 static bool trigger_discovery(struct hci_request *req, u8 *status)
4066 struct hci_dev *hdev = req->hdev;
4068 switch (hdev->discovery.type) {
4069 case DISCOV_TYPE_BREDR:
4070 if (!trigger_bredr_inquiry(req, status))
4074 case DISCOV_TYPE_INTERLEAVED:
4075 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4077 /* During simultaneous discovery, we double LE scan
4078 * interval. We must leave some time for the controller
4079 * to do BR/EDR inquiry.
4081 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4085 if (!trigger_bredr_inquiry(req, status))
/* Without the quirk, interleaving requires BR/EDR to be enabled. */
4091 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4092 *status = MGMT_STATUS_NOT_SUPPORTED;
4097 case DISCOV_TYPE_LE:
4098 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4103 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery.  Completes the pending MGMT command, moves the
 * discovery state machine to FINDING (or back to STOPPED on failure),
 * and schedules the le_scan_disable work with a timeout appropriate to
 * the discovery type.
 * NOTE(review): excerpt elides lines (lock, goto labels, break
 * statements) — verify against the full source before editing.
 */
4110 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4113 struct mgmt_pending_cmd *cmd;
4114 unsigned long timeout;
4116 BT_DBG("status %d", status);
/* Either opcode may own the request; try both. */
4120 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4122 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4125 cmd->cmd_complete(cmd, mgmt_status(status));
4126 mgmt_pending_remove(cmd);
4130 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4134 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4136 /* If the scan involves LE scan, pick proper timeout to schedule
4137 * hdev->le_scan_disable that will stop it.
4139 switch (hdev->discovery.type) {
4140 case DISCOV_TYPE_LE:
4141 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4143 case DISCOV_TYPE_INTERLEAVED:
4144 /* When running simultaneous discovery, the LE scanning time
4145 * should occupy the whole discovery time sine BR/EDR inquiry
4146 * and LE scanning are scheduled by the controller.
4148 * For interleaving discovery in comparison, BR/EDR inquiry
4149 * and LE scanning are done sequentially with separate
4152 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4153 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4155 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4157 case DISCOV_TYPE_BREDR:
4161 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4167 /* When service discovery is used and the controller has
4168 * a strict duplicate filter, it is important to remember
4169 * the start and duration of the scan. This is required
4170 * for restarting scanning during the discovery phase.
4172 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4174 hdev->discovery.result_filtering) {
4175 hdev->discovery.scan_start = jiffies;
4176 hdev->discovery.scan_duration = timeout;
4179 queue_delayed_work(hdev->workqueue,
4180 &hdev->le_scan_disable, timeout);
4184 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: begin device discovery of the
 * requested type.  Requires the adapter powered and no discovery (or
 * periodic inquiry) already in progress; resets the discovery filter,
 * queues the HCI commands via trigger_discovery() and completes
 * asynchronously through start_discovery_complete().
 * NOTE(review): excerpt elides lines (lock, goto labels) — verify
 * against the full source before editing.
 */
4187 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4188 void *data, u16 len)
4190 struct mgmt_cp_start_discovery *cp = data;
4191 struct mgmt_pending_cmd *cmd;
4192 struct hci_request req;
4196 BT_DBG("%s", hdev->name);
4200 if (!hdev_is_powered(hdev)) {
4201 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4202 MGMT_STATUS_NOT_POWERED,
4203 &cp->type, sizeof(cp->type));
4207 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4208 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4209 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4210 MGMT_STATUS_BUSY, &cp->type,
4215 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4221 cmd->cmd_complete = generic_cmd_complete;
4223 /* Clear the discovery filter first to free any previously
4224 * allocated memory for the UUID list.
4226 hci_discovery_filter_clear(hdev);
4228 hdev->discovery.type = cp->type;
4229 hdev->discovery.report_invalid_rssi = false;
4231 hci_req_init(&req, hdev);
4233 if (!trigger_discovery(&req, &status)) {
4234 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4235 status, &cp->type, sizeof(cp->type));
4236 mgmt_pending_remove(cmd);
4240 err = hci_req_run(&req, start_discovery_complete);
4242 mgmt_pending_remove(cmd);
4246 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4249 hci_dev_unlock(hdev);
/* cmd_complete hook for Start Service Discovery: reply with @status
 * echoing the command's own parameters (the discovery type byte).
 */
4253 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4256 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery but
 * with result filtering — an RSSI threshold and an optional list of
 * 128-bit service UUIDs.  Validates that uuid_count cannot overflow
 * the u16 expected-length computation and that the payload length
 * matches, copies the UUID list, then triggers discovery as usual.
 * NOTE(review): excerpt elides lines (lock, goto labels) — verify
 * against the full source before editing.
 */
4260 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4261 void *data, u16 len)
4263 struct mgmt_cp_start_service_discovery *cp = data;
4264 struct mgmt_pending_cmd *cmd;
4265 struct hci_request req;
/* Largest uuid_count whose total message length still fits in a u16. */
4266 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4267 u16 uuid_count, expected_len;
4271 BT_DBG("%s", hdev->name);
4275 if (!hdev_is_powered(hdev)) {
4276 err = mgmt_cmd_complete(sk, hdev->id,
4277 MGMT_OP_START_SERVICE_DISCOVERY,
4278 MGMT_STATUS_NOT_POWERED,
4279 &cp->type, sizeof(cp->type));
4283 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4284 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4285 err = mgmt_cmd_complete(sk, hdev->id,
4286 MGMT_OP_START_SERVICE_DISCOVERY,
4287 MGMT_STATUS_BUSY, &cp->type,
4292 uuid_count = __le16_to_cpu(cp->uuid_count);
4293 if (uuid_count > max_uuid_count) {
4294 BT_ERR("service_discovery: too big uuid_count value %u",
4296 err = mgmt_cmd_complete(sk, hdev->id,
4297 MGMT_OP_START_SERVICE_DISCOVERY,
4298 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The wire length must match exactly what uuid_count implies. */
4303 expected_len = sizeof(*cp) + uuid_count * 16;
4304 if (expected_len != len) {
4305 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4307 err = mgmt_cmd_complete(sk, hdev->id,
4308 MGMT_OP_START_SERVICE_DISCOVERY,
4309 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4314 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4321 cmd->cmd_complete = service_discovery_cmd_complete;
4323 /* Clear the discovery filter first to free any previously
4324 * allocated memory for the UUID list.
4326 hci_discovery_filter_clear(hdev);
4328 hdev->discovery.result_filtering = true;
4329 hdev->discovery.type = cp->type;
4330 hdev->discovery.rssi = cp->rssi;
4331 hdev->discovery.uuid_count = uuid_count;
4333 if (uuid_count > 0) {
4334 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4336 if (!hdev->discovery.uuids) {
4337 err = mgmt_cmd_complete(sk, hdev->id,
4338 MGMT_OP_START_SERVICE_DISCOVERY,
4340 &cp->type, sizeof(cp->type));
4341 mgmt_pending_remove(cmd);
4346 hci_req_init(&req, hdev);
4348 if (!trigger_discovery(&req, &status)) {
4349 err = mgmt_cmd_complete(sk, hdev->id,
4350 MGMT_OP_START_SERVICE_DISCOVERY,
4351 status, &cp->type, sizeof(cp->type));
4352 mgmt_pending_remove(cmd);
4356 err = hci_req_run(&req, start_discovery_complete);
4358 mgmt_pending_remove(cmd);
4362 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4365 hci_dev_unlock(hdev);
/* Completion callback for the HCI request issued by stop_discovery().
 * Completes the pending MGMT_OP_STOP_DISCOVERY command (if one exists)
 * and moves the discovery state machine to DISCOVERY_STOPPED.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
4369 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4371 struct mgmt_pending_cmd *cmd;
4373 BT_DBG("status %d", status);
/* Reply to the pending command with the translated HCI status. */
4377 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4379 cmd->cmd_complete(cmd, mgmt_status(status));
4380 mgmt_pending_remove(cmd);
4384 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4386 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: stop an active device discovery session.
 * Rejects the request when no discovery is active or when the requested
 * type does not match the running one; otherwise queues HCI commands via
 * hci_stop_discovery() and completes asynchronously in
 * stop_discovery_complete().
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels).
 */
4389 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4392 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4393 struct mgmt_pending_cmd *cmd;
4394 struct hci_request req;
4397 BT_DBG("%s", hdev->name);
/* Nothing to stop: reject instead of silently succeeding. */
4401 if (!hci_discovery_active(hdev)) {
4402 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4403 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4404 sizeof(mgmt_cp->type));
/* The caller must name the same discovery type that is running. */
4408 if (hdev->discovery.type != mgmt_cp->type) {
4409 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4410 MGMT_STATUS_INVALID_PARAMS,
4411 &mgmt_cp->type, sizeof(mgmt_cp->type));
4415 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4421 cmd->cmd_complete = generic_cmd_complete;
4423 hci_req_init(&req, hdev);
4425 hci_stop_discovery(&req);
4427 err = hci_req_run(&req, stop_discovery_complete);
4429 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4433 mgmt_pending_remove(cmd);
4435 /* If no HCI commands were sent we're done */
4436 if (err == -ENODATA) {
4437 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4438 &mgmt_cp->type, sizeof(mgmt_cp->type));
4439 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4443 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: user space tells us whether the name of a
 * discovered device is already known. If not known, mark the inquiry
 * cache entry NAME_NEEDED so name resolution is scheduled for it.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
4447 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4450 struct mgmt_cp_confirm_name *cp = data;
4451 struct inquiry_entry *e;
4454 BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery session is active. */
4458 if (!hci_discovery_active(hdev)) {
4459 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4460 MGMT_STATUS_FAILED, &cp->addr,
/* The address must refer to a cache entry with an unknown name. */
4465 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4467 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4468 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4473 if (cp->name_known) {
4474 e->name_state = NAME_KNOWN;
4477 e->name_state = NAME_NEEDED;
/* Re-position the entry in the resolve list now that it needs a name. */
4478 hci_inquiry_cache_update_resolve(hdev, e);
4481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4482 &cp->addr, sizeof(cp->addr));
4485 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the given address to the adapter's
 * blacklist and emit MGMT_EV_DEVICE_BLOCKED on success.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
4489 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4492 struct mgmt_cp_block_device *cp = data;
4496 BT_DBG("%s", hdev->name);
4498 if (!bdaddr_type_is_valid(cp->addr.type))
4499 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4500 MGMT_STATUS_INVALID_PARAMS,
4501 &cp->addr, sizeof(cp->addr));
4505 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
/* List insertion failed (e.g. duplicate); report generic failure. */
4508 status = MGMT_STATUS_FAILED;
/* Notify other mgmt sockets about the newly blocked device. */
4512 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4514 status = MGMT_STATUS_SUCCESS;
4517 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4518 &cp->addr, sizeof(cp->addr));
4520 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * adapter's blacklist and emit MGMT_EV_DEVICE_UNBLOCKED on success.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
4525 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4528 struct mgmt_cp_unblock_device *cp = data;
4532 BT_DBG("%s", hdev->name);
4534 if (!bdaddr_type_is_valid(cp->addr.type))
4535 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4536 MGMT_STATUS_INVALID_PARAMS,
4537 &cp->addr, sizeof(cp->addr));
4541 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
/* Deletion failed, i.e. the address was not blacklisted. */
4544 status = MGMT_STATUS_INVALID_PARAMS;
4548 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4550 status = MGMT_STATUS_SUCCESS;
4553 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4554 &cp->addr, sizeof(cp->addr));
4556 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID (source, vendor,
 * product, version) in hdev and kick an HCI request so dependent data
 * (e.g. EIR) gets refreshed.
 * NOTE(review): this extract elides some original lines (locking, the
 * update call inside the request, braces).
 */
4561 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4564 struct mgmt_cp_set_device_id *cp = data;
4565 struct hci_request req;
4569 BT_DBG("%s", hdev->name);
4571 source = __le16_to_cpu(cp->source);
/* Only source values 0x0000-0x0002 are valid. */
4573 if (source > 0x0002)
4574 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4575 MGMT_STATUS_INVALID_PARAMS);
4579 hdev->devid_source = source;
4580 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4581 hdev->devid_product = __le16_to_cpu(cp->product);
4582 hdev->devid_version = __le16_to_cpu(cp->version);
4584 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4587 hci_req_init(&req, hdev);
/* Fire-and-forget request; no completion callback needed. */
4589 hci_req_run(&req, NULL);
4591 hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising after
 * "Set Advertising" was turned off; only logs the resulting status.
 */
4596 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4599 BT_DBG("status %d", status);
/* Completion callback for set_advertising(). On failure, answer all
 * pending SET_ADVERTISING commands with the error. On success, sync the
 * HCI_ADVERTISING flag with the controller state (HCI_LE_ADV), respond to
 * pending commands and broadcast new settings. Finally, if needed,
 * re-enable instance advertising.
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels).
 */
4602 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4605 struct cmd_lookup match = { NULL, hdev };
4606 struct hci_request req;
4611 u8 mgmt_err = mgmt_status(status);
/* Failure path: report the error to every pending command. */
4613 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4614 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual LE advertising state into the flag. */
4618 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4619 hci_dev_set_flag(hdev, HCI_ADVERTISING)
4621 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4623 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4626 new_settings(hdev, match.sk);
4631 /* If "Set Advertising" was just disabled and instance advertising was
4632 * set up earlier, then enable the advertising instance.
4634 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4635 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
4638 hci_req_init(&req, hdev);
4640 update_adv_data(&req);
4641 enable_advertising(&req);
4643 if (hci_req_run(&req, enable_advertising_instance) < 0)
4644 BT_ERR("Failed to re-configure advertising");
4647 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_ADVERTISING: enable (0x01), enable-connectable
 * (0x02) or disable (0x00) LE advertising. When no HCI traffic is needed
 * (powered off, no state change, active LE connection/scan) the flags are
 * toggled directly and a settings response is sent; otherwise an HCI
 * request is queued and completed in set_advertising_complete().
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels, the 'val' assignment).
 */
4650 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4653 struct mgmt_mode *cp = data;
4654 struct mgmt_pending_cmd *cmd;
4655 struct hci_request req;
4659 BT_DBG("request for %s", hdev->name);
4661 status = mgmt_le_support(hdev);
4663 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
/* Only 0x00 (off), 0x01 (on) and 0x02 (connectable) are accepted. */
4666 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4667 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4668 MGMT_STATUS_INVALID_PARAMS);
4674 /* The following conditions are ones which mean that we should
4675 * not do any HCI communication but directly send a mgmt
4676 * response to user space (after toggling the flag if
4679 if (!hdev_is_powered(hdev) ||
4680 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4681 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4682 hci_conn_num(hdev, LE_LINK) > 0 ||
4683 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4684 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4688 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4689 if (cp->val == 0x02)
4690 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4692 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4694 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4695 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4698 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4703 err = new_settings(hdev, sk);
/* Refuse concurrent SET_ADVERTISING / SET_LE operations. */
4708 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4709 pending_find(MGMT_OP_SET_LE, hdev)) {
4710 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4715 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4721 hci_req_init(&req, hdev);
4723 if (cp->val == 0x02)
4724 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4726 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4729 /* Switch to instance "0" for the Set Advertising setting. */
4730 update_adv_data_for_instance(&req, 0);
4731 update_scan_rsp_data_for_instance(&req, 0);
4732 enable_advertising(&req);
4734 disable_advertising(&req);
4737 err = hci_req_run(&req, set_advertising_complete);
4739 mgmt_pending_remove(cmd);
4742 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address. Only allowed on LE-capable, powered-off controllers; the
 * address must either be BDADDR_ANY (to clear it) or a valid static
 * random address (two most significant bits set, not BDADDR_NONE).
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
4746 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4747 void *data, u16 len)
4749 struct mgmt_cp_set_static_address *cp = data;
4752 BT_DBG("%s", hdev->name);
4754 if (!lmp_le_capable(hdev))
4755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4756 MGMT_STATUS_NOT_SUPPORTED);
/* Changing the identity address while powered is not allowed. */
4758 if (hdev_is_powered(hdev))
4759 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4760 MGMT_STATUS_REJECTED);
4762 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4763 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4764 return mgmt_cmd_status(sk, hdev->id,
4765 MGMT_OP_SET_STATIC_ADDRESS,
4766 MGMT_STATUS_INVALID_PARAMS);
4768 /* Two most significant bits shall be set */
4769 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4770 return mgmt_cmd_status(sk, hdev->id,
4771 MGMT_OP_SET_STATIC_ADDRESS,
4772 MGMT_STATUS_INVALID_PARAMS);
4777 bacpy(&hdev->static_addr, &cp->bdaddr);
4779 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4783 err = new_settings(hdev, sk);
4786 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: set the LE scan interval and window.
 * Both must lie in 0x0004-0x4000 and the window must not exceed the
 * interval. If passive background scanning is currently running (and no
 * discovery is active), restart it so the new parameters take effect.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
4790 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4791 void *data, u16 len)
4793 struct mgmt_cp_set_scan_params *cp = data;
4794 __u16 interval, window;
4797 BT_DBG("%s", hdev->name);
4799 if (!lmp_le_capable(hdev))
4800 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4801 MGMT_STATUS_NOT_SUPPORTED);
4803 interval = __le16_to_cpu(cp->interval);
4805 if (interval < 0x0004 || interval > 0x4000)
4806 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4807 MGMT_STATUS_INVALID_PARAMS);
4809 window = __le16_to_cpu(cp->window);
4811 if (window < 0x0004 || window > 0x4000)
4812 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4813 MGMT_STATUS_INVALID_PARAMS);
/* A scan window larger than the interval is meaningless. */
4815 if (window > interval)
4816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4817 MGMT_STATUS_INVALID_PARAMS);
4821 hdev->le_scan_interval = interval;
4822 hdev->le_scan_window = window;
4824 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4827 /* If background scan is running, restart it so new parameters are
4830 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4831 hdev->discovery.state == DISCOVERY_STOPPED) {
4832 struct hci_request req;
4834 hci_req_init(&req, hdev);
/* Disable then re-enable scanning to apply the new parameters. */
4836 hci_req_add_le_scan_disable(&req);
4837 hci_req_add_le_passive_scan(&req);
4839 hci_req_run(&req, NULL);
4842 hci_dev_unlock(hdev);
/* Completion callback for set_fast_connectable(). On error, report the
 * status to the pending command; on success, update the
 * HCI_FAST_CONNECTABLE flag per the requested mode, send the settings
 * response and broadcast new settings.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
4847 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4850 struct mgmt_pending_cmd *cmd;
4852 BT_DBG("status 0x%02x", status);
4856 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4861 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4862 mgmt_status(status));
4864 struct mgmt_mode *cp = cmd->param;
4867 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4869 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4871 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4872 new_settings(hdev, cmd->sk);
4875 mgmt_pending_remove(cmd);
4878 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle the fast-connectable page
 * scan mode. Requires BR/EDR enabled and controller version >= 1.2.
 * When powered off only the flag is flipped; when powered, a
 * write_fast_connectable() HCI request is issued and completed in
 * fast_connectable_complete().
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels).
 */
4881 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4882 void *data, u16 len)
4884 struct mgmt_mode *cp = data;
4885 struct mgmt_pending_cmd *cmd;
4886 struct hci_request req;
4889 BT_DBG("%s", hdev->name);
4891 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4892 hdev->hci_ver < BLUETOOTH_VER_1_2)
4893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4894 MGMT_STATUS_NOT_SUPPORTED);
4896 if (cp->val != 0x00 && cp->val != 0x01)
4897 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4898 MGMT_STATUS_INVALID_PARAMS);
/* Only one fast-connectable change may be in flight at a time. */
4902 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4903 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested state equals current state: respond without HCI traffic. */
4908 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4909 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4914 if (!hdev_is_powered(hdev)) {
4915 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4916 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4918 new_settings(hdev, sk);
4922 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4929 hci_req_init(&req, hdev);
4931 write_fast_connectable(&req, cp->val);
4933 err = hci_req_run(&req, fast_connectable_complete);
4935 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4936 MGMT_STATUS_FAILED);
4937 mgmt_pending_remove(cmd);
4941 hci_dev_unlock(hdev);
/* Completion callback for set_bredr(). On error, roll back the
 * HCI_BREDR_ENABLED flag (it was set optimistically before the request)
 * and report the failure; on success, send the settings response and
 * broadcast new settings.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
4946 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4948 struct mgmt_pending_cmd *cmd;
4950 BT_DBG("status 0x%02x", status);
4954 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4959 u8 mgmt_err = mgmt_status(status);
4961 /* We need to restore the flag if related HCI commands
4964 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4966 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4968 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4969 new_settings(hdev, cmd->sk);
4972 mgmt_pending_remove(cmd);
4975 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered is
 * rejected, as is re-enabling when a static address or secure
 * connections would make the configuration invalid. When HCI traffic is
 * needed the request completes in set_bredr_complete().
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels).
 */
4978 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4980 struct mgmt_mode *cp = data;
4981 struct mgmt_pending_cmd *cmd;
4982 struct hci_request req;
4985 BT_DBG("request for %s", hdev->name);
/* Toggling BR/EDR only makes sense on a dual-mode controller. */
4987 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4989 MGMT_STATUS_NOT_SUPPORTED);
4991 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4993 MGMT_STATUS_REJECTED);
4995 if (cp->val != 0x00 && cp->val != 0x01)
4996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4997 MGMT_STATUS_INVALID_PARAMS);
/* No state change requested: just answer with current settings. */
5001 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5002 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5006 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears all BR/EDR-only settings with it. */
5008 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5009 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5010 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5011 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5012 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5015 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5017 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5021 err = new_settings(hdev, sk);
5025 /* Reject disabling when powered on */
5027 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5028 MGMT_STATUS_REJECTED);
5031 /* When configuring a dual-mode controller to operate
5032 * with LE only and using a static address, then switching
5033 * BR/EDR back on is not allowed.
5035 * Dual-mode controllers shall operate with the public
5036 * address as its identity address for BR/EDR and LE. So
5037 * reject the attempt to create an invalid configuration.
5039 * The same restrictions applies when secure connections
5040 * has been enabled. For BR/EDR this is a controller feature
5041 * while for LE it is a host stack feature. This means that
5042 * switching BR/EDR back on when secure connections has been
5043 * enabled is not a supported transaction.
5045 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5046 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5047 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5048 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5049 MGMT_STATUS_REJECTED);
5054 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5060 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5066 /* We need to flip the bit already here so that update_adv_data
5067 * generates the correct flags.
5069 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5071 hci_req_init(&req, hdev);
5073 write_fast_connectable(&req, false);
5074 __hci_update_page_scan(&req);
5076 /* Since only the advertising data flags will change, there
5077 * is no need to update the scan response data.
5079 update_adv_data(&req);
5081 err = hci_req_run(&req, set_bredr_complete);
5083 mgmt_pending_remove(cmd);
5086 hci_dev_unlock(hdev);
/* Completion callback for set_secure_conn(). On error, report the status
 * to the pending command; otherwise set the HCI_SC_ENABLED/HCI_SC_ONLY
 * flags according to the requested mode (off / SC / SC-only), send the
 * settings response and broadcast new settings.
 * NOTE(review): this extract elides some original lines (locking, braces,
 * the switch on cp->val).
 */
5090 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5092 struct mgmt_pending_cmd *cmd;
5093 struct mgmt_mode *cp;
5095 BT_DBG("%s status %u", hdev->name, status);
5099 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5104 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5105 mgmt_status(status));
/* Mode 0x00: secure connections fully disabled. */
5113 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5114 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Mode 0x01: SC enabled, SC-only mode off. */
5117 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5118 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Mode 0x02: SC enabled and SC-only mode enforced. */
5121 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5122 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5126 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5127 new_settings(hdev, cmd->sk);
5130 mgmt_pending_remove(cmd);
5132 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SECURE_CONN: configure Secure Connections support
 * (0x00 off, 0x01 on, 0x02 SC-only). When the controller cannot be
 * reached (powered off, not SC-capable, BR/EDR disabled) only the flags
 * are toggled; otherwise HCI_OP_WRITE_SC_SUPPORT is issued and the
 * request completes in sc_enable_complete().
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels, the 'val' assignment).
 */
5135 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5136 void *data, u16 len)
5138 struct mgmt_mode *cp = data;
5139 struct mgmt_pending_cmd *cmd;
5140 struct hci_request req;
5144 BT_DBG("request for %s", hdev->name);
5146 if (!lmp_sc_capable(hdev) &&
5147 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5148 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5149 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR secure connections requires SSP to be enabled first. */
5151 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5152 lmp_sc_capable(hdev) &&
5153 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5154 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5155 MGMT_STATUS_REJECTED);
5157 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5158 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5159 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: controller unreachable or not SC-capable. */
5163 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5164 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5168 changed = !hci_dev_test_and_set_flag(hdev,
5170 if (cp->val == 0x02)
5171 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5173 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5175 changed = hci_dev_test_and_clear_flag(hdev,
5177 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5180 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5185 err = new_settings(hdev, sk);
5190 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5191 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No effective change requested: answer without HCI traffic. */
5198 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5199 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5200 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5204 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5210 hci_req_init(&req, hdev);
5211 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5212 err = hci_req_run(&req, sc_enable_complete);
5214 mgmt_pending_remove(cmd);
5219 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEBUG_KEYS: control debug key handling (0x00 off,
 * 0x01 keep debug keys, 0x02 also use SSP debug mode). Updates the
 * HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS flags and, when powered with
 * SSP enabled and the use-mode changed, writes the SSP debug mode to the
 * controller.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
5223 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5224 void *data, u16 len)
5226 struct mgmt_mode *cp = data;
5227 bool changed, use_changed;
5230 BT_DBG("request for %s", hdev->name);
5232 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5233 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5234 MGMT_STATUS_INVALID_PARAMS);
5239 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5241 changed = hci_dev_test_and_clear_flag(hdev,
5242 HCI_KEEP_DEBUG_KEYS);
5244 if (cp->val == 0x02)
5245 use_changed = !hci_dev_test_and_set_flag(hdev,
5246 HCI_USE_DEBUG_KEYS);
5248 use_changed = hci_dev_test_and_clear_flag(hdev,
5249 HCI_USE_DEBUG_KEYS);
/* Push the new SSP debug mode to the controller only when it changed. */
5251 if (hdev_is_powered(hdev) && use_changed &&
5252 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5253 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5254 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5255 sizeof(mode), &mode);
5258 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5263 err = new_settings(hdev, sk);
5266 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PRIVACY: enable or disable LE privacy. Only allowed
 * while powered off. Enabling stores the supplied IRK and marks the RPA
 * as expired so a fresh one gets generated; disabling wipes the IRK.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
5270 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5273 struct mgmt_cp_set_privacy *cp = cp_data;
5277 BT_DBG("request for %s", hdev->name);
5279 if (!lmp_le_capable(hdev))
5280 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5281 MGMT_STATUS_NOT_SUPPORTED);
5283 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5284 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5285 MGMT_STATUS_INVALID_PARAMS);
/* Privacy can only be reconfigured while the adapter is powered off. */
5287 if (hdev_is_powered(hdev))
5288 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5289 MGMT_STATUS_REJECTED);
5293 /* If user space supports this command it is also expected to
5294 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5296 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5299 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5300 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new resolvable private address. */
5301 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5303 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5304 memset(hdev->irk, 0, sizeof(hdev->irk));
5305 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5308 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5313 err = new_settings(hdev, sk);
5316 hci_dev_unlock(hdev);
/* Validate the address of an IRK entry supplied via MGMT_OP_LOAD_IRKS:
 * public LE addresses are always valid; random LE addresses must be
 * static (two most significant bits set).
 * NOTE(review): this extract elides the return statements and braces.
 */
5320 static bool irk_is_valid(struct mgmt_irk_info *irk)
5322 switch (irk->addr.type) {
5323 case BDADDR_LE_PUBLIC:
5326 case BDADDR_LE_RANDOM:
5327 /* Two most significant bits shall be set */
5328 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_IRKS: replace the stored Identity Resolving Keys
 * with the list supplied by user space. The count is bounded so the
 * total payload cannot exceed U16_MAX, every entry is validated before
 * any existing key is discarded, and finally HCI_RPA_RESOLVING is set.
 * NOTE(review): this extract elides some original lines (locking, braces).
 */
5336 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5339 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound chosen so sizeof(*cp) + count * entry_size fits in u16. */
5340 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5341 sizeof(struct mgmt_irk_info));
5342 u16 irk_count, expected_len;
5345 BT_DBG("request for %s", hdev->name);
5347 if (!lmp_le_capable(hdev))
5348 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5349 MGMT_STATUS_NOT_SUPPORTED);
5351 irk_count = __le16_to_cpu(cp->irk_count);
5352 if (irk_count > max_irk_count) {
5353 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5355 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly. */
5358 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5359 if (expected_len != len) {
5360 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5363 MGMT_STATUS_INVALID_PARAMS);
5366 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries before touching the existing key store. */
5368 for (i = 0; i < irk_count; i++) {
5369 struct mgmt_irk_info *key = &cp->irks[i];
5371 if (!irk_is_valid(key))
5372 return mgmt_cmd_status(sk, hdev->id,
5374 MGMT_STATUS_INVALID_PARAMS);
5379 hci_smp_irks_clear(hdev);
5381 for (i = 0; i < irk_count; i++) {
5382 struct mgmt_irk_info *irk = &cp->irks[i];
5385 if (irk->addr.type == BDADDR_LE_PUBLIC)
5386 addr_type = ADDR_LE_DEV_PUBLIC;
5388 addr_type = ADDR_LE_DEV_RANDOM;
5390 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5394 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5396 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5398 hci_dev_unlock(hdev);
/* Validate a Long Term Key entry supplied via
 * MGMT_OP_LOAD_LONG_TERM_KEYS: the master field must be 0x00 or 0x01,
 * and a random LE address must be static (two most significant bits set).
 * NOTE(review): this extract elides the return statements and braces.
 */
5403 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5405 if (key->master != 0x00 && key->master != 0x01)
5408 switch (key->addr.type) {
5409 case BDADDR_LE_PUBLIC:
5412 case BDADDR_LE_RANDOM:
5413 /* Two most significant bits shall be set */
5414 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored SMP Long Term
 * Keys with the list supplied by user space. The count is bounded so the
 * payload fits in u16, the payload length must match exactly, every
 * entry is validated before the existing keys are cleared, and each key
 * type is mapped to its SMP LTK variant.
 * NOTE(review): this extract elides some original lines (locking, braces,
 * break statements in the switch).
 */
5422 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5423 void *cp_data, u16 len)
5425 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound chosen so sizeof(*cp) + count * entry_size fits in u16. */
5426 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5427 sizeof(struct mgmt_ltk_info));
5428 u16 key_count, expected_len;
5431 BT_DBG("request for %s", hdev->name);
5433 if (!lmp_le_capable(hdev))
5434 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5435 MGMT_STATUS_NOT_SUPPORTED);
5437 key_count = __le16_to_cpu(cp->key_count);
5438 if (key_count > max_key_count) {
5439 BT_ERR("load_ltks: too big key_count value %u", key_count);
5440 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5441 MGMT_STATUS_INVALID_PARAMS);
5444 expected_len = sizeof(*cp) + key_count *
5445 sizeof(struct mgmt_ltk_info);
5446 if (expected_len != len) {
5447 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5450 MGMT_STATUS_INVALID_PARAMS);
5453 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all entries before touching the existing key store. */
5455 for (i = 0; i < key_count; i++) {
5456 struct mgmt_ltk_info *key = &cp->keys[i];
5458 if (!ltk_is_valid(key))
5459 return mgmt_cmd_status(sk, hdev->id,
5460 MGMT_OP_LOAD_LONG_TERM_KEYS,
5461 MGMT_STATUS_INVALID_PARAMS);
5466 hci_smp_ltks_clear(hdev);
5468 for (i = 0; i < key_count; i++) {
5469 struct mgmt_ltk_info *key = &cp->keys[i];
5470 u8 type, addr_type, authenticated;
5472 if (key->addr.type == BDADDR_LE_PUBLIC)
5473 addr_type = ADDR_LE_DEV_PUBLIC;
5475 addr_type = ADDR_LE_DEV_RANDOM;
/* Map mgmt LTK type to SMP key type and authentication level. */
5477 switch (key->type) {
5478 case MGMT_LTK_UNAUTHENTICATED:
5479 authenticated = 0x00;
5480 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5482 case MGMT_LTK_AUTHENTICATED:
5483 authenticated = 0x01;
5484 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5486 case MGMT_LTK_P256_UNAUTH:
5487 authenticated = 0x00;
5488 type = SMP_LTK_P256;
5490 case MGMT_LTK_P256_AUTH:
5491 authenticated = 0x01;
5492 type = SMP_LTK_P256;
5494 case MGMT_LTK_P256_DEBUG:
5495 authenticated = 0x00;
5496 type = SMP_LTK_P256_DEBUG;
5501 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5502 authenticated, key->val, key->enc_size, key->ediv,
5506 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5509 hci_dev_unlock(hdev);
/* cmd_complete handler for MGMT_OP_GET_CONN_INFO. Builds the response
 * from the connection's cached RSSI/TX power on success, or fills in
 * the "invalid" sentinel values on failure, then drops the connection
 * reference taken by get_conn_info().
 * NOTE(review): this extract elides some original lines (braces,
 * hci_conn_put, return).
 */
5514 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5516 struct hci_conn *conn = cmd->user_data;
5517 struct mgmt_rp_get_conn_info rp;
/* The original command parameters carry the peer address to echo back. */
5520 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5522 if (status == MGMT_STATUS_SUCCESS) {
5523 rp.rssi = conn->rssi;
5524 rp.tx_power = conn->tx_power;
5525 rp.max_tx_power = conn->max_tx_power;
5527 rp.rssi = HCI_RSSI_INVALID;
5528 rp.tx_power = HCI_TX_POWER_INVALID;
5529 rp.max_tx_power = HCI_TX_POWER_INVALID;
5532 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5533 status, &rp, sizeof(rp));
5535 hci_conn_drop(conn);
/* Completion callback for the Read RSSI / Read TX Power request queued
 * by get_conn_info(). Recovers the connection handle from the last sent
 * command, looks up the connection and completes the matching pending
 * GET_CONN_INFO command.
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels).
 */
5541 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5544 struct hci_cp_read_rssi *cp;
5545 struct mgmt_pending_cmd *cmd;
5546 struct hci_conn *conn;
5550 BT_DBG("status 0x%02x", hci_status);
5554 /* Commands sent in request are either Read RSSI or Read Transmit Power
5555 * Level so we check which one was last sent to retrieve connection
5556 * handle. Both commands have handle as first parameter so it's safe to
5557 * cast data on the same command struct.
5559 * First command sent is always Read RSSI and we fail only if it fails.
5560 * In other case we simply override error to indicate success as we
5561 * already remembered if TX power value is actually valid.
5563 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5565 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5566 status = MGMT_STATUS_SUCCESS;
5568 status = mgmt_status(hci_status);
5572 BT_ERR("invalid sent_cmd in conn_info response");
5576 handle = __le16_to_cpu(cp->handle);
5577 conn = hci_conn_hash_lookup_handle(hdev, handle);
5579 BT_ERR("unknown handle (%d) in conn_info response", handle);
/* Find the pending command associated with this specific connection. */
5583 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5587 cmd->cmd_complete(cmd, status);
5588 mgmt_pending_remove(cmd);
5591 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI, TX power and max TX power
 * for an existing connection. If the cached values are recent enough
 * (randomized age between conn_info_min_age and conn_info_max_age) they
 * are returned directly; otherwise Read RSSI / Read TX Power commands
 * are queued and the reply is produced in conn_info_refresh_complete().
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels).
 */
5594 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5597 struct mgmt_cp_get_conn_info *cp = data;
5598 struct mgmt_rp_get_conn_info rp;
5599 struct hci_conn *conn;
5600 unsigned long conn_info_age;
5603 BT_DBG("%s", hdev->name);
5605 memset(&rp, 0, sizeof(rp));
5606 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5607 rp.addr.type = cp->addr.type;
5609 if (!bdaddr_type_is_valid(cp->addr.type))
5610 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5611 MGMT_STATUS_INVALID_PARAMS,
5616 if (!hdev_is_powered(hdev)) {
5617 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5618 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the transport implied by the address type. */
5623 if (cp->addr.type == BDADDR_BREDR)
5624 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5627 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5629 if (!conn || conn->state != BT_CONNECTED) {
5630 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5631 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one GET_CONN_INFO per connection may be in flight. */
5636 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5637 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5638 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5642 /* To avoid client trying to guess when to poll again for information we
5643 * calculate conn info age as random value between min/max set in hdev.
5645 conn_info_age = hdev->conn_info_min_age +
5646 prandom_u32_max(hdev->conn_info_max_age -
5647 hdev->conn_info_min_age);
5649 /* Query controller to refresh cached values if they are too old or were
5652 if (time_after(jiffies, conn->conn_info_timestamp +
5653 msecs_to_jiffies(conn_info_age)) ||
5654 !conn->conn_info_timestamp) {
5655 struct hci_request req;
5656 struct hci_cp_read_tx_power req_txp_cp;
5657 struct hci_cp_read_rssi req_rssi_cp;
5658 struct mgmt_pending_cmd *cmd;
5660 hci_req_init(&req, hdev);
5661 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5662 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5665 /* For LE links TX power does not change thus we don't need to
5666 * query for it once value is known.
5668 if (!bdaddr_type_is_le(cp->addr.type) ||
5669 conn->tx_power == HCI_TX_POWER_INVALID) {
5670 req_txp_cp.handle = cpu_to_le16(conn->handle);
5671 req_txp_cp.type = 0x00;
5672 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5673 sizeof(req_txp_cp), &req_txp_cp);
5676 /* Max TX power needs to be read only once per connection */
5677 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5678 req_txp_cp.handle = cpu_to_le16(conn->handle);
5679 req_txp_cp.type = 0x01;
5680 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5681 sizeof(req_txp_cp), &req_txp_cp);
5684 err = hci_req_run(&req, conn_info_refresh_complete);
5688 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the async completion runs. */
5695 hci_conn_hold(conn);
5696 cmd->user_data = hci_conn_get(conn);
5697 cmd->cmd_complete = conn_info_cmd_complete;
5699 conn->conn_info_timestamp = jiffies;
5701 /* Cache is valid, just reply with values cached in hci_conn */
5702 rp.rssi = conn->rssi;
5703 rp.tx_power = conn->tx_power;
5704 rp.max_tx_power = conn->max_tx_power;
5706 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5707 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5711 hci_dev_unlock(hdev);
/* cmd_complete handler for MGMT_OP_GET_CLOCK_INFO. Fills the response
 * with the local clock and, when a connection was involved, its piconet
 * clock and accuracy, then drops the connection reference.
 * NOTE(review): this extract elides some original lines (braces,
 * hci_dev_put/hci_conn_put, return).
 */
5715 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5717 struct hci_conn *conn = cmd->user_data;
5718 struct mgmt_rp_get_clock_info rp;
5719 struct hci_dev *hdev;
5722 memset(&rp, 0, sizeof(rp));
5723 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5728 hdev = hci_dev_get(cmd->index);
5730 rp.local_clock = cpu_to_le32(hdev->clock);
5735 rp.piconet_clock = cpu_to_le32(conn->clock);
5736 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5740 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5744 hci_conn_drop(conn);
/* Completion callback for the Read Clock request queued by
 * get_clock_info(). If the piconet clock was requested ("which" != 0),
 * resolve the connection from the handle in the sent command, then
 * complete the matching pending GET_CLOCK_INFO command.
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels).
 */
5751 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5753 struct hci_cp_read_clock *hci_cp;
5754 struct mgmt_pending_cmd *cmd;
5755 struct hci_conn *conn;
5757 BT_DBG("%s status %u", hdev->name, status);
5761 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection. */
5765 if (hci_cp->which) {
5766 u16 handle = __le16_to_cpu(hci_cp->handle);
5767 conn = hci_conn_hash_lookup_handle(hdev, handle);
5772 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5776 cmd->cmd_complete(cmd, mgmt_status(status));
5777 mgmt_pending_remove(cmd);
5780 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CLOCK_INFO: read the local clock and, when a
 * specific BR/EDR peer is given, the piconet clock of that connection.
 * The request always reads the local clock (which = 0) and additionally
 * the piconet clock (which = 1) for a connected peer; the reply is
 * produced in get_clock_info_complete().
 * NOTE(review): this extract elides some original lines (locking, braces,
 * goto labels).
 */
5783 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5786 struct mgmt_cp_get_clock_info *cp = data;
5787 struct mgmt_rp_get_clock_info rp;
5788 struct hci_cp_read_clock hci_cp;
5789 struct mgmt_pending_cmd *cmd;
5790 struct hci_request req;
5791 struct hci_conn *conn;
5794 BT_DBG("%s", hdev->name);
5796 memset(&rp, 0, sizeof(rp));
5797 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5798 rp.addr.type = cp->addr.type;
/* Clock information is only defined for BR/EDR addresses. */
5800 if (cp->addr.type != BDADDR_BREDR)
5801 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5802 MGMT_STATUS_INVALID_PARAMS,
5807 if (!hdev_is_powered(hdev)) {
5808 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5809 MGMT_STATUS_NOT_POWERED, &rp,
/* A non-ANY address selects a specific connection's piconet clock. */
5814 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5815 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5817 if (!conn || conn->state != BT_CONNECTED) {
5818 err = mgmt_cmd_complete(sk, hdev->id,
5819 MGMT_OP_GET_CLOCK_INFO,
5820 MGMT_STATUS_NOT_CONNECTED,
5828 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5834 cmd->cmd_complete = clock_info_cmd_complete;
5836 hci_req_init(&req, hdev);
/* First Read Clock with which = 0 reads the local clock. */
5838 memset(&hci_cp, 0, sizeof(hci_cp));
5839 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until the async completion runs. */
5842 hci_conn_hold(conn);
5843 cmd->user_data = hci_conn_get(conn);
5845 hci_cp.handle = cpu_to_le16(conn->handle);
5846 hci_cp.which = 0x01; /* Piconet clock */
5847 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5850 err = hci_req_run(&req, get_clock_info_complete);
5852 mgmt_pending_remove(cmd);
5855 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists and
 * is in BT_CONNECTED state.
 */
5859 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5861 struct hci_conn *conn;
5863 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5867 if (conn->dst_type != type)
5870 if (conn->state != BT_CONNECTED)
5876 /* This function requires the caller holds hdev->lock */
/* Set (or update) the auto-connect policy for an LE device, creating
 * the connection parameter entry if needed, and move the entry onto the
 * action list that matches the new policy. Triggers a background scan
 * update whenever the change affects passive scanning.
 *
 * NOTE(review): the original text contained HTML-entity corruption
 * ("&para;ms" rendered as "¶ms"); the "&params" tokens below restore
 * the intended C.
 */
5877 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5878 u8 addr_type, u8 auto_connect)
5880 struct hci_dev *hdev = req->hdev;
5881 struct hci_conn_params *params;
5883 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do if the policy is unchanged. */
5887 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on. */
5890 list_del_init(&params->action);
5892 switch (auto_connect) {
5893 case HCI_AUTO_CONN_DISABLED:
5894 case HCI_AUTO_CONN_LINK_LOSS:
5895 __hci_update_background_scan(req);
5897 case HCI_AUTO_CONN_REPORT:
5898 list_add(&params->action, &hdev->pend_le_reports);
5899 __hci_update_background_scan(req);
5901 case HCI_AUTO_CONN_DIRECT:
5902 case HCI_AUTO_CONN_ALWAYS:
/* Only queue for connection if not already connected. */
5903 if (!is_connected(hdev, addr, addr_type)) {
5904 list_add(&params->action, &hdev->pend_le_conns);
5905 __hci_update_background_scan(req);
5910 params->auto_connect = auto_connect;
5912 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event for the given address/action. */
5918 static void device_added(struct sock *sk, struct hci_dev *hdev,
5919 bdaddr_t *bdaddr, u8 type, u8 action)
5921 struct mgmt_ev_device_added ev;
5923 bacpy(&ev.addr.bdaddr, bdaddr);
5924 ev.addr.type = type;
5927 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Add Device: finish and remove the
 * pending mgmt command with the translated status.
 */
5930 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5932 struct mgmt_pending_cmd *cmd;
5934 BT_DBG("status 0x%02x", status);
5938 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5942 cmd->cmd_complete(cmd, mgmt_status(status));
5943 mgmt_pending_remove(cmd);
5946 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_ADD_DEVICE command: whitelist a BR/EDR device for
 * incoming connections, or configure an LE auto-connect policy mapped
 * from cp->action (0x00 report, 0x01 direct, 0x02 always).
 */
5949 static int add_device(struct sock *sk, struct hci_dev *hdev,
5950 void *data, u16 len)
5952 struct mgmt_cp_add_device *cp = data;
5953 struct mgmt_pending_cmd *cmd;
5954 struct hci_request req;
5955 u8 auto_conn, addr_type;
5958 BT_DBG("%s", hdev->name);
/* Reject invalid address types and the ANY address. */
5960 if (!bdaddr_type_is_valid(cp->addr.type) ||
5961 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5962 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5963 MGMT_STATUS_INVALID_PARAMS,
5964 &cp->addr, sizeof(cp->addr));
5966 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5967 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5968 MGMT_STATUS_INVALID_PARAMS,
5969 &cp->addr, sizeof(cp->addr));
5971 hci_req_init(&req, hdev);
5975 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5981 cmd->cmd_complete = addr_cmd_complete;
5983 if (cp->addr.type == BDADDR_BREDR) {
5984 /* Only incoming connections action is supported for now */
5985 if (cp->action != 0x01) {
5986 err = cmd->cmd_complete(cmd,
5987 MGMT_STATUS_INVALID_PARAMS);
5988 mgmt_pending_remove(cmd);
5992 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5997 __hci_update_page_scan(&req);
/* LE path: translate the mgmt address type and action. */
6002 if (cp->addr.type == BDADDR_LE_PUBLIC)
6003 addr_type = ADDR_LE_DEV_PUBLIC;
6005 addr_type = ADDR_LE_DEV_RANDOM;
6007 if (cp->action == 0x02)
6008 auto_conn = HCI_AUTO_CONN_ALWAYS;
6009 else if (cp->action == 0x01)
6010 auto_conn = HCI_AUTO_CONN_DIRECT;
6012 auto_conn = HCI_AUTO_CONN_REPORT;
6014 /* If the connection parameters don't exist for this device,
6015 * they will be created and configured with defaults.
6017 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6019 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6020 mgmt_pending_remove(cmd);
6025 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6027 err = hci_req_run(&req, add_device_complete);
6029 /* ENODATA means no HCI commands were needed (e.g. if
6030 * the adapter is powered off).
6032 if (err == -ENODATA)
6033 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6034 mgmt_pending_remove(cmd);
6038 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event for the given address. */
6042 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6043 bdaddr_t *bdaddr, u8 type)
6045 struct mgmt_ev_device_removed ev;
6047 bacpy(&ev.addr.bdaddr, bdaddr);
6048 ev.addr.type = type;
6050 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Remove Device: finish and remove
 * the pending mgmt command with the translated status.
 */
6053 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6055 struct mgmt_pending_cmd *cmd;
6057 BT_DBG("status 0x%02x", status);
6061 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6065 cmd->cmd_complete(cmd, mgmt_status(status));
6066 mgmt_pending_remove(cmd);
6069 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_REMOVE_DEVICE command: for a specific address,
 * remove it from the BR/EDR whitelist or delete its LE connection
 * parameters; for BDADDR_ANY, clear the whitelist and every non-disabled
 * LE connection parameter entry.
 *
 * NOTE(review): the original text contained HTML-entity corruption
 * ("&para;ms" rendered as "¶ms"); the "&params" tokens below restore
 * the intended C.
 */
6072 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6073 void *data, u16 len)
6075 struct mgmt_cp_remove_device *cp = data;
6076 struct mgmt_pending_cmd *cmd;
6077 struct hci_request req;
6080 BT_DBG("%s", hdev->name);
6082 hci_req_init(&req, hdev);
6086 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6092 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove one specific device. */
6094 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6095 struct hci_conn_params *params;
6098 if (!bdaddr_type_is_valid(cp->addr.type)) {
6099 err = cmd->cmd_complete(cmd,
6100 MGMT_STATUS_INVALID_PARAMS);
6101 mgmt_pending_remove(cmd);
6105 if (cp->addr.type == BDADDR_BREDR) {
6106 err = hci_bdaddr_list_del(&hdev->whitelist,
6110 err = cmd->cmd_complete(cmd,
6111 MGMT_STATUS_INVALID_PARAMS);
6112 mgmt_pending_remove(cmd);
6116 __hci_update_page_scan(&req);
6118 device_removed(sk, hdev, &cp->addr.bdaddr,
6123 if (cp->addr.type == BDADDR_LE_PUBLIC)
6124 addr_type = ADDR_LE_DEV_PUBLIC;
6126 addr_type = ADDR_LE_DEV_RANDOM;
6128 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6131 err = cmd->cmd_complete(cmd,
6132 MGMT_STATUS_INVALID_PARAMS);
6133 mgmt_pending_remove(cmd);
/* Entries only in the DISABLED state were never "added" via mgmt,
 * so removing them is an invalid request.
 */
6137 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6138 err = cmd->cmd_complete(cmd,
6139 MGMT_STATUS_INVALID_PARAMS);
6140 mgmt_pending_remove(cmd);
6144 list_del(&params->action);
6145 list_del(&params->list);
6147 __hci_update_background_scan(&req);
6149 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe everything. */
6151 struct hci_conn_params *p, *tmp;
6152 struct bdaddr_list *b, *btmp;
/* For the wildcard address only type 0 is valid. */
6154 if (cp->addr.type) {
6155 err = cmd->cmd_complete(cmd,
6156 MGMT_STATUS_INVALID_PARAMS);
6157 mgmt_pending_remove(cmd);
6161 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6162 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6167 __hci_update_page_scan(&req);
6169 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
/* Keep DISABLED entries; they were not added through mgmt. */
6170 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6172 device_removed(sk, hdev, &p->addr, p->addr_type);
6173 list_del(&p->action);
6178 BT_DBG("All LE connection parameters were removed");
6180 __hci_update_background_scan(&req);
6184 err = hci_req_run(&req, remove_device_complete);
6186 /* ENODATA means no HCI commands were needed (e.g. if
6187 * the adapter is powered off).
6189 if (err == -ENODATA)
6190 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6191 mgmt_pending_remove(cmd);
6195 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_LOAD_CONN_PARAM command: validate the supplied
 * parameter list length, drop previously disabled entries, and load
 * each valid LE connection parameter set into the device, skipping
 * malformed entries.
 *
 * NOTE(review): the original text contained HTML-entity corruption
 * ("&para;m" rendered as "¶m"); the "&param" tokens below restore
 * the intended C.
 */
6199 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6202 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on param_count so expected_len cannot overflow u16. */
6203 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6204 sizeof(struct mgmt_conn_param));
6205 u16 param_count, expected_len;
6208 if (!lmp_le_capable(hdev))
6209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6210 MGMT_STATUS_NOT_SUPPORTED);
6212 param_count = __le16_to_cpu(cp->param_count);
6213 if (param_count > max_param_count) {
6214 BT_ERR("load_conn_param: too big param_count value %u",
6216 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6217 MGMT_STATUS_INVALID_PARAMS);
/* The message length must match the declared parameter count exactly. */
6220 expected_len = sizeof(*cp) + param_count *
6221 sizeof(struct mgmt_conn_param);
6222 if (expected_len != len) {
6223 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6225 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6226 MGMT_STATUS_INVALID_PARAMS);
6229 BT_DBG("%s param_count %u", hdev->name, param_count);
6233 hci_conn_params_clear_disabled(hdev);
6235 for (i = 0; i < param_count; i++) {
6236 struct mgmt_conn_param *param = &cp->params[i];
6237 struct hci_conn_params *hci_param;
6238 u16 min, max, latency, timeout;
6241 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
6244 if (param->addr.type == BDADDR_LE_PUBLIC) {
6245 addr_type = ADDR_LE_DEV_PUBLIC;
6246 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6247 addr_type = ADDR_LE_DEV_RANDOM;
6249 BT_ERR("Ignoring invalid connection parameters");
6253 min = le16_to_cpu(param->min_interval);
6254 max = le16_to_cpu(param->max_interval);
6255 latency = le16_to_cpu(param->latency);
6256 timeout = le16_to_cpu(param->timeout);
6258 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6259 min, max, latency, timeout);
/* Skip (do not fail) entries with out-of-spec values. */
6261 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6262 BT_ERR("Ignoring invalid connection parameters");
6266 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6269 BT_ERR("Failed to add connection parameters");
6273 hci_param->conn_min_interval = min;
6274 hci_param->conn_max_interval = max;
6275 hci_param->conn_latency = latency;
6276 hci_param->supervision_timeout = timeout;
6279 hci_dev_unlock(hdev);
6281 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handle the MGMT_OP_SET_EXTERNAL_CONFIG command: toggle the
 * HCI_EXT_CONFIGURED flag on a powered-off controller that declares the
 * EXTERNAL_CONFIG quirk, and move the index between the configured and
 * unconfigured lists when the configuration state changes.
 */
6285 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6286 void *data, u16 len)
6288 struct mgmt_cp_set_external_config *cp = data;
6292 BT_DBG("%s", hdev->name);
/* Only allowed while the controller is powered off. */
6294 if (hdev_is_powered(hdev))
6295 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6296 MGMT_STATUS_REJECTED);
6298 if (cp->config != 0x00 && cp->config != 0x01)
6299 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6300 MGMT_STATUS_INVALID_PARAMS);
6302 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6303 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6304 MGMT_STATUS_NOT_SUPPORTED);
/* Track whether the flag actually changed to avoid spurious events. */
6309 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6311 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6313 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6320 err = new_options(hdev, sk);
/* If the configured/unconfigured state flipped, re-announce the index. */
6322 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6323 mgmt_index_removed(hdev);
6325 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6326 hci_dev_set_flag(hdev, HCI_CONFIG);
6327 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6329 queue_work(hdev->req_workqueue, &hdev->power_on);
6331 set_bit(HCI_RAW, &hdev->flags);
6332 mgmt_index_added(hdev);
6337 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_SET_PUBLIC_ADDRESS command: store a public address
 * for a powered-off controller that provides a set_bdaddr driver hook,
 * and if that completes its configuration, re-announce it as configured.
 */
6341 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6342 void *data, u16 len)
6344 struct mgmt_cp_set_public_address *cp = data;
6348 BT_DBG("%s", hdev->name);
/* Only allowed while the controller is powered off. */
6350 if (hdev_is_powered(hdev))
6351 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6352 MGMT_STATUS_REJECTED);
6354 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6356 MGMT_STATUS_INVALID_PARAMS);
/* Requires driver support for programming the address. */
6358 if (!hdev->set_bdaddr)
6359 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6360 MGMT_STATUS_NOT_SUPPORTED);
6364 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6365 bacpy(&hdev->public_addr, &cp->bdaddr);
6367 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6374 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6375 err = new_options(hdev, sk);
/* Setting the address may complete configuration: switch the index
 * over from unconfigured to configured and power it up.
 */
6377 if (is_configured(hdev)) {
6378 mgmt_index_removed(hdev);
6380 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6382 hci_dev_set_flag(hdev, HCI_CONFIG);
6383 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6385 queue_work(hdev->req_workqueue, &hdev->power_on);
6389 hci_dev_unlock(hdev);
/* Append one EIR (length, type, data) field to the buffer at eir_len
 * and return the updated length. The caller must guarantee the buffer
 * has room for data_len + 2 more bytes.
 */
6393 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* Field length byte covers the type octet plus the payload. */
6396 eir[eir_len++] = sizeof(type) + data_len;
6397 eir[eir_len++] = type;
6398 memcpy(&eir[eir_len], data, data_len);
6399 eir_len += data_len;
/* Handle the MGMT_OP_READ_LOCAL_OOB_EXT_DATA command: build EIR-encoded
 * OOB data for either BR/EDR or LE (address, role, SC confirm/random
 * values, flags), reply to the caller, and broadcast the updated data to
 * sockets that subscribed to OOB data events.
 */
6404 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6405 void *data, u16 data_len)
6407 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6408 struct mgmt_rp_read_local_oob_ext_data *rp;
6411 u8 status, flags, role, addr[7], hash[16], rand[16];
6414 BT_DBG("%s", hdev->name);
6416 if (!hdev_is_powered(hdev))
6417 return mgmt_cmd_complete(sk, hdev->id,
6418 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6419 MGMT_STATUS_NOT_POWERED,
6420 &cp->type, sizeof(cp->type));
/* cp->type is a bitmask of address types; only the exact BR/EDR bit or
 * the exact pair of LE bits is accepted.
 */
6423 case BIT(BDADDR_BREDR):
6424 status = mgmt_bredr_support(hdev);
6426 return mgmt_cmd_complete(sk, hdev->id,
6427 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6432 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6433 status = mgmt_le_support(hdev);
6435 return mgmt_cmd_complete(sk, hdev->id,
6436 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
/* Worst-case LE EIR size: address + role + hash + rand + flags fields. */
6439 eir_len = 9 + 3 + 18 + 18 + 3;
6442 return mgmt_cmd_complete(sk, hdev->id,
6443 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6444 MGMT_STATUS_INVALID_PARAMS,
6445 &cp->type, sizeof(cp->type));
6450 rp_len = sizeof(*rp) + eir_len;
6451 rp = kmalloc(rp_len, GFP_ATOMIC);
6453 hci_dev_unlock(hdev);
6459 case BIT(BDADDR_BREDR):
6460 eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
6461 hdev->dev_class, 3);
6463 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
/* With Secure Connections enabled, fresh OOB confirm/random values
 * are generated by SMP; failure aborts the command.
 */
6464 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6465 smp_generate_oob(hdev, hash, rand) < 0) {
6466 hci_dev_unlock(hdev);
6467 err = mgmt_cmd_complete(sk, hdev->id,
6468 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6470 &cp->type, sizeof(cp->type));
/* Pick the address to advertise: RPA when privacy is on, otherwise
 * static address when forced/required, otherwise the public address.
 */
6474 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6475 memcpy(addr, &hdev->rpa, 6);
6477 } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6478 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6479 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6480 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6481 memcpy(addr, &hdev->static_addr, 6);
6484 memcpy(addr, &hdev->bdaddr, 6);
6488 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6489 addr, sizeof(addr));
6491 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6496 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6497 &role, sizeof(role));
6499 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6500 eir_len = eir_append_data(rp->eir, eir_len,
6502 hash, sizeof(hash));
6504 eir_len = eir_append_data(rp->eir, eir_len,
6506 rand, sizeof(rand));
6509 flags = get_adv_discov_flags(hdev);
6511 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6512 flags |= LE_AD_NO_BREDR;
6514 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6515 &flags, sizeof(flags));
6519 rp->type = cp->type;
6520 rp->eir_len = cpu_to_le16(eir_len);
6522 hci_dev_unlock(hdev);
/* Callers of this command implicitly subscribe to OOB data updates. */
6524 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6526 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6527 MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
6531 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6532 rp, sizeof(*rp) + eir_len,
6533 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the MGMT advertising flags this controller supports; TX power
 * is only advertised when the controller reports a valid value.
 */
6541 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6545 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6546 flags |= MGMT_ADV_FLAG_DISCOV;
6547 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6548 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6550 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6551 flags |= MGMT_ADV_FLAG_TX_POWER;
/* Handle the MGMT_OP_READ_ADV_FEATURES command: report supported
 * advertising flags, data-size limits, and the (currently at most one)
 * configured advertising instance.
 */
6556 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6557 void *data, u16 data_len)
6559 struct mgmt_rp_read_adv_features *rp;
6563 u32 supported_flags;
6565 BT_DBG("%s", hdev->name);
6567 if (!lmp_le_capable(hdev))
6568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6569 MGMT_STATUS_REJECTED);
6573 rp_len = sizeof(*rp);
6575 /* Currently only one instance is supported, so just add 1 to the
6578 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6582 rp = kmalloc(rp_len, GFP_ATOMIC);
6584 hci_dev_unlock(hdev);
6588 supported_flags = get_supported_adv_flags(hdev);
6590 rp->supported_flags = cpu_to_le32(supported_flags);
6591 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6592 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6593 rp->max_instances = 1;
6595 /* Currently only one instance is supported, so simply return the
6596 * current instance number.
6599 rp->num_instances = 1;
6600 rp->instance[0] = 1;
6602 rp->num_instances = 0;
6605 hci_dev_unlock(hdev);
6607 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6608 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Validate advertising or scan-response TLV data: the fields must be
 * well-formed length/type/value triples that fit in the buffer, and must
 * not contain fields (EIR_FLAGS, EIR_TX_POWER) that the kernel manages
 * itself when the corresponding adv_flags are set.
 */
6615 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6616 u8 len, bool is_adv_data)
6618 u8 max_len = HCI_MAX_AD_LENGTH;
6620 bool flags_managed = false;
6621 bool tx_power_managed = false;
6622 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6623 MGMT_ADV_FLAG_MANAGED_FLAGS;
/* Kernel-managed fields only apply to advertising data, not scan rsp. */
6625 if (is_adv_data && (adv_flags & flags_params)) {
6626 flags_managed = true;
6630 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6631 tx_power_managed = true;
6638 /* Make sure that the data is correctly formatted. */
6639 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* data[i] is the field length; data[i + 1] is the field type. */
6642 if (flags_managed && data[i + 1] == EIR_FLAGS)
6645 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6648 /* If the current field length would exceed the total data
6649 * length, then it's invalid.
6651 if (i + cur_len >= len)
/* HCI request completion callback for Add Advertising: on failure, tear
 * down the single advertising instance and emit the removal event; then
 * answer the pending mgmt command (status-only on failure, full reply
 * with the instance on success).
 */
6658 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6661 struct mgmt_pending_cmd *cmd;
6662 struct mgmt_rp_add_advertising rp;
6664 BT_DBG("status %d", status);
6668 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
/* Failure path: wipe the instance state that add_advertising() set up. */
6671 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6672 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
/* cmd may be NULL here if no pending command was found. */
6673 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6682 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6683 mgmt_status(status));
6685 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6686 mgmt_status(status), &rp, sizeof(rp));
6688 mgmt_pending_remove(cmd);
6691 hci_dev_unlock(hdev);
/* Delayed-work handler for an advertising instance's timeout: clears the
 * stored timeout and removes the instance under hdev->lock.
 */
6694 static void adv_timeout_expired(struct work_struct *work)
6696 struct hci_dev *hdev = container_of(work, struct hci_dev,
6697 adv_instance.timeout_exp.work);
6699 hdev->adv_instance.timeout = 0;
6702 clear_adv_instance(hdev);
6703 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_ADD_ADVERTISING command: validate the flags, data
 * and (single supported) instance number, store the instance and its
 * optional timeout, and start advertising via an HCI request when the
 * controller is powered and not already advertising globally.
 */
6706 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6707 void *data, u16 data_len)
6709 struct mgmt_cp_add_advertising *cp = data;
6710 struct mgmt_rp_add_advertising rp;
6712 u32 supported_flags;
6716 struct mgmt_pending_cmd *cmd;
6717 struct hci_request req;
6719 BT_DBG("%s", hdev->name);
6721 status = mgmt_le_support(hdev);
6723 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6726 flags = __le32_to_cpu(cp->flags);
6727 timeout = __le16_to_cpu(cp->timeout);
6729 /* The current implementation only supports adding one instance and only
6730 * a subset of the specified flags.
6732 supported_flags = get_supported_adv_flags(hdev);
6733 if (cp->instance != 0x01 || (flags & ~supported_flags))
6734 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6735 MGMT_STATUS_INVALID_PARAMS;
/* A timeout needs a powered controller to ever start counting. */
6739 if (timeout && !hdev_is_powered(hdev)) {
6740 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6741 MGMT_STATUS_REJECTED);
/* Serialize against other operations that touch advertising state. */
6745 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6746 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6747 pending_find(MGMT_OP_SET_LE, hdev)) {
6748 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* cp->data carries adv data followed immediately by scan rsp data. */
6753 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6754 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6755 cp->scan_rsp_len, false)) {
6756 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6757 MGMT_STATUS_INVALID_PARAMS);
6761 INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
6763 hdev->adv_instance.flags = flags;
6764 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6765 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6767 if (cp->adv_data_len)
6768 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
6770 if (cp->scan_rsp_len)
6771 memcpy(hdev->adv_instance.scan_rsp_data,
6772 cp->data + cp->adv_data_len, cp->scan_rsp_len);
/* Replace any previously armed timeout before storing the new one. */
6774 if (hdev->adv_instance.timeout)
6775 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6777 hdev->adv_instance.timeout = timeout;
6780 queue_delayed_work(hdev->workqueue,
6781 &hdev->adv_instance.timeout_exp,
6782 msecs_to_jiffies(timeout * 1000));
/* Announce the new instance only on the first add. */
6784 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6785 advertising_added(sk, hdev, 1);
6787 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6788 * we have no HCI communication to make. Simply return.
6790 if (!hdev_is_powered(hdev) ||
6791 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6793 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6794 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6798 /* We're good to go, update advertising data, parameters, and start
6801 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6808 hci_req_init(&req, hdev);
6810 update_adv_data(&req);
6811 update_scan_rsp_data(&req);
6812 enable_advertising(&req);
6814 err = hci_req_run(&req, add_advertising_complete);
6816 mgmt_pending_remove(cmd);
6819 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising: the instance
 * has already been removed, so the pending command is always completed
 * with success regardless of the disable-advertising status.
 */
6824 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6827 struct mgmt_pending_cmd *cmd;
6828 struct mgmt_rp_remove_advertising rp;
6830 BT_DBG("status %d", status);
6834 /* A failure status here only means that we failed to disable
6835 * advertising. Otherwise, the advertising instance has been removed,
6836 * so report success.
6838 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6844 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6846 mgmt_pending_remove(cmd);
6849 hci_dev_unlock(hdev);
/* Handle the MGMT_OP_REMOVE_ADVERTISING command: tear down the single
 * supported advertising instance (instance 1, or 0 meaning "all") and
 * disable advertising on the controller when HCI communication is
 * actually required.
 */
6852 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6853 void *data, u16 data_len)
6855 struct mgmt_cp_remove_advertising *cp = data;
6856 struct mgmt_rp_remove_advertising rp;
6858 struct mgmt_pending_cmd *cmd;
6859 struct hci_request req;
6861 BT_DBG("%s", hdev->name);
6863 /* The current implementation only allows modifying instance no 1. A
6864 * value of 0 indicates that all instances should be cleared.
6866 if (cp->instance > 1)
6867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6868 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against other operations that touch advertising state. */
6872 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6873 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6874 pending_find(MGMT_OP_SET_LE, hdev)) {
6875 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6880 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
6881 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6882 MGMT_STATUS_INVALID_PARAMS);
/* Disarm any pending instance timeout before wiping the state. */
6886 if (hdev->adv_instance.timeout)
6887 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6889 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6891 advertising_removed(sk, hdev, 1);
6893 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6895 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6896 * we have no HCI communication to make. Simply return.
6898 if (!hdev_is_powered(hdev) ||
6899 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6901 err = mgmt_cmd_complete(sk, hdev->id,
6902 MGMT_OP_REMOVE_ADVERTISING,
6903 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6907 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6914 hci_req_init(&req, hdev);
6915 disable_advertising(&req);
6917 err = hci_req_run(&req, remove_advertising_complete);
6919 mgmt_pending_remove(cmd);
6922 hci_dev_unlock(hdev);
/* Dispatch table for MGMT commands, indexed by opcode (entry 0 is the
 * unused opcode 0x0000). Each entry carries the handler, the expected
 * (minimum) parameter size, and optional flags: HCI_MGMT_UNTRUSTED for
 * commands allowed on untrusted sockets, HCI_MGMT_UNCONFIGURED for
 * commands valid on unconfigured controllers.
 */
6927 static const struct hci_mgmt_handler mgmt_handlers[] = {
6928 { NULL }, /* 0x0000 (no command) */
6929 { read_version, MGMT_READ_VERSION_SIZE,
6931 HCI_MGMT_UNTRUSTED },
6932 { read_commands, MGMT_READ_COMMANDS_SIZE,
6934 HCI_MGMT_UNTRUSTED },
6935 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6937 HCI_MGMT_UNTRUSTED },
6938 { read_controller_info, MGMT_READ_INFO_SIZE,
6939 HCI_MGMT_UNTRUSTED },
6940 { set_powered, MGMT_SETTING_SIZE },
6941 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6942 { set_connectable, MGMT_SETTING_SIZE },
6943 { set_fast_connectable, MGMT_SETTING_SIZE },
6944 { set_bondable, MGMT_SETTING_SIZE },
6945 { set_link_security, MGMT_SETTING_SIZE },
6946 { set_ssp, MGMT_SETTING_SIZE },
6947 { set_hs, MGMT_SETTING_SIZE },
6948 { set_le, MGMT_SETTING_SIZE },
6949 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6950 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6951 { add_uuid, MGMT_ADD_UUID_SIZE },
6952 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6953 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6955 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6957 { disconnect, MGMT_DISCONNECT_SIZE },
6958 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6959 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6960 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6961 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6962 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6963 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6964 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6965 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6966 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6967 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6968 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6969 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6970 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6972 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6973 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6974 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6975 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6976 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6977 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6978 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6979 { set_advertising, MGMT_SETTING_SIZE },
6980 { set_bredr, MGMT_SETTING_SIZE },
6981 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6982 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6983 { set_secure_conn, MGMT_SETTING_SIZE },
6984 { set_debug_keys, MGMT_SETTING_SIZE },
6985 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6986 { load_irks, MGMT_LOAD_IRKS_SIZE,
6988 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6989 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6990 { add_device, MGMT_ADD_DEVICE_SIZE },
6991 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6992 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6994 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6996 HCI_MGMT_UNTRUSTED },
6997 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6998 HCI_MGMT_UNCONFIGURED |
6999 HCI_MGMT_UNTRUSTED },
7000 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7001 HCI_MGMT_UNCONFIGURED },
7002 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7003 HCI_MGMT_UNCONFIGURED },
7004 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7006 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7007 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7009 HCI_MGMT_UNTRUSTED },
7010 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7011 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7013 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Announce a newly registered controller index: legacy INDEX_ADDED or
 * UNCONF_INDEX_ADDED depending on configuration state, plus the extended
 * EXT_INDEX_ADDED event. Raw-only devices are not announced.
 */
7016 void mgmt_index_added(struct hci_dev *hdev)
7018 struct mgmt_ev_ext_index ev;
7020 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7023 switch (hdev->dev_type) {
7025 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7026 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7027 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7030 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7031 HCI_MGMT_INDEX_EVENTS);
7044 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7045 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller index: fail all pending commands with
 * INVALID_INDEX, then emit the legacy INDEX_REMOVED/UNCONF_INDEX_REMOVED
 * event and the extended EXT_INDEX_REMOVED event. Raw-only devices are
 * not announced.
 */
7048 void mgmt_index_removed(struct hci_dev *hdev)
7050 struct mgmt_ev_ext_index ev;
7051 u8 status = MGMT_STATUS_INVALID_INDEX;
7053 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7056 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command. */
7058 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7060 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7061 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7062 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7065 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7066 HCI_MGMT_INDEX_EVENTS);
7079 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7080 HCI_MGMT_EXT_INDEX_EVENTS);
7083 /* This function requires the caller holds hdev->lock */
/* Re-sort all LE connection parameter entries onto the action lists
 * that match their auto_connect policy (used after power-on, since an
 * AUTO_OFF cycle may not have really powered the controller down), then
 * refresh the background scan.
 */
7084 static void restart_le_actions(struct hci_request *req)
7086 struct hci_dev *hdev = req->hdev;
7087 struct hci_conn_params *p;
7089 list_for_each_entry(p, &hdev->le_conn_params, list) {
7090 /* Needed for AUTO_OFF case where might not "really"
7091 * have been powered off.
7093 list_del_init(&p->action);
7095 switch (p->auto_connect) {
7096 case HCI_AUTO_CONN_DIRECT:
7097 case HCI_AUTO_CONN_ALWAYS:
7098 list_add(&p->action, &hdev->pend_le_conns);
7100 case HCI_AUTO_CONN_REPORT:
7101 list_add(&p->action, &hdev->pend_le_reports);
7108 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence: respond to
 * all pending Set Powered commands and broadcast the new settings.
 */
7111 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7113 struct cmd_lookup match = { NULL, hdev };
7115 BT_DBG("status 0x%02x", status);
7118 /* Register the available SMP channels (BR/EDR and LE) only
7119 * when successfully powering on the controller. This late
7120 * registration is required so that LE SMP can clearly
7121 * decide if the public address or static address is used.
7128 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7130 new_settings(hdev, match.sk);
7132 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state with
 * the host's flags after power-on: SSP/SC host support, LE host support,
 * advertising data and state, link security, fast connectable and page
 * scan. Returns the result of hci_req_run().
 */
7138 static int powered_update_hci(struct hci_dev *hdev)
7140 struct hci_request req;
7143 hci_req_init(&req, hdev);
/* Enable SSP on the controller if the host flag is set but the
 * controller does not yet report host SSP support.
 */
7145 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7146 !lmp_host_ssp_capable(hdev)) {
7149 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7151 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7154 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7155 sizeof(support), &support);
7159 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7160 lmp_bredr_capable(hdev)) {
7161 struct hci_cp_write_le_host_supported cp;
7166 /* Check first if we already have the right
7167 * host state (host features set)
7169 if (cp.le != lmp_host_le_capable(hdev) ||
7170 cp.simul != lmp_host_le_br_capable(hdev))
7171 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7175 if (lmp_le_capable(hdev)) {
7176 /* Make sure the controller has a good default for
7177 * advertising data. This also applies to the case
7178 * where BR/EDR was toggled during the AUTO_OFF phase.
7180 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7181 update_adv_data(&req);
7182 update_scan_rsp_data(&req);
7185 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7186 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7187 enable_advertising(&req);
7189 restart_le_actions(&req);
/* Sync authentication (link security) with the host flag. */
7192 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7193 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7194 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7195 sizeof(link_sec), &link_sec);
7197 if (lmp_bredr_capable(hdev)) {
7198 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7199 write_fast_connectable(&req, true);
7201 write_fast_connectable(&req, false);
7202 __hci_update_page_scan(&req);
7208 return hci_req_run(&req, powered_complete);
/* Notify the management interface of a power state change. On power-on,
 * run powered_update_hci() (its completion answers the pending Set
 * Powered commands). On power-off, answer the pending commands directly,
 * fail all other pending commands with an appropriate status, and clear
 * a non-zero class of device.
 */
7211 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7213 struct cmd_lookup match = { NULL, hdev };
7214 u8 status, zero_cod[] = { 0, 0, 0 };
7217 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* Power-on path: defer responses to powered_complete() on success. */
7221 if (powered_update_hci(hdev) == 0)
7224 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path. */
7229 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7231 /* If the power off is because of hdev unregistration let
7232 * use the appropriate INVALID_INDEX status. Otherwise use
7233 * NOT_POWERED. We cover both scenarios here since later in
7234 * mgmt_index_removed() any hci_conn callbacks will have already
7235 * been triggered, potentially causing misleading DISCONNECTED
7238 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7239 status = MGMT_STATUS_INVALID_INDEX;
7241 status = MGMT_STATUS_NOT_POWERED;
7243 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7245 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7246 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7247 zero_cod, sizeof(zero_cod), NULL);
7250 err = new_settings(hdev, match.sk);
7258 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7260 struct mgmt_pending_cmd *cmd;
7263 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7267 if (err == -ERFKILL)
7268 status = MGMT_STATUS_RFKILLED;
7270 status = MGMT_STATUS_FAILED;
7272 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7274 mgmt_pending_remove(cmd);
7277 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7279 struct hci_request req;
7283 /* When discoverable timeout triggers, then just make sure
7284 * the limited discoverable flag is cleared. Even in the case
7285 * of a timeout triggered from general discoverable, it is
7286 * safe to unconditionally clear the flag.
7288 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7289 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7291 hci_req_init(&req, hdev);
7292 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7293 u8 scan = SCAN_PAGE;
7294 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7295 sizeof(scan), &scan);
7299 /* Advertising instances don't use the global discoverable setting, so
7300 * only update AD if advertising was enabled using Set Advertising.
7302 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7303 update_adv_data(&req);
7305 hci_req_run(&req, NULL);
7307 hdev->discov_timeout = 0;
7309 new_settings(hdev, NULL);
7311 hci_dev_unlock(hdev);
7314 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7317 struct mgmt_ev_new_link_key ev;
7319 memset(&ev, 0, sizeof(ev));
7321 ev.store_hint = persistent;
7322 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7323 ev.key.addr.type = BDADDR_BREDR;
7324 ev.key.type = key->type;
7325 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7326 ev.key.pin_len = key->pin_len;
7328 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7331 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7333 switch (ltk->type) {
7336 if (ltk->authenticated)
7337 return MGMT_LTK_AUTHENTICATED;
7338 return MGMT_LTK_UNAUTHENTICATED;
7340 if (ltk->authenticated)
7341 return MGMT_LTK_P256_AUTH;
7342 return MGMT_LTK_P256_UNAUTH;
7343 case SMP_LTK_P256_DEBUG:
7344 return MGMT_LTK_P256_DEBUG;
7347 return MGMT_LTK_UNAUTHENTICATED;
7350 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7352 struct mgmt_ev_new_long_term_key ev;
7354 memset(&ev, 0, sizeof(ev));
7356 /* Devices using resolvable or non-resolvable random addresses
7357 * without providing an indentity resolving key don't require
7358 * to store long term keys. Their addresses will change the
7361 * Only when a remote device provides an identity address
7362 * make sure the long term key is stored. If the remote
7363 * identity is known, the long term keys are internally
7364 * mapped to the identity address. So allow static random
7365 * and public addresses here.
7367 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7368 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7369 ev.store_hint = 0x00;
7371 ev.store_hint = persistent;
7373 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7374 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7375 ev.key.type = mgmt_ltk_type(key);
7376 ev.key.enc_size = key->enc_size;
7377 ev.key.ediv = key->ediv;
7378 ev.key.rand = key->rand;
7380 if (key->type == SMP_LTK)
7383 memcpy(ev.key.val, key->val, sizeof(key->val));
7385 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
7388 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7390 struct mgmt_ev_new_irk ev;
7392 memset(&ev, 0, sizeof(ev));
7394 /* For identity resolving keys from devices that are already
7395 * using a public address or static random address, do not
7396 * ask for storing this key. The identity resolving key really
7397 * is only mandatory for devices using resovlable random
7400 * Storing all identity resolving keys has the downside that
7401 * they will be also loaded on next boot of they system. More
7402 * identity resolving keys, means more time during scanning is
7403 * needed to actually resolve these addresses.
7405 if (bacmp(&irk->rpa, BDADDR_ANY))
7406 ev.store_hint = 0x01;
7408 ev.store_hint = 0x00;
7410 bacpy(&ev.rpa, &irk->rpa);
7411 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7412 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7413 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7415 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7418 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7421 struct mgmt_ev_new_csrk ev;
7423 memset(&ev, 0, sizeof(ev));
7425 /* Devices using resolvable or non-resolvable random addresses
7426 * without providing an indentity resolving key don't require
7427 * to store signature resolving keys. Their addresses will change
7428 * the next time around.
7430 * Only when a remote device provides an identity address
7431 * make sure the signature resolving key is stored. So allow
7432 * static random and public addresses here.
7434 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7435 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7436 ev.store_hint = 0x00;
7438 ev.store_hint = persistent;
7440 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7441 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7442 ev.key.type = csrk->type;
7443 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7445 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
7448 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7449 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7450 u16 max_interval, u16 latency, u16 timeout)
7452 struct mgmt_ev_new_conn_param ev;
7454 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7457 memset(&ev, 0, sizeof(ev));
7458 bacpy(&ev.addr.bdaddr, bdaddr);
7459 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7460 ev.store_hint = store_hint;
7461 ev.min_interval = cpu_to_le16(min_interval);
7462 ev.max_interval = cpu_to_le16(max_interval);
7463 ev.latency = cpu_to_le16(latency);
7464 ev.timeout = cpu_to_le16(timeout);
7466 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7469 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7470 u32 flags, u8 *name, u8 name_len)
7473 struct mgmt_ev_device_connected *ev = (void *) buf;
7476 bacpy(&ev->addr.bdaddr, &conn->dst);
7477 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7479 ev->flags = __cpu_to_le32(flags);
7481 /* We must ensure that the EIR Data fields are ordered and
7482 * unique. Keep it simple for now and avoid the problem by not
7483 * adding any BR/EDR data to the LE adv.
7485 if (conn->le_adv_data_len > 0) {
7486 memcpy(&ev->eir[eir_len],
7487 conn->le_adv_data, conn->le_adv_data_len);
7488 eir_len = conn->le_adv_data_len;
7491 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
7494 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7495 eir_len = eir_append_data(ev->eir, eir_len,
7497 conn->dev_class, 3);
7500 ev->eir_len = cpu_to_le16(eir_len);
7502 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7503 sizeof(*ev) + eir_len, NULL);
7506 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7508 struct sock **sk = data;
7510 cmd->cmd_complete(cmd, 0);
7515 mgmt_pending_remove(cmd);
7518 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7520 struct hci_dev *hdev = data;
7521 struct mgmt_cp_unpair_device *cp = cmd->param;
7523 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7525 cmd->cmd_complete(cmd, 0);
7526 mgmt_pending_remove(cmd);
7529 bool mgmt_powering_down(struct hci_dev *hdev)
7531 struct mgmt_pending_cmd *cmd;
7532 struct mgmt_mode *cp;
7534 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7545 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7546 u8 link_type, u8 addr_type, u8 reason,
7547 bool mgmt_connected)
7549 struct mgmt_ev_device_disconnected ev;
7550 struct sock *sk = NULL;
7552 /* The connection is still in hci_conn_hash so test for 1
7553 * instead of 0 to know if this is the last one.
7555 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7556 cancel_delayed_work(&hdev->power_off);
7557 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7560 if (!mgmt_connected)
7563 if (link_type != ACL_LINK && link_type != LE_LINK)
7566 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7568 bacpy(&ev.addr.bdaddr, bdaddr);
7569 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7572 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7577 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7581 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7582 u8 link_type, u8 addr_type, u8 status)
7584 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7585 struct mgmt_cp_disconnect *cp;
7586 struct mgmt_pending_cmd *cmd;
7588 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7591 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7597 if (bacmp(bdaddr, &cp->addr.bdaddr))
7600 if (cp->addr.type != bdaddr_type)
7603 cmd->cmd_complete(cmd, mgmt_status(status));
7604 mgmt_pending_remove(cmd);
7607 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7608 u8 addr_type, u8 status)
7610 struct mgmt_ev_connect_failed ev;
7612 /* The connection is still in hci_conn_hash so test for 1
7613 * instead of 0 to know if this is the last one.
7615 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7616 cancel_delayed_work(&hdev->power_off);
7617 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7620 bacpy(&ev.addr.bdaddr, bdaddr);
7621 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7622 ev.status = mgmt_status(status);
7624 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7627 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7629 struct mgmt_ev_pin_code_request ev;
7631 bacpy(&ev.addr.bdaddr, bdaddr);
7632 ev.addr.type = BDADDR_BREDR;
7635 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7638 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7641 struct mgmt_pending_cmd *cmd;
7643 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7647 cmd->cmd_complete(cmd, mgmt_status(status));
7648 mgmt_pending_remove(cmd);
7651 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7654 struct mgmt_pending_cmd *cmd;
7656 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7660 cmd->cmd_complete(cmd, mgmt_status(status));
7661 mgmt_pending_remove(cmd);
7664 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7665 u8 link_type, u8 addr_type, u32 value,
7668 struct mgmt_ev_user_confirm_request ev;
7670 BT_DBG("%s", hdev->name);
7672 bacpy(&ev.addr.bdaddr, bdaddr);
7673 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7674 ev.confirm_hint = confirm_hint;
7675 ev.value = cpu_to_le32(value);
7677 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7681 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7682 u8 link_type, u8 addr_type)
7684 struct mgmt_ev_user_passkey_request ev;
7686 BT_DBG("%s", hdev->name);
7688 bacpy(&ev.addr.bdaddr, bdaddr);
7689 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7691 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7695 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7696 u8 link_type, u8 addr_type, u8 status,
7699 struct mgmt_pending_cmd *cmd;
7701 cmd = pending_find(opcode, hdev);
7705 cmd->cmd_complete(cmd, mgmt_status(status));
7706 mgmt_pending_remove(cmd);
7711 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7712 u8 link_type, u8 addr_type, u8 status)
7714 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7715 status, MGMT_OP_USER_CONFIRM_REPLY);
7718 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7719 u8 link_type, u8 addr_type, u8 status)
7721 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7723 MGMT_OP_USER_CONFIRM_NEG_REPLY);
7726 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7727 u8 link_type, u8 addr_type, u8 status)
7729 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7730 status, MGMT_OP_USER_PASSKEY_REPLY);
7733 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7734 u8 link_type, u8 addr_type, u8 status)
7736 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7738 MGMT_OP_USER_PASSKEY_NEG_REPLY);
7741 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7742 u8 link_type, u8 addr_type, u32 passkey,
7745 struct mgmt_ev_passkey_notify ev;
7747 BT_DBG("%s", hdev->name);
7749 bacpy(&ev.addr.bdaddr, bdaddr);
7750 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7751 ev.passkey = __cpu_to_le32(passkey);
7752 ev.entered = entered;
7754 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7757 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7759 struct mgmt_ev_auth_failed ev;
7760 struct mgmt_pending_cmd *cmd;
7761 u8 status = mgmt_status(hci_status);
7763 bacpy(&ev.addr.bdaddr, &conn->dst);
7764 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7767 cmd = find_pairing(conn);
7769 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7770 cmd ? cmd->sk : NULL);
7773 cmd->cmd_complete(cmd, status);
7774 mgmt_pending_remove(cmd);
7778 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7780 struct cmd_lookup match = { NULL, hdev };
7784 u8 mgmt_err = mgmt_status(status);
7785 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7786 cmd_status_rsp, &mgmt_err);
7790 if (test_bit(HCI_AUTH, &hdev->flags))
7791 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7793 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7795 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7799 new_settings(hdev, match.sk);
7805 static void clear_eir(struct hci_request *req)
7807 struct hci_dev *hdev = req->hdev;
7808 struct hci_cp_write_eir cp;
7810 if (!lmp_ext_inq_capable(hdev))
7813 memset(hdev->eir, 0, sizeof(hdev->eir));
7815 memset(&cp, 0, sizeof(cp));
7817 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7820 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7822 struct cmd_lookup match = { NULL, hdev };
7823 struct hci_request req;
7824 bool changed = false;
7827 u8 mgmt_err = mgmt_status(status);
7829 if (enable && hci_dev_test_and_clear_flag(hdev,
7831 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7832 new_settings(hdev, NULL);
7835 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7841 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7843 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7845 changed = hci_dev_test_and_clear_flag(hdev,
7848 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7851 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7854 new_settings(hdev, match.sk);
7859 hci_req_init(&req, hdev);
7861 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7862 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7863 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7864 sizeof(enable), &enable);
7870 hci_req_run(&req, NULL);
7873 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7875 struct cmd_lookup *match = data;
7877 if (match->sk == NULL) {
7878 match->sk = cmd->sk;
7879 sock_hold(match->sk);
7883 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7886 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7888 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7889 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7890 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7893 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7894 dev_class, 3, NULL);
7900 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7902 struct mgmt_cp_set_local_name ev;
7903 struct mgmt_pending_cmd *cmd;
7908 memset(&ev, 0, sizeof(ev));
7909 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7910 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7912 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7914 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7916 /* If this is a HCI command related to powering on the
7917 * HCI dev don't send any mgmt signals.
7919 if (pending_find(MGMT_OP_SET_POWERED, hdev))
7923 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7924 cmd ? cmd->sk : NULL);
7927 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7928 u8 *rand192, u8 *hash256, u8 *rand256,
7931 struct mgmt_pending_cmd *cmd;
7933 BT_DBG("%s status %u", hdev->name, status);
7935 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7940 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7941 mgmt_status(status));
7943 struct mgmt_rp_read_local_oob_data rp;
7944 size_t rp_size = sizeof(rp);
7946 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7947 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7949 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7950 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7951 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7953 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7956 mgmt_cmd_complete(cmd->sk, hdev->id,
7957 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7961 mgmt_pending_remove(cmd);
7964 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7968 for (i = 0; i < uuid_count; i++) {
7969 if (!memcmp(uuid, uuids[i], 16))
7976 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7980 while (parsed < eir_len) {
7981 u8 field_len = eir[0];
7988 if (eir_len - parsed < field_len + 1)
7992 case EIR_UUID16_ALL:
7993 case EIR_UUID16_SOME:
7994 for (i = 0; i + 3 <= field_len; i += 2) {
7995 memcpy(uuid, bluetooth_base_uuid, 16);
7996 uuid[13] = eir[i + 3];
7997 uuid[12] = eir[i + 2];
7998 if (has_uuid(uuid, uuid_count, uuids))
8002 case EIR_UUID32_ALL:
8003 case EIR_UUID32_SOME:
8004 for (i = 0; i + 5 <= field_len; i += 4) {
8005 memcpy(uuid, bluetooth_base_uuid, 16);
8006 uuid[15] = eir[i + 5];
8007 uuid[14] = eir[i + 4];
8008 uuid[13] = eir[i + 3];
8009 uuid[12] = eir[i + 2];
8010 if (has_uuid(uuid, uuid_count, uuids))
8014 case EIR_UUID128_ALL:
8015 case EIR_UUID128_SOME:
8016 for (i = 0; i + 17 <= field_len; i += 16) {
8017 memcpy(uuid, eir + i + 2, 16);
8018 if (has_uuid(uuid, uuid_count, uuids))
8024 parsed += field_len + 1;
8025 eir += field_len + 1;
8031 static void restart_le_scan(struct hci_dev *hdev)
8033 /* If controller is not scanning we are done. */
8034 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8037 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8038 hdev->discovery.scan_start +
8039 hdev->discovery.scan_duration))
8042 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
8043 DISCOV_LE_RESTART_DELAY);
8046 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8047 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8049 /* If a RSSI threshold has been specified, and
8050 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8051 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8052 * is set, let it through for further processing, as we might need to
8055 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8056 * the results are also dropped.
8058 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8059 (rssi == HCI_RSSI_INVALID ||
8060 (rssi < hdev->discovery.rssi &&
8061 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8064 if (hdev->discovery.uuid_count != 0) {
8065 /* If a list of UUIDs is provided in filter, results with no
8066 * matching UUID should be dropped.
8068 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8069 hdev->discovery.uuids) &&
8070 !eir_has_uuids(scan_rsp, scan_rsp_len,
8071 hdev->discovery.uuid_count,
8072 hdev->discovery.uuids))
8076 /* If duplicate filtering does not report RSSI changes, then restart
8077 * scanning to ensure updated result with updated RSSI values.
8079 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8080 restart_le_scan(hdev);
8082 /* Validate RSSI value against the RSSI threshold once more. */
8083 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8084 rssi < hdev->discovery.rssi)
8091 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8092 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8093 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8096 struct mgmt_ev_device_found *ev = (void *)buf;
8099 /* Don't send events for a non-kernel initiated discovery. With
8100 * LE one exception is if we have pend_le_reports > 0 in which
8101 * case we're doing passive scanning and want these events.
8103 if (!hci_discovery_active(hdev)) {
8104 if (link_type == ACL_LINK)
8106 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8110 if (hdev->discovery.result_filtering) {
8111 /* We are using service discovery */
8112 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8117 /* Make sure that the buffer is big enough. The 5 extra bytes
8118 * are for the potential CoD field.
8120 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8123 memset(buf, 0, sizeof(buf));
8125 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8126 * RSSI value was reported as 0 when not available. This behavior
8127 * is kept when using device discovery. This is required for full
8128 * backwards compatibility with the API.
8130 * However when using service discovery, the value 127 will be
8131 * returned when the RSSI is not available.
8133 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8134 link_type == ACL_LINK)
8137 bacpy(&ev->addr.bdaddr, bdaddr);
8138 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8140 ev->flags = cpu_to_le32(flags);
8143 /* Copy EIR or advertising data into event */
8144 memcpy(ev->eir, eir, eir_len);
8146 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8147 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8150 if (scan_rsp_len > 0)
8151 /* Append scan response data to event */
8152 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8154 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8155 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8157 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
8160 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8161 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8163 struct mgmt_ev_device_found *ev;
8164 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8167 ev = (struct mgmt_ev_device_found *) buf;
8169 memset(buf, 0, sizeof(buf));
8171 bacpy(&ev->addr.bdaddr, bdaddr);
8172 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8175 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8178 ev->eir_len = cpu_to_le16(eir_len);
8180 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8183 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8185 struct mgmt_ev_discovering ev;
8187 BT_DBG("%s discovering %u", hdev->name, discovering);
8189 memset(&ev, 0, sizeof(ev));
8190 ev.type = hdev->discovery.type;
8191 ev.discovering = discovering;
8193 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8196 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8198 BT_DBG("%s status %u", hdev->name, status);
8201 void mgmt_reenable_advertising(struct hci_dev *hdev)
8203 struct hci_request req;
8205 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8206 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8209 hci_req_init(&req, hdev);
8210 enable_advertising(&req);
8211 hci_req_run(&req, adv_enable_complete);
8214 static struct hci_mgmt_chan chan = {
8215 .channel = HCI_CHANNEL_CONTROL,
8216 .handler_count = ARRAY_SIZE(mgmt_handlers),
8217 .handlers = mgmt_handlers,
8218 .hdev_init = mgmt_init_hdev,
8223 return hci_mgmt_chan_register(&chan);
8226 void mgmt_exit(void)
8228 hci_mgmt_chan_unregister(&chan);