/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
#include <linux/module.h>

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"

#include "mgmt_util.h"
/* Version of the Management interface implemented by this file */
#define MGMT_VERSION	1
#define MGMT_REVISION	9
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
/* How long the UUID/class service cache stays valid before being flushed */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 128-bit all-zero key, used to detect blank link/long-term keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_index_list *rp;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
360 read_unlock(&hci_dev_list_lock);
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
420 read_unlock(&hci_dev_list_lock);
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
479 read_unlock(&hci_dev_list_lock);
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
546 static __le32 get_missing_options(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
583 BT_DBG("sock %p %s", sk, hdev->name);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
605 static u32 get_supported_settings(struct hci_dev *hdev)
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
640 settings |= MGMT_SETTING_CONFIGURATION;
645 static u32 get_current_settings(struct hci_dev *hdev)
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
713 #define PNP_INFO_SVCLASS_ID 0x1200
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 16)
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
739 uuids_start[1] = EIR_UUID16_ALL;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
772 uuids_start[1] = EIR_UUID32_ALL;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
784 uuids_start[0] += sizeof(u32);
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
805 uuids_start[1] = EIR_UUID128_ALL;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
815 memcpy(ptr, uuid->uuid, 16);
817 uuids_start[0] += 16;
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
835 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
840 name_len = strlen(hdev->dev_name);
842 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
844 if (name_len > max_len) {
846 ptr[1] = EIR_NAME_SHORT;
848 ptr[1] = EIR_NAME_COMPLETE;
850 ptr[0] = name_len + 1;
852 memcpy(ptr + 2, hdev->dev_name, name_len);
854 ad_len += (name_len + 2);
855 ptr += (name_len + 2);
861 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
863 /* TODO: Set the appropriate entries based on advertising instance flags
864 * here once flags other than 0 are supported.
866 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
867 hdev->adv_instance.scan_rsp_len);
869 return hdev->adv_instance.scan_rsp_len;
872 static void update_scan_rsp_data_for_instance(struct hci_request *req,
875 struct hci_dev *hdev = req->hdev;
876 struct hci_cp_le_set_scan_rsp_data cp;
879 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 memset(&cp, 0, sizeof(cp));
885 len = create_instance_scan_rsp_data(hdev, cp.data);
887 len = create_default_scan_rsp_data(hdev, cp.data);
889 if (hdev->scan_rsp_data_len == len &&
890 !memcmp(cp.data, hdev->scan_rsp_data, len))
893 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
894 hdev->scan_rsp_data_len = len;
898 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
901 static void update_scan_rsp_data(struct hci_request *req)
903 struct hci_dev *hdev = req->hdev;
906 /* The "Set Advertising" setting supersedes the "Add Advertising"
907 * setting. Here we set the scan response data based on which
908 * setting was set. When neither apply, default to the global settings,
909 * represented by instance "0".
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
917 update_scan_rsp_data_for_instance(req, instance);
920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
922 struct mgmt_pending_cmd *cmd;
924 /* If there's a pending mgmt command the flags will not yet have
925 * their final values, so check for this first.
927 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
929 struct mgmt_mode *cp = cmd->param;
931 return LE_AD_GENERAL;
932 else if (cp->val == 0x02)
933 return LE_AD_LIMITED;
935 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 return LE_AD_LIMITED;
937 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 return LE_AD_GENERAL;
944 static u8 get_current_adv_instance(struct hci_dev *hdev)
946 /* The "Set Advertising" setting supersedes the "Add Advertising"
947 * setting. Here we set the advertising data based on which
948 * setting was set. When neither apply, default to the global settings,
949 * represented by instance "0".
951 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
952 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
958 static bool get_connectable(struct hci_dev *hdev)
960 struct mgmt_pending_cmd *cmd;
962 /* If there's a pending mgmt command the flag will not yet have
963 * it's final value, so check for this first.
965 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
967 struct mgmt_mode *cp = cmd->param;
972 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
975 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
982 if (instance == 0x01)
983 return hdev->adv_instance.flags;
985 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
986 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
988 /* For instance 0, assemble the flags from global settings */
989 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE) ||
990 get_connectable(hdev))
991 flags |= MGMT_ADV_FLAG_CONNECTABLE;
996 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
998 u8 ad_len = 0, flags = 0;
999 u32 instance_flags = get_adv_instance_flags(hdev, instance);
1001 /* The Add Advertising command allows userspace to set both the general
1002 * and limited discoverable flags.
1004 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1005 flags |= LE_AD_GENERAL;
1007 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1008 flags |= LE_AD_LIMITED;
1010 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1011 /* If a discovery flag wasn't provided, simply use the global
1015 flags |= get_adv_discov_flags(hdev);
1017 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1018 flags |= LE_AD_NO_BREDR;
1020 /* If flags would still be empty, then there is no need to
1021 * include the "Flags" AD field".
1033 /* Provide Tx Power only if we can provide a valid value for it */
1034 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1035 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1037 ptr[1] = EIR_TX_POWER;
1038 ptr[2] = (u8)hdev->adv_tx_power;
1045 memcpy(ptr, hdev->adv_instance.adv_data,
1046 hdev->adv_instance.adv_data_len);
1047 ad_len += hdev->adv_instance.adv_data_len;
1053 static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
1055 struct hci_dev *hdev = req->hdev;
1056 struct hci_cp_le_set_adv_data cp;
1059 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1062 memset(&cp, 0, sizeof(cp));
1064 len = create_instance_adv_data(hdev, instance, cp.data);
1066 /* There's nothing to do if the data hasn't changed */
1067 if (hdev->adv_data_len == len &&
1068 memcmp(cp.data, hdev->adv_data, len) == 0)
1071 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1072 hdev->adv_data_len = len;
1076 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1079 static void update_adv_data(struct hci_request *req)
1081 struct hci_dev *hdev = req->hdev;
1082 u8 instance = get_current_adv_instance(hdev);
1084 update_adv_data_for_instance(req, instance);
1087 int mgmt_update_adv_data(struct hci_dev *hdev)
1089 struct hci_request req;
1091 hci_req_init(&req, hdev);
1092 update_adv_data(&req);
1094 return hci_req_run(&req, NULL);
1097 static void create_eir(struct hci_dev *hdev, u8 *data)
1102 name_len = strlen(hdev->dev_name);
1106 if (name_len > 48) {
1108 ptr[1] = EIR_NAME_SHORT;
1110 ptr[1] = EIR_NAME_COMPLETE;
1112 /* EIR Data length */
1113 ptr[0] = name_len + 1;
1115 memcpy(ptr + 2, hdev->dev_name, name_len);
1117 ptr += (name_len + 2);
1120 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1122 ptr[1] = EIR_TX_POWER;
1123 ptr[2] = (u8) hdev->inq_tx_power;
1128 if (hdev->devid_source > 0) {
1130 ptr[1] = EIR_DEVICE_ID;
1132 put_unaligned_le16(hdev->devid_source, ptr + 2);
1133 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1134 put_unaligned_le16(hdev->devid_product, ptr + 6);
1135 put_unaligned_le16(hdev->devid_version, ptr + 8);
1140 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1141 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1142 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1145 static void update_eir(struct hci_request *req)
1147 struct hci_dev *hdev = req->hdev;
1148 struct hci_cp_write_eir cp;
1150 if (!hdev_is_powered(hdev))
1153 if (!lmp_ext_inq_capable(hdev))
1156 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1159 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1162 memset(&cp, 0, sizeof(cp));
1164 create_eir(hdev, cp.data);
1166 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1169 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1171 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1174 static u8 get_service_classes(struct hci_dev *hdev)
1176 struct bt_uuid *uuid;
1179 list_for_each_entry(uuid, &hdev->uuids, list)
1180 val |= uuid->svc_hint;
1185 static void update_class(struct hci_request *req)
1187 struct hci_dev *hdev = req->hdev;
1190 BT_DBG("%s", hdev->name);
1192 if (!hdev_is_powered(hdev))
1195 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1198 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1201 cod[0] = hdev->minor_class;
1202 cod[1] = hdev->major_class;
1203 cod[2] = get_service_classes(hdev);
1205 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1208 if (memcmp(cod, hdev->dev_class, 3) == 0)
1211 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1214 static void disable_advertising(struct hci_request *req)
1218 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1221 static void enable_advertising(struct hci_request *req)
1223 struct hci_dev *hdev = req->hdev;
1224 struct hci_cp_le_set_adv_param cp;
1225 u8 own_addr_type, enable = 0x01;
1230 if (hci_conn_num(hdev, LE_LINK) > 0)
1233 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1234 disable_advertising(req);
1236 /* Clear the HCI_LE_ADV bit temporarily so that the
1237 * hci_update_random_address knows that it's safe to go ahead
1238 * and write a new random address. The flag will be set back on
1239 * as soon as the SET_ADV_ENABLE HCI command completes.
1241 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1243 instance = get_current_adv_instance(hdev);
1244 flags = get_adv_instance_flags(hdev, instance);
1245 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE);
1247 /* Set require_privacy to true only when non-connectable
1248 * advertising is used. In that case it is fine to use a
1249 * non-resolvable private address.
1251 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1254 memset(&cp, 0, sizeof(cp));
1255 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1256 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1257 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1258 cp.own_address_type = own_addr_type;
1259 cp.channel_map = hdev->le_adv_channel_map;
1261 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1263 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1266 static void service_cache_off(struct work_struct *work)
1268 struct hci_dev *hdev = container_of(work, struct hci_dev,
1269 service_cache.work);
1270 struct hci_request req;
1272 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1275 hci_req_init(&req, hdev);
1282 hci_dev_unlock(hdev);
1284 hci_req_run(&req, NULL);
1287 static void rpa_expired(struct work_struct *work)
1289 struct hci_dev *hdev = container_of(work, struct hci_dev,
1291 struct hci_request req;
1295 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1297 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1300 /* The generation of a new RPA and programming it into the
1301 * controller happens in the enable_advertising() function.
1303 hci_req_init(&req, hdev);
1304 enable_advertising(&req);
1305 hci_req_run(&req, NULL);
1308 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1310 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1313 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1314 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1316 /* Non-mgmt controlled devices get this bit set
1317 * implicitly so that pairing works for them, however
1318 * for mgmt we require user-space to explicitly enable
1321 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: fills mgmt_rp_read_info with the controller
 * address, HCI version/manufacturer, supported and current settings bitmasks,
 * class of device and names, then answers via mgmt_cmd_complete().
 * NOTE(review): the hci_dev_lock() paired with the unlock below and the final
 * size argument of the reply are missing from this extract.
 */
1324 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1325 void *data, u16 data_len)
1327 struct mgmt_rp_read_info rp;
1329 BT_DBG("sock %p %s", sk, hdev->name);
1333 memset(&rp, 0, sizeof(rp));
1335 bacpy(&rp.bdaddr, &hdev->bdaddr);
1337 rp.version = hdev->hci_ver;
1338 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1340 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1341 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
/* Class of device is always exactly 3 octets on the wire. */
1343 memcpy(rp.dev_class, hdev->dev_class, 3);
1345 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1346 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1348 hci_dev_unlock(hdev);
1350 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Reply to @opcode with the controller's current settings bitmask (LE32).
 * NOTE(review): the trailing size argument line is missing from this extract.
 */
1354 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1356 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1358 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once no connections remain,
 * cancel the delayed power-off and execute it immediately on the request
 * workqueue instead.
 */
1362 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1364 BT_DBG("%s status 0x%02x", hdev->name, status);
1366 if (hci_conn_count(hdev) == 0) {
1367 cancel_delayed_work(&hdev->power_off);
1368 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue onto @req the HCI commands required to stop whichever discovery
 * phase is active: inquiry cancel and/or LE scan disable while finding,
 * remote-name-request cancel while resolving, and plain LE scan disable for
 * passive scanning.  Returns whether any stop command was queued.
 * NOTE(review): the return statements, some case labels and closing braces
 * are missing from this extract - verify against the complete source.
 */
1372 static bool hci_stop_discovery(struct hci_request *req)
1374 struct hci_dev *hdev = req->hdev;
1375 struct hci_cp_remote_name_req_cancel cp;
1376 struct inquiry_entry *e;
1378 switch (hdev->discovery.state) {
1379 case DISCOVERY_FINDING:
1380 if (test_bit(HCI_INQUIRY, &hdev->flags))
1381 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1383 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
/* Stop the pending auto-disable so it does not race the explicit one. */
1384 cancel_delayed_work(&hdev->le_scan_disable);
1385 hci_req_add_le_scan_disable(req);
1390 case DISCOVERY_RESOLVING:
1391 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1396 bacpy(&cp.bdaddr, &e->data.bdaddr);
1397 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1403 /* Passive scanning */
1404 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1405 hci_req_add_le_scan_disable(req);
/* Emit MGMT_EV_ADVERTISING_ADDED carrying the instance number; @sk (the
 * originating socket) is excluded by mgmt_event().
 * NOTE(review): the tail of the parameter list (the instance argument) is
 * missing from this extract.
 */
1415 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1418 struct mgmt_ev_advertising_added ev;
1420 ev.instance = instance;
1422 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADVERTISING_REMOVED carrying the instance number; @sk (the
 * originating socket) is excluded by mgmt_event().
 * NOTE(review): the tail of the parameter list (the instance argument) is
 * missing from this extract.
 */
1425 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1428 struct mgmt_ev_advertising_removed ev;
1430 ev.instance = instance;
1432 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Tear down the single stored advertising instance: cancel its timeout, wipe
 * the instance data, broadcast the removed event (instance 1) and clear the
 * flag.  If the device is powered and software advertising (HCI_ADVERTISING)
 * is not on, also queue and run a disable-advertising request.
 */
1435 static void clear_adv_instance(struct hci_dev *hdev)
1437 struct hci_request req;
1439 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1442 if (hdev->adv_instance.timeout)
1443 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
1445 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
/* NULL socket: nobody is skipped for this broadcast. */
1446 advertising_removed(NULL, hdev, 1);
1447 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
1449 if (!hdev_is_powered(hdev) ||
1450 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1453 hci_req_init(&req, hdev);
1454 disable_advertising(&req);
1455 hci_req_run(&req, NULL);
/* Build and run one HCI request that quiesces the controller before power
 * off: disable page/inquiry scan, drop the adv instance, disable LE
 * advertising, stop discovery, and disconnect / cancel / reject every known
 * connection with reason 0x15 ("Terminated due to Power Off").  On success,
 * if discovery was being stopped, move its state to DISCOVERY_STOPPING.
 * Returns the hci_req_run() result (-ENODATA when nothing was queued).
 * NOTE(review): case labels, the scan variable setup and several closing
 * braces are missing from this extract.
 */
1458 static int clean_up_hci_state(struct hci_dev *hdev)
1460 struct hci_request req;
1461 struct hci_conn *conn;
1462 bool discov_stopped;
1465 hci_req_init(&req, hdev);
1467 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1468 test_bit(HCI_PSCAN, &hdev->flags)) {
1470 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1473 if (hdev->adv_instance.timeout)
1474 clear_adv_instance(hdev);
1476 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1477 disable_advertising(&req);
1479 discov_stopped = hci_stop_discovery(&req);
/* Wind down every connection according to its current state. */
1481 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1482 struct hci_cp_disconnect dc;
1483 struct hci_cp_reject_conn_req rej;
1485 switch (conn->state) {
1488 dc.handle = cpu_to_le16(conn->handle);
1489 dc.reason = 0x15; /* Terminated due to Power Off */
1490 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1493 if (conn->type == LE_LINK)
1494 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1496 else if (conn->type == ACL_LINK)
1497 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1501 bacpy(&rej.bdaddr, &conn->dst);
1502 rej.reason = 0x15; /* Terminated due to Power Off */
1503 if (conn->type == ACL_LINK)
1504 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1506 else if (conn->type == SCO_LINK)
1507 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1513 err = hci_req_run(&req, clean_up_hci_complete);
1514 if (!err && discov_stopped)
1515 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler.  Validates the mode value, rejects a second
 * concurrent SET_POWERED, short-circuits the HCI_AUTO_OFF case by calling
 * mgmt_powered() directly, and answers immediately when the requested state
 * already matches.  Otherwise a pending command is added and power-on is
 * queued, or for power-off the HCI state is cleaned up first (with a
 * watchdog delayed power-off; -ENODATA means no HCI commands were needed so
 * power off runs at once).
 * NOTE(review): hci_dev_lock(), goto labels, braces and the final return are
 * missing from this extract.
 */
1520 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1523 struct mgmt_mode *cp = data;
1524 struct mgmt_pending_cmd *cmd;
1527 BT_DBG("request for %s", hdev->name);
1529 if (cp->val != 0x00 && cp->val != 0x01)
1530 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1531 MGMT_STATUS_INVALID_PARAMS);
1535 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1536 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: the device is already up, just flip mgmt state. */
1541 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1542 cancel_delayed_work(&hdev->power_off);
1545 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1547 err = mgmt_powered(hdev, 1);
1552 if (!!cp->val == hdev_is_powered(hdev)) {
1553 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1557 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1564 queue_work(hdev->req_workqueue, &hdev->power_on);
1567 /* Disconnect connections, stop scans, etc */
1568 err = clean_up_hci_state(hdev);
1570 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1571 HCI_POWER_OFF_TIMEOUT);
1573 /* ENODATA means there were no HCI commands queued */
1574 if (err == -ENODATA) {
1575 cancel_delayed_work(&hdev->power_off);
1576 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1582 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to all
 * mgmt sockets except @skip.
 * NOTE(review): the trailing arguments of the mgmt_generic_event() call are
 * missing from this extract.
 */
1586 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1588 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1590 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1594 int mgmt_new_settings(struct hci_dev *hdev)
1596 return new_settings(hdev, NULL);
1601 struct hci_dev *hdev;
/* mgmt_pending_foreach() helper: answer the pending command with the current
 * settings, unlink it, remember the first responding socket in the
 * cmd_lookup match (taking a reference) and free the command.
 */
1605 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1607 struct cmd_lookup *match = data;
1609 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1611 list_del(&cmd->list);
/* Keep only the first socket; it is used later for skip/notify logic. */
1613 if (match->sk == NULL) {
1614 match->sk = cmd->sk;
1615 sock_hold(match->sk);
1618 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() helper: fail the pending command with the status
 * passed via @data and remove it.
 * NOTE(review): the "u8 *status = data;" declaration is missing from this
 * extract.
 */
1621 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1625 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1626 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() helper: prefer the command's own cmd_complete
 * handler when set; otherwise fall back to a plain status response via
 * cmd_status_rsp().
 */
1629 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1631 if (cmd->cmd_complete) {
1634 cmd->cmd_complete(cmd, *status);
1635 mgmt_pending_remove(cmd);
1640 cmd_status_rsp(cmd, data);
1643 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1645 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1646 cmd->param, cmd->param_len);
1649 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1651 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1652 cmd->param, sizeof(struct mgmt_addr_info));
1655 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1657 if (!lmp_bredr_capable(hdev))
1658 return MGMT_STATUS_NOT_SUPPORTED;
1659 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1660 return MGMT_STATUS_REJECTED;
1662 return MGMT_STATUS_SUCCESS;
1665 static u8 mgmt_le_support(struct hci_dev *hdev)
1667 if (!lmp_le_capable(hdev))
1668 return MGMT_STATUS_NOT_SUPPORTED;
1669 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1670 return MGMT_STATUS_REJECTED;
1672 return MGMT_STATUS_SUCCESS;
/* HCI request completion for MGMT_OP_SET_DISCOVERABLE.  On controller error,
 * fail the pending command and drop HCI_LIMITED_DISCOVERABLE; on success,
 * flip HCI_DISCOVERABLE, (re)arm the discoverable timeout, answer the
 * requester, notify settings changes, and re-run page scan so the class of
 * device / whitelist state stays consistent (see in-code comment).
 * NOTE(review): hci_dev_lock(), the enable/disable branch structure and goto
 * labels are missing from this extract.
 */
1675 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1678 struct mgmt_pending_cmd *cmd;
1679 struct mgmt_mode *cp;
1680 struct hci_request req;
1683 BT_DBG("status 0x%02x", status);
1687 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1692 u8 mgmt_err = mgmt_status(status);
1693 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1694 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1700 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
/* Arm the auto-disable timer only when a timeout was configured. */
1702 if (hdev->discov_timeout > 0) {
1703 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1704 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1708 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1711 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1714 new_settings(hdev, cmd->sk);
1716 /* When the discoverable mode gets changed, make sure
1717 * that class of device has the limited discoverable
1718 * bit correctly set. Also update page scan based on whitelist
1721 hci_req_init(&req, hdev);
1722 __hci_update_page_scan(&req);
1724 hci_req_run(&req, NULL);
1727 mgmt_pending_remove(cmd);
1730 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  val: 0x00 off, 0x01 general, 0x02
 * limited discoverable.  Validates the mode/timeout combination, rejects the
 * request while unpowered-with-timeout, while a SET_DISCOVERABLE or
 * SET_CONNECTABLE is pending, or when the device is not connectable.  When
 * powered off it only toggles the flag; when only the timeout changes it
 * re-arms the timer without HCI traffic.  Otherwise a pending command is
 * added and the IAC LAP / scan-enable (BR/EDR) or advertising data (LE-only)
 * commands are queued and run with set_discoverable_complete().
 * NOTE(review): hci_dev_lock(), goto labels, the scan variable and several
 * braces are missing from this extract.
 */
1733 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1736 struct mgmt_cp_set_discoverable *cp = data;
1737 struct mgmt_pending_cmd *cmd;
1738 struct hci_request req;
1743 BT_DBG("request for %s", hdev->name);
1745 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1746 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1748 MGMT_STATUS_REJECTED);
1750 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1752 MGMT_STATUS_INVALID_PARAMS);
1754 timeout = __le16_to_cpu(cp->timeout);
1756 /* Disabling discoverable requires that no timeout is set,
1757 * and enabling limited discoverable requires a timeout.
1759 if ((cp->val == 0x00 && timeout > 0) ||
1760 (cp->val == 0x02 && timeout == 0))
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1762 MGMT_STATUS_INVALID_PARAMS);
1766 if (!hdev_is_powered(hdev) && timeout > 0) {
1767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1768 MGMT_STATUS_NOT_POWERED);
1772 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1773 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1774 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1779 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1781 MGMT_STATUS_REJECTED);
1785 if (!hdev_is_powered(hdev)) {
1786 bool changed = false;
1788 /* Setting limited discoverable when powered off is
1789 * not a valid operation since it requires a timeout
1790 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1792 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1793 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1797 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1802 err = new_settings(hdev, sk);
1807 /* If the current mode is the same, then just update the timeout
1808 * value with the new value. And if only the timeout gets updated,
1809 * then no need for any HCI transactions.
1811 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1812 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1813 HCI_LIMITED_DISCOVERABLE)) {
1814 cancel_delayed_work(&hdev->discov_off);
1815 hdev->discov_timeout = timeout;
1817 if (cp->val && hdev->discov_timeout > 0) {
1818 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1819 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1823 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1827 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1833 /* Cancel any potential discoverable timeout that might be
1834 * still active and store new timeout value. The arming of
1835 * the timeout happens in the complete handler.
1837 cancel_delayed_work(&hdev->discov_off);
1838 hdev->discov_timeout = timeout;
1840 /* Limited discoverable mode */
1841 if (cp->val == 0x02)
1842 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1844 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1846 hci_req_init(&req, hdev);
1848 /* The procedure for LE-only controllers is much simpler - just
1849 * update the advertising data.
1851 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1857 struct hci_cp_write_current_iac_lap hci_cp;
1859 if (cp->val == 0x02) {
1860 /* Limited discoverable mode */
1861 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC is 0x9e8b00, GIAC is 0x9e8b33 - written LSB first below. */
1862 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1863 hci_cp.iac_lap[1] = 0x8b;
1864 hci_cp.iac_lap[2] = 0x9e;
1865 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1866 hci_cp.iac_lap[4] = 0x8b;
1867 hci_cp.iac_lap[5] = 0x9e;
1869 /* General discoverable mode */
1871 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1872 hci_cp.iac_lap[1] = 0x8b;
1873 hci_cp.iac_lap[2] = 0x9e;
/* Payload is num_iac byte plus 3 octets per LAP. */
1876 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1877 (hci_cp.num_iac * 3) + 1, &hci_cp);
1879 scan |= SCAN_INQUIRY;
1881 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1884 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1887 update_adv_data(&req);
1889 err = hci_req_run(&req, set_discoverable_complete);
1891 mgmt_pending_remove(cmd);
1894 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing "fast connectable":
 * interlaced scan with a 160 ms interval when enabled, standard scan with
 * the default 1.28 s interval when disabled; window is 11.25 ms (0x0012)
 * either way.  Commands are only queued when the values actually change.
 * Skipped entirely when BR/EDR is disabled or the controller predates
 * Bluetooth 1.2 (no interlaced scan).
 * NOTE(review): the "u8 type;" declaration, the enable branch and braces are
 * missing from this extract.
 */
1898 static void write_fast_connectable(struct hci_request *req, bool enable)
1900 struct hci_dev *hdev = req->hdev;
1901 struct hci_cp_write_page_scan_activity acp;
1904 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1907 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1911 type = PAGE_SCAN_TYPE_INTERLACED;
1913 /* 160 msec page scan interval */
1914 acp.interval = cpu_to_le16(0x0100);
1916 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1918 /* default 1.28 sec page scan */
1919 acp.interval = cpu_to_le16(0x0800);
1922 acp.window = cpu_to_le16(0x0012);
/* Avoid redundant HCI traffic when nothing changed. */
1924 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1925 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1926 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1929 if (hdev->page_scan_type != type)
1930 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion for MGMT_OP_SET_CONNECTABLE.  On error the pending
 * command is failed; on success HCI_CONNECTABLE (and, when disabling, also
 * HCI_DISCOVERABLE) is updated, the requester answered, and on any change
 * settings are broadcast and page scan / adv data / background scan are
 * refreshed.
 * NOTE(review): hci_dev_lock(), the flag-name continuation lines and goto
 * labels are missing from this extract.
 */
1933 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1936 struct mgmt_pending_cmd *cmd;
1937 struct mgmt_mode *cp;
1938 bool conn_changed, discov_changed;
1940 BT_DBG("status 0x%02x", status);
1944 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1949 u8 mgmt_err = mgmt_status(status);
1950 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1956 conn_changed = !hci_dev_test_and_set_flag(hdev,
1958 discov_changed = false;
1960 conn_changed = hci_dev_test_and_clear_flag(hdev,
1962 discov_changed = hci_dev_test_and_clear_flag(hdev,
1966 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1968 if (conn_changed || discov_changed) {
1969 new_settings(hdev, cmd->sk);
1970 hci_update_page_scan(hdev);
1972 mgmt_update_adv_data(hdev);
1973 hci_update_background_scan(hdev);
1977 mgmt_pending_remove(cmd);
1980 hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when powered off or when no HCI
 * commands were needed): toggle HCI_CONNECTABLE - clearing HCI_DISCOVERABLE
 * too when disabling - answer the requester, refresh page/background scan,
 * and broadcast new settings if anything changed.
 * NOTE(review): braces, the "changed = true" assignment and error handling
 * lines are missing from this extract.
 */
1983 static int set_connectable_update_settings(struct hci_dev *hdev,
1984 struct sock *sk, u8 val)
1986 bool changed = false;
1989 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1993 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1995 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Not connectable implies not discoverable either. */
1996 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1999 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2004 hci_update_page_scan(hdev);
2005 hci_update_background_scan(hdev);
2006 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  Validates the mode, takes the flag-only
 * path when unpowered, rejects while a SET_DISCOVERABLE/SET_CONNECTABLE is
 * pending, then builds an HCI request: LE-only controllers refresh adv data
 * (clearing discoverable flags when disabling); BR/EDR controllers adjust
 * the scan-enable mode per the whitelist (see in-code comment).  Advertising
 * parameters are refreshed when software advertising is on.  -ENODATA from
 * hci_req_run() falls back to the flag-only settings update.
 * NOTE(review): hci_dev_lock(), the scan variable, goto labels and braces
 * are missing from this extract.
 */
2012 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2015 struct mgmt_mode *cp = data;
2016 struct mgmt_pending_cmd *cmd;
2017 struct hci_request req;
2021 BT_DBG("request for %s", hdev->name);
2023 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2024 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2026 MGMT_STATUS_REJECTED);
2028 if (cp->val != 0x00 && cp->val != 0x01)
2029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2030 MGMT_STATUS_INVALID_PARAMS);
2034 if (!hdev_is_powered(hdev)) {
2035 err = set_connectable_update_settings(hdev, sk, cp->val);
2039 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2040 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2041 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2046 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2052 hci_req_init(&req, hdev);
2054 /* If BR/EDR is not enabled and we disable advertising as a
2055 * by-product of disabling connectable, we need to update the
2056 * advertising flags.
2058 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2060 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2061 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2063 update_adv_data(&req);
2064 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2068 /* If we don't have any whitelist entries just
2069 * disable all scanning. If there are entries
2070 * and we had both page and inquiry scanning
2071 * enabled then fall back to only page scanning.
2072 * Otherwise no changes are needed.
2074 if (list_empty(&hdev->whitelist))
2075 scan = SCAN_DISABLED;
2076 else if (test_bit(HCI_ISCAN, &hdev->flags))
2079 goto no_scan_update;
/* A running discoverable timeout is pointless once not discoverable. */
2081 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2082 hdev->discov_timeout > 0)
2083 cancel_delayed_work(&hdev->discov_off);
2086 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2090 /* Update the advertising parameters if necessary */
2091 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2092 enable_advertising(&req);
2094 err = hci_req_run(&req, set_connectable_complete);
2096 mgmt_pending_remove(cmd);
2097 if (err == -ENODATA)
2098 err = set_connectable_update_settings(hdev, sk,
2104 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: pure flag toggle, no HCI traffic.  Validates
 * the mode, flips HCI_BONDABLE, answers with the current settings and
 * broadcasts new settings when the flag actually changed.
 * NOTE(review): hci_dev_lock(), the if/else around the flag toggle and goto
 * labels are missing from this extract.
 */
2108 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2111 struct mgmt_mode *cp = data;
2115 BT_DBG("request for %s", hdev->name);
2117 if (cp->val != 0x00 && cp->val != 0x01)
2118 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2119 MGMT_STATUS_INVALID_PARAMS);
2124 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2126 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2128 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2133 err = new_settings(hdev, sk);
2136 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler.  Requires BR/EDR support, validates the
 * mode.  When unpowered only the HCI_LINK_SECURITY flag is toggled; when
 * powered and the value differs from the controller's HCI_AUTH state, a
 * pending command is added and HCI Write Authentication Enable is sent.
 * NOTE(review): hci_dev_lock(), the "val = !!cp->val" assignment, goto
 * labels and braces are missing from this extract.
 */
2140 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2143 struct mgmt_mode *cp = data;
2144 struct mgmt_pending_cmd *cmd;
2148 BT_DBG("request for %s", hdev->name);
2150 status = mgmt_bredr_support(hdev);
2152 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2155 if (cp->val != 0x00 && cp->val != 0x01)
2156 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2157 MGMT_STATUS_INVALID_PARAMS);
2161 if (!hdev_is_powered(hdev)) {
2162 bool changed = false;
2164 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2165 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2169 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2174 err = new_settings(hdev, sk);
2179 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2180 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: nothing to send. */
2187 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2188 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2192 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2198 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2200 mgmt_pending_remove(cmd);
2205 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler.  Requires BR/EDR and SSP capability, validates
 * the mode.  When unpowered only the SSP-related flags are toggled (HS is
 * cleared alongside SSP when disabling); when powered a pending command is
 * added, SSP debug mode is turned off first if needed, and HCI Write Simple
 * Pairing Mode is sent.
 * NOTE(review): hci_dev_lock(), flag-name continuation lines, goto labels
 * and braces are missing from this extract.
 */
2209 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2211 struct mgmt_mode *cp = data;
2212 struct mgmt_pending_cmd *cmd;
2216 BT_DBG("request for %s", hdev->name);
2218 status = mgmt_bredr_support(hdev);
2220 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2222 if (!lmp_ssp_capable(hdev))
2223 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2224 MGMT_STATUS_NOT_SUPPORTED);
2226 if (cp->val != 0x00 && cp->val != 0x01)
2227 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2228 MGMT_STATUS_INVALID_PARAMS);
2232 if (!hdev_is_powered(hdev)) {
2236 changed = !hci_dev_test_and_set_flag(hdev,
2239 changed = hci_dev_test_and_clear_flag(hdev,
2242 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so it goes down with it. */
2245 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2248 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2253 err = new_settings(hdev, sk);
2258 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2259 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2264 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2265 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2269 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP also turns debug-key mode off first. */
2275 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2276 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2277 sizeof(cp->val), &cp->val);
2279 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2281 mgmt_pending_remove(cmd);
2286 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: pure flag operation, no HCI traffic.
 * Requires BR/EDR, SSP capability and SSP enabled; validates the mode;
 * refuses while a SET_SSP is pending; disabling is rejected while powered.
 * Toggles HCI_HS_ENABLED, answers and broadcasts new settings on change.
 * NOTE(review): hci_dev_lock(), the if/else structure around the toggle and
 * goto labels are missing from this extract.
 */
2290 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2292 struct mgmt_mode *cp = data;
2297 BT_DBG("request for %s", hdev->name);
2299 status = mgmt_bredr_support(hdev);
2301 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2303 if (!lmp_ssp_capable(hdev))
2304 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2305 MGMT_STATUS_NOT_SUPPORTED);
2307 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2308 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2309 MGMT_STATUS_REJECTED);
2311 if (cp->val != 0x00 && cp->val != 0x01)
2312 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2313 MGMT_STATUS_INVALID_PARAMS);
2317 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2318 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2324 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2326 if (hdev_is_powered(hdev)) {
2327 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2328 MGMT_STATUS_REJECTED);
2332 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2335 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2340 err = new_settings(hdev, sk);
2343 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_LE.  On error all pending SET_LE
 * commands are failed with the mapped status; on success they are answered
 * with the current settings and a settings broadcast is emitted.  When LE
 * ended up enabled, advertising/scan-response data and background scanning
 * are refreshed (see in-code comment).
 * NOTE(review): hci_dev_lock(), the error-branch structure and the
 * sock_put() on match.sk are missing from this extract.
 */
2347 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2349 struct cmd_lookup match = { NULL, hdev };
2354 u8 mgmt_err = mgmt_status(status);
2356 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2361 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2363 new_settings(hdev, match.sk);
2368 /* Make sure the controller has a good default for
2369 * advertising data. Restrict the update to when LE
2370 * has actually been enabled. During power on, the
2371 * update in powered_update_hci will take care of it.
2373 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2374 struct hci_request req;
2376 hci_req_init(&req, hdev);
2377 update_adv_data(&req);
2378 update_scan_rsp_data(&req);
2379 __hci_update_background_scan(&req);
2380 hci_req_run(&req, NULL);
2384 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler.  Requires LE capability and a valid mode; on
 * LE-only configurations enabling is a no-op success and disabling is
 * rejected (see in-code comment).  When unpowered or when the host-LE state
 * already matches, only the flags are toggled (dropping HCI_ADVERTISING when
 * disabling).  Otherwise a pending command is added, advertising is disabled
 * first when active, and HCI Write LE Host Supported is sent via a request
 * completed by le_enable_complete().
 * NOTE(review): hci_dev_lock(), the val/enabled declarations, hci_cp.le
 * assignment, goto labels and braces are missing from this extract.
 */
2387 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2389 struct mgmt_mode *cp = data;
2390 struct hci_cp_write_le_host_supported hci_cp;
2391 struct mgmt_pending_cmd *cmd;
2392 struct hci_request req;
2396 BT_DBG("request for %s", hdev->name);
2398 if (!lmp_le_capable(hdev))
2399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2400 MGMT_STATUS_NOT_SUPPORTED);
2402 if (cp->val != 0x00 && cp->val != 0x01)
2403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2404 MGMT_STATUS_INVALID_PARAMS);
2406 /* Bluetooth single mode LE only controllers or dual-mode
2407 * controllers configured as LE only devices, do not allow
2408 * switching LE off. These have either LE enabled explicitly
2409 * or BR/EDR has been previously switched off.
2411 * When trying to enable an already enabled LE, then gracefully
2412 * send a positive response. Trying to disable it however will
2413 * result into rejection.
2415 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2416 if (cp->val == 0x01)
2417 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2419 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2420 MGMT_STATUS_REJECTED);
2426 enabled = lmp_host_le_capable(hdev);
2428 if (!hdev_is_powered(hdev) || val == enabled) {
2429 bool changed = false;
2431 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2432 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Advertising cannot stay on without LE. */
2436 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2437 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2441 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2446 err = new_settings(hdev, sk);
2451 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2452 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2453 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2458 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2464 hci_req_init(&req, hdev);
2466 memset(&hci_cp, 0, sizeof(hci_cp));
2470 hci_cp.simul = 0x00;
2472 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2473 disable_advertising(&req);
2476 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2479 err = hci_req_run(&req, le_enable_complete);
2481 mgmt_pending_remove(cmd);
2484 hci_dev_unlock(hdev);
2488 /* This is a helper function to test for pending mgmt commands that can
2489 * cause CoD or EIR HCI commands. We can only allow one such pending
2490 * mgmt command at a time since otherwise we cannot easily track what
2491 * the current values are, will be, and based on that calculate if a new
2492 * HCI command needs to be sent and if yes with what value.
/* Returns whether any UUID/class/power command is still pending.
 * NOTE(review): the "return true" / "return false" lines and braces are
 * missing from this extract.
 */
2494 static bool pending_eir_or_class(struct hci_dev *hdev)
2496 struct mgmt_pending_cmd *cmd;
2498 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2499 switch (cmd->opcode) {
2500 case MGMT_OP_ADD_UUID:
2501 case MGMT_OP_REMOVE_UUID:
2502 case MGMT_OP_SET_DEV_CLASS:
2503 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * little-endian; get_uuid_size() compares against its first 12 bytes to
 * detect 16/32-bit short-form UUIDs.
 */
2511 static const u8 bluetooth_base_uuid[] = {
2512 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2513 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 128-, 32- or 16-bit based on the Bluetooth Base
 * UUID: a mismatch in the first 12 bytes means full 128-bit, otherwise the
 * remaining 32-bit value decides.
 * NOTE(review): the return statements (128 / 32 / 16) are missing from this
 * extract.
 */
2516 static u8 get_uuid_size(const u8 *uuid)
2520 if (memcmp(uuid, bluetooth_base_uuid, 12))
2523 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for the class-affecting commands (ADD/REMOVE_UUID,
 * SET_DEV_CLASS): look up the pending command for @mgmt_op, answer it with
 * the mapped status plus the 3-byte class of device, and remove it.
 * NOTE(review): hci_dev_lock() and the NULL-check on cmd are missing from
 * this extract.
 */
2530 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2532 struct mgmt_pending_cmd *cmd;
2536 cmd = pending_find(mgmt_op, hdev);
2540 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2541 mgmt_status(status), hdev->dev_class, 3);
2543 mgmt_pending_remove(cmd);
2546 hci_dev_unlock(hdev);
2549 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2551 BT_DBG("status 0x%02x", status);
2553 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: refuses while another EIR/class command is
 * pending, allocates and stores the new UUID (with its service hint and
 * computed size) on hdev->uuids, then runs an HCI request; -ENODATA means
 * nothing needed sending, so reply immediately with the class of device,
 * otherwise a pending command is added and add_uuid_complete() answers.
 * NOTE(review): hci_dev_lock(), the allocation-failure branch, the
 * update_class/update_eir calls and goto labels are missing from this
 * extract.
 */
2556 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2558 struct mgmt_cp_add_uuid *cp = data;
2559 struct mgmt_pending_cmd *cmd;
2560 struct hci_request req;
2561 struct bt_uuid *uuid;
2564 BT_DBG("request for %s", hdev->name);
2568 if (pending_eir_or_class(hdev)) {
2569 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2574 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2580 memcpy(uuid->uuid, cp->uuid, 16);
2581 uuid->svc_hint = cp->svc_hint;
2582 uuid->size = get_uuid_size(cp->uuid);
2584 list_add_tail(&uuid->list, &hdev->uuids);
2586 hci_req_init(&req, hdev);
2591 err = hci_req_run(&req, add_uuid_complete);
2593 if (err != -ENODATA)
/* No HCI commands were queued: answer right away. */
2596 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2597 hdev->dev_class, 3);
2601 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2610 hci_dev_unlock(hdev);
/* Arm the service cache: when powered and HCI_SERVICE_CACHE was not already
 * set, schedule the delayed service_cache_off work.  Returns whether the
 * cache is in effect.
 * NOTE(review): the return statements and the delay constant line are
 * missing from this extract.
 */
2614 static bool enable_service_cache(struct hci_dev *hdev)
2616 if (!hdev_is_powered(hdev))
2619 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2620 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2628 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2630 BT_DBG("status 0x%02x", status);
2632 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler.  Refuses while another EIR/class command is
 * pending.  An all-zero UUID clears the whole list (and may just arm the
 * service cache instead of touching the controller); otherwise every list
 * entry matching the 128-bit UUID is removed, with INVALID_PARAMS when none
 * matched.  Then an HCI request is run: -ENODATA answers immediately with
 * the class of device, otherwise a pending command is added and
 * remove_uuid_complete() answers.
 * NOTE(review): hci_dev_lock(), the "found" counter, kfree of the removed
 * entries, the update_class/update_eir calls and goto labels are missing
 * from this extract.
 */
2635 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2638 struct mgmt_cp_remove_uuid *cp = data;
2639 struct mgmt_pending_cmd *cmd;
2640 struct bt_uuid *match, *tmp;
2641 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2642 struct hci_request req;
2645 BT_DBG("request for %s", hdev->name);
2649 if (pending_eir_or_class(hdev)) {
2650 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID means "remove everything". */
2655 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2656 hci_uuids_clear(hdev);
2658 if (enable_service_cache(hdev)) {
2659 err = mgmt_cmd_complete(sk, hdev->id,
2660 MGMT_OP_REMOVE_UUID,
2661 0, hdev->dev_class, 3);
2670 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2671 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2674 list_del(&match->list);
2680 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2681 MGMT_STATUS_INVALID_PARAMS);
2686 hci_req_init(&req, hdev);
2691 err = hci_req_run(&req, remove_uuid_complete);
2693 if (err != -ENODATA)
2696 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2697 hdev->dev_class, 3);
2701 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2710 hci_dev_unlock(hdev);
2714 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2716 BT_DBG("status 0x%02x", status);
2718 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler.  Requires BR/EDR capability; refuses while
 * another EIR/class command is pending; validates that the reserved bits of
 * minor (low 2) and major (high 3) are zero.  Stores the new major/minor;
 * when unpowered just replies with the (unchanged) class.  Otherwise, if the
 * service cache was armed it is flushed first (unlock around the sync
 * cancel), then an HCI request is run: -ENODATA answers immediately,
 * otherwise a pending command is added and set_class_complete() answers.
 * NOTE(review): hci_dev_lock(), the update_class/update_eir calls between
 * init and run, and goto labels are missing from this extract.
 */
2721 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2724 struct mgmt_cp_set_dev_class *cp = data;
2725 struct mgmt_pending_cmd *cmd;
2726 struct hci_request req;
2729 BT_DBG("request for %s", hdev->name);
2731 if (!lmp_bredr_capable(hdev))
2732 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2733 MGMT_STATUS_NOT_SUPPORTED);
2737 if (pending_eir_or_class(hdev)) {
2738 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2743 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2744 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2745 MGMT_STATUS_INVALID_PARAMS);
2749 hdev->major_class = cp->major;
2750 hdev->minor_class = cp->minor;
2752 if (!hdev_is_powered(hdev)) {
2753 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2754 hdev->dev_class, 3);
2758 hci_req_init(&req, hdev);
/* Flush a pending service-cache expiry before reprogramming the class. */
2760 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2761 hci_dev_unlock(hdev);
2762 cancel_delayed_work_sync(&hdev->service_cache);
2769 err = hci_req_run(&req, set_class_complete);
2771 if (err != -ENODATA)
2774 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2775 hdev->dev_class, 3);
2779 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2788 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler.  Requires BR/EDR capability.  Validates:
 * key_count against the overflow-safe max derived from U16_MAX, the exact
 * expected payload length, the debug_keys mode byte, and each key's address
 * type / key type before touching state.  Then clears the existing keys,
 * updates HCI_KEEP_DEBUG_KEYS (broadcasting settings on change) and stores
 * every non-debug key; debug combination keys are always skipped (see
 * in-code comment).  Always replies with success once loading ran.
 * NOTE(review): hci_dev_lock(), the "bool changed", loop braces and the
 * final return are missing from this extract.
 */
2792 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2795 struct mgmt_cp_load_link_keys *cp = data;
2796 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2797 sizeof(struct mgmt_link_key_info));
2798 u16 key_count, expected_len;
2802 BT_DBG("request for %s", hdev->name);
2804 if (!lmp_bredr_capable(hdev))
2805 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2806 MGMT_STATUS_NOT_SUPPORTED);
2808 key_count = __le16_to_cpu(cp->key_count);
2809 if (key_count > max_key_count) {
2810 BT_ERR("load_link_keys: too big key_count value %u",
2812 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2813 MGMT_STATUS_INVALID_PARAMS);
2816 expected_len = sizeof(*cp) + key_count *
2817 sizeof(struct mgmt_link_key_info);
2818 if (expected_len != len) {
2819 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2822 MGMT_STATUS_INVALID_PARAMS);
2825 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2826 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2827 MGMT_STATUS_INVALID_PARAMS);
2829 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before any state is modified. */
2832 for (i = 0; i < key_count; i++) {
2833 struct mgmt_link_key_info *key = &cp->keys[i];
2835 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2836 return mgmt_cmd_status(sk, hdev->id,
2837 MGMT_OP_LOAD_LINK_KEYS,
2838 MGMT_STATUS_INVALID_PARAMS);
2843 hci_link_keys_clear(hdev);
2846 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2848 changed = hci_dev_test_and_clear_flag(hdev,
2849 HCI_KEEP_DEBUG_KEYS);
2852 new_settings(hdev, NULL);
2854 for (i = 0; i < key_count; i++) {
2855 struct mgmt_link_key_info *key = &cp->keys[i];
2857 /* Always ignore debug keys and require a new pairing if
2858 * the user wants to use them.
2860 if (key->type == HCI_LK_DEBUG_COMBINATION)
2863 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2864 key->type, key->pin_len, NULL);
2867 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2869 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk.
 * NOTE(review): the trailing skip_sk argument line of the mgmt_event() call
 * is missing from this extract.
 */
2874 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2875 u8 addr_type, struct sock *skip_sk)
2877 struct mgmt_ev_device_unpaired ev;
2879 bacpy(&ev.addr.bdaddr, bdaddr);
2880 ev.addr.type = addr_type;
2882 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2886 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2889 struct mgmt_cp_unpair_device *cp = data;
2890 struct mgmt_rp_unpair_device rp;
2891 struct hci_cp_disconnect dc;
2892 struct mgmt_pending_cmd *cmd;
2893 struct hci_conn *conn;
2896 memset(&rp, 0, sizeof(rp));
2897 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2898 rp.addr.type = cp->addr.type;
2900 if (!bdaddr_type_is_valid(cp->addr.type))
2901 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2902 MGMT_STATUS_INVALID_PARAMS,
2905 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2906 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2907 MGMT_STATUS_INVALID_PARAMS,
2912 if (!hdev_is_powered(hdev)) {
2913 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2914 MGMT_STATUS_NOT_POWERED, &rp,
2919 if (cp->addr.type == BDADDR_BREDR) {
2920 /* If disconnection is requested, then look up the
2921 * connection. If the remote device is connected, it
2922 * will be later used to terminate the link.
2924 * Setting it to NULL explicitly will cause no
2925 * termination of the link.
2928 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2933 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2937 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2940 /* Defer clearing up the connection parameters
2941 * until closing to give a chance of keeping
2942 * them if a repairing happens.
2944 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2946 /* If disconnection is not requested, then
2947 * clear the connection variable so that the
2948 * link is not terminated.
2950 if (!cp->disconnect)
2954 if (cp->addr.type == BDADDR_LE_PUBLIC)
2955 addr_type = ADDR_LE_DEV_PUBLIC;
2957 addr_type = ADDR_LE_DEV_RANDOM;
2959 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2961 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2965 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2966 MGMT_STATUS_NOT_PAIRED, &rp,
2971 /* If the connection variable is set, then termination of the
2972 * link is requested.
2975 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2977 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2981 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2988 cmd->cmd_complete = addr_cmd_complete;
2990 dc.handle = cpu_to_le16(conn->handle);
2991 dc.reason = 0x13; /* Remote User Terminated Connection */
2992 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2994 mgmt_pending_remove(cmd);
2997 hci_dev_unlock(hdev);
3001 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3004 struct mgmt_cp_disconnect *cp = data;
3005 struct mgmt_rp_disconnect rp;
3006 struct mgmt_pending_cmd *cmd;
3007 struct hci_conn *conn;
3012 memset(&rp, 0, sizeof(rp));
3013 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3014 rp.addr.type = cp->addr.type;
3016 if (!bdaddr_type_is_valid(cp->addr.type))
3017 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3018 MGMT_STATUS_INVALID_PARAMS,
3023 if (!test_bit(HCI_UP, &hdev->flags)) {
3024 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3025 MGMT_STATUS_NOT_POWERED, &rp,
3030 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3031 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3032 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3036 if (cp->addr.type == BDADDR_BREDR)
3037 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3040 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
3042 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3043 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3044 MGMT_STATUS_NOT_CONNECTED, &rp,
3049 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3055 cmd->cmd_complete = generic_cmd_complete;
3057 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3059 mgmt_pending_remove(cmd);
3062 hci_dev_unlock(hdev);
3066 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3068 switch (link_type) {
3070 switch (addr_type) {
3071 case ADDR_LE_DEV_PUBLIC:
3072 return BDADDR_LE_PUBLIC;
3075 /* Fallback to LE Random address type */
3076 return BDADDR_LE_RANDOM;
3080 /* Fallback to BR/EDR type */
3081 return BDADDR_BREDR;
3085 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3088 struct mgmt_rp_get_connections *rp;
3098 if (!hdev_is_powered(hdev)) {
3099 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3100 MGMT_STATUS_NOT_POWERED);
3105 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3106 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3110 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3111 rp = kmalloc(rp_len, GFP_KERNEL);
3118 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3119 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3121 bacpy(&rp->addr[i].bdaddr, &c->dst);
3122 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3123 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3128 rp->conn_count = cpu_to_le16(i);
3130 /* Recalculate length in case of filtered SCO connections, etc */
3131 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3133 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3139 hci_dev_unlock(hdev);
3143 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3144 struct mgmt_cp_pin_code_neg_reply *cp)
3146 struct mgmt_pending_cmd *cmd;
3149 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3154 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3155 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3157 mgmt_pending_remove(cmd);
3162 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3165 struct hci_conn *conn;
3166 struct mgmt_cp_pin_code_reply *cp = data;
3167 struct hci_cp_pin_code_reply reply;
3168 struct mgmt_pending_cmd *cmd;
3175 if (!hdev_is_powered(hdev)) {
3176 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3177 MGMT_STATUS_NOT_POWERED);
3181 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3183 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3184 MGMT_STATUS_NOT_CONNECTED);
3188 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3189 struct mgmt_cp_pin_code_neg_reply ncp;
3191 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3193 BT_ERR("PIN code is not 16 bytes long");
3195 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3197 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3198 MGMT_STATUS_INVALID_PARAMS);
3203 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3209 cmd->cmd_complete = addr_cmd_complete;
3211 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3212 reply.pin_len = cp->pin_len;
3213 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3215 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3217 mgmt_pending_remove(cmd);
3220 hci_dev_unlock(hdev);
3224 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3227 struct mgmt_cp_set_io_capability *cp = data;
3231 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3232 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3233 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3237 hdev->io_capability = cp->io_capability;
3239 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3240 hdev->io_capability);
3242 hci_dev_unlock(hdev);
3244 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3248 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3250 struct hci_dev *hdev = conn->hdev;
3251 struct mgmt_pending_cmd *cmd;
3253 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3254 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3257 if (cmd->user_data != conn)
3266 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3268 struct mgmt_rp_pair_device rp;
3269 struct hci_conn *conn = cmd->user_data;
3272 bacpy(&rp.addr.bdaddr, &conn->dst);
3273 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3275 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3276 status, &rp, sizeof(rp));
3278 /* So we don't get further callbacks for this connection */
3279 conn->connect_cfm_cb = NULL;
3280 conn->security_cfm_cb = NULL;
3281 conn->disconn_cfm_cb = NULL;
3283 hci_conn_drop(conn);
3285 /* The device is paired so there is no need to remove
3286 * its connection parameters anymore.
3288 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3295 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3297 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3298 struct mgmt_pending_cmd *cmd;
3300 cmd = find_pairing(conn);
3302 cmd->cmd_complete(cmd, status);
3303 mgmt_pending_remove(cmd);
3307 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3309 struct mgmt_pending_cmd *cmd;
3311 BT_DBG("status %u", status);
3313 cmd = find_pairing(conn);
3315 BT_DBG("Unable to find a pending command");
3319 cmd->cmd_complete(cmd, mgmt_status(status));
3320 mgmt_pending_remove(cmd);
3323 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3325 struct mgmt_pending_cmd *cmd;
3327 BT_DBG("status %u", status);
3332 cmd = find_pairing(conn);
3334 BT_DBG("Unable to find a pending command");
3338 cmd->cmd_complete(cmd, mgmt_status(status));
3339 mgmt_pending_remove(cmd);
3342 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3345 struct mgmt_cp_pair_device *cp = data;
3346 struct mgmt_rp_pair_device rp;
3347 struct mgmt_pending_cmd *cmd;
3348 u8 sec_level, auth_type;
3349 struct hci_conn *conn;
3354 memset(&rp, 0, sizeof(rp));
3355 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3356 rp.addr.type = cp->addr.type;
3358 if (!bdaddr_type_is_valid(cp->addr.type))
3359 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3360 MGMT_STATUS_INVALID_PARAMS,
3363 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3364 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3365 MGMT_STATUS_INVALID_PARAMS,
3370 if (!hdev_is_powered(hdev)) {
3371 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3372 MGMT_STATUS_NOT_POWERED, &rp,
3377 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3378 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3379 MGMT_STATUS_ALREADY_PAIRED, &rp,
3384 sec_level = BT_SECURITY_MEDIUM;
3385 auth_type = HCI_AT_DEDICATED_BONDING;
3387 if (cp->addr.type == BDADDR_BREDR) {
3388 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3393 /* Convert from L2CAP channel address type to HCI address type
3395 if (cp->addr.type == BDADDR_LE_PUBLIC)
3396 addr_type = ADDR_LE_DEV_PUBLIC;
3398 addr_type = ADDR_LE_DEV_RANDOM;
3400 /* When pairing a new device, it is expected to remember
3401 * this device for future connections. Adding the connection
3402 * parameter information ahead of time allows tracking
3403 * of the slave preferred values and will speed up any
3404 * further connection establishment.
3406 * If connection parameters already exist, then they
3407 * will be kept and this function does nothing.
3409 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3411 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3412 sec_level, HCI_LE_CONN_TIMEOUT,
3419 if (PTR_ERR(conn) == -EBUSY)
3420 status = MGMT_STATUS_BUSY;
3421 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3422 status = MGMT_STATUS_NOT_SUPPORTED;
3423 else if (PTR_ERR(conn) == -ECONNREFUSED)
3424 status = MGMT_STATUS_REJECTED;
3426 status = MGMT_STATUS_CONNECT_FAILED;
3428 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3429 status, &rp, sizeof(rp));
3433 if (conn->connect_cfm_cb) {
3434 hci_conn_drop(conn);
3435 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3436 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3440 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3443 hci_conn_drop(conn);
3447 cmd->cmd_complete = pairing_complete;
3449 /* For LE, just connecting isn't a proof that the pairing finished */
3450 if (cp->addr.type == BDADDR_BREDR) {
3451 conn->connect_cfm_cb = pairing_complete_cb;
3452 conn->security_cfm_cb = pairing_complete_cb;
3453 conn->disconn_cfm_cb = pairing_complete_cb;
3455 conn->connect_cfm_cb = le_pairing_complete_cb;
3456 conn->security_cfm_cb = le_pairing_complete_cb;
3457 conn->disconn_cfm_cb = le_pairing_complete_cb;
3460 conn->io_capability = cp->io_cap;
3461 cmd->user_data = hci_conn_get(conn);
3463 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3464 hci_conn_security(conn, sec_level, auth_type, true)) {
3465 cmd->cmd_complete(cmd, 0);
3466 mgmt_pending_remove(cmd);
3472 hci_dev_unlock(hdev);
3476 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3479 struct mgmt_addr_info *addr = data;
3480 struct mgmt_pending_cmd *cmd;
3481 struct hci_conn *conn;
3488 if (!hdev_is_powered(hdev)) {
3489 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3490 MGMT_STATUS_NOT_POWERED);
3494 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3496 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3497 MGMT_STATUS_INVALID_PARAMS);
3501 conn = cmd->user_data;
3503 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3504 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3505 MGMT_STATUS_INVALID_PARAMS);
3509 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3510 mgmt_pending_remove(cmd);
3512 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3513 addr, sizeof(*addr));
3515 hci_dev_unlock(hdev);
3519 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3520 struct mgmt_addr_info *addr, u16 mgmt_op,
3521 u16 hci_op, __le32 passkey)
3523 struct mgmt_pending_cmd *cmd;
3524 struct hci_conn *conn;
3529 if (!hdev_is_powered(hdev)) {
3530 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3531 MGMT_STATUS_NOT_POWERED, addr,
3536 if (addr->type == BDADDR_BREDR)
3537 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3539 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3542 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3543 MGMT_STATUS_NOT_CONNECTED, addr,
3548 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3549 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3551 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3552 MGMT_STATUS_SUCCESS, addr,
3555 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3556 MGMT_STATUS_FAILED, addr,
3562 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3568 cmd->cmd_complete = addr_cmd_complete;
3570 /* Continue with pairing via HCI */
3571 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3572 struct hci_cp_user_passkey_reply cp;
3574 bacpy(&cp.bdaddr, &addr->bdaddr);
3575 cp.passkey = passkey;
3576 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3578 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3582 mgmt_pending_remove(cmd);
3585 hci_dev_unlock(hdev);
3589 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3590 void *data, u16 len)
3592 struct mgmt_cp_pin_code_neg_reply *cp = data;
3596 return user_pairing_resp(sk, hdev, &cp->addr,
3597 MGMT_OP_PIN_CODE_NEG_REPLY,
3598 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3601 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3604 struct mgmt_cp_user_confirm_reply *cp = data;
3608 if (len != sizeof(*cp))
3609 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3610 MGMT_STATUS_INVALID_PARAMS);
3612 return user_pairing_resp(sk, hdev, &cp->addr,
3613 MGMT_OP_USER_CONFIRM_REPLY,
3614 HCI_OP_USER_CONFIRM_REPLY, 0);
3617 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3618 void *data, u16 len)
3620 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3624 return user_pairing_resp(sk, hdev, &cp->addr,
3625 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3626 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3629 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3632 struct mgmt_cp_user_passkey_reply *cp = data;
3636 return user_pairing_resp(sk, hdev, &cp->addr,
3637 MGMT_OP_USER_PASSKEY_REPLY,
3638 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3641 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3642 void *data, u16 len)
3644 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3648 return user_pairing_resp(sk, hdev, &cp->addr,
3649 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3650 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3653 static void update_name(struct hci_request *req)
3655 struct hci_dev *hdev = req->hdev;
3656 struct hci_cp_write_local_name cp;
3658 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3660 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3663 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3665 struct mgmt_cp_set_local_name *cp;
3666 struct mgmt_pending_cmd *cmd;
3668 BT_DBG("status 0x%02x", status);
3672 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3679 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3680 mgmt_status(status));
3682 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3685 mgmt_pending_remove(cmd);
3688 hci_dev_unlock(hdev);
3691 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3694 struct mgmt_cp_set_local_name *cp = data;
3695 struct mgmt_pending_cmd *cmd;
3696 struct hci_request req;
3703 /* If the old values are the same as the new ones just return a
3704 * direct command complete event.
3706 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3707 !memcmp(hdev->short_name, cp->short_name,
3708 sizeof(hdev->short_name))) {
3709 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3714 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3716 if (!hdev_is_powered(hdev)) {
3717 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3719 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3724 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3730 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3736 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3738 hci_req_init(&req, hdev);
3740 if (lmp_bredr_capable(hdev)) {
3745 /* The name is stored in the scan response data and so
3746 * no need to udpate the advertising data here.
3748 if (lmp_le_capable(hdev))
3749 update_scan_rsp_data(&req);
3751 err = hci_req_run(&req, set_name_complete);
3753 mgmt_pending_remove(cmd);
3756 hci_dev_unlock(hdev);
3760 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3761 void *data, u16 data_len)
3763 struct mgmt_pending_cmd *cmd;
3766 BT_DBG("%s", hdev->name);
3770 if (!hdev_is_powered(hdev)) {
3771 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3772 MGMT_STATUS_NOT_POWERED);
3776 if (!lmp_ssp_capable(hdev)) {
3777 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3778 MGMT_STATUS_NOT_SUPPORTED);
3782 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3783 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3788 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3794 if (bredr_sc_enabled(hdev))
3795 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3798 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3801 mgmt_pending_remove(cmd);
3804 hci_dev_unlock(hdev);
3808 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3809 void *data, u16 len)
3811 struct mgmt_addr_info *addr = data;
3814 BT_DBG("%s ", hdev->name);
3816 if (!bdaddr_type_is_valid(addr->type))
3817 return mgmt_cmd_complete(sk, hdev->id,
3818 MGMT_OP_ADD_REMOTE_OOB_DATA,
3819 MGMT_STATUS_INVALID_PARAMS,
3820 addr, sizeof(*addr));
3824 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3825 struct mgmt_cp_add_remote_oob_data *cp = data;
3828 if (cp->addr.type != BDADDR_BREDR) {
3829 err = mgmt_cmd_complete(sk, hdev->id,
3830 MGMT_OP_ADD_REMOTE_OOB_DATA,
3831 MGMT_STATUS_INVALID_PARAMS,
3832 &cp->addr, sizeof(cp->addr));
3836 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3837 cp->addr.type, cp->hash,
3838 cp->rand, NULL, NULL);
3840 status = MGMT_STATUS_FAILED;
3842 status = MGMT_STATUS_SUCCESS;
3844 err = mgmt_cmd_complete(sk, hdev->id,
3845 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3846 &cp->addr, sizeof(cp->addr));
3847 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3848 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3849 u8 *rand192, *hash192, *rand256, *hash256;
3852 if (bdaddr_type_is_le(cp->addr.type)) {
3853 /* Enforce zero-valued 192-bit parameters as
3854 * long as legacy SMP OOB isn't implemented.
3856 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3857 memcmp(cp->hash192, ZERO_KEY, 16)) {
3858 err = mgmt_cmd_complete(sk, hdev->id,
3859 MGMT_OP_ADD_REMOTE_OOB_DATA,
3860 MGMT_STATUS_INVALID_PARAMS,
3861 addr, sizeof(*addr));
3868 /* In case one of the P-192 values is set to zero,
3869 * then just disable OOB data for P-192.
3871 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3872 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3876 rand192 = cp->rand192;
3877 hash192 = cp->hash192;
3881 /* In case one of the P-256 values is set to zero, then just
3882 * disable OOB data for P-256.
3884 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3885 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3889 rand256 = cp->rand256;
3890 hash256 = cp->hash256;
3893 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3894 cp->addr.type, hash192, rand192,
3897 status = MGMT_STATUS_FAILED;
3899 status = MGMT_STATUS_SUCCESS;
3901 err = mgmt_cmd_complete(sk, hdev->id,
3902 MGMT_OP_ADD_REMOTE_OOB_DATA,
3903 status, &cp->addr, sizeof(cp->addr));
3905 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3906 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3907 MGMT_STATUS_INVALID_PARAMS);
3911 hci_dev_unlock(hdev);
3915 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3916 void *data, u16 len)
3918 struct mgmt_cp_remove_remote_oob_data *cp = data;
3922 BT_DBG("%s", hdev->name);
3924 if (cp->addr.type != BDADDR_BREDR)
3925 return mgmt_cmd_complete(sk, hdev->id,
3926 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3927 MGMT_STATUS_INVALID_PARAMS,
3928 &cp->addr, sizeof(cp->addr));
3932 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3933 hci_remote_oob_data_clear(hdev);
3934 status = MGMT_STATUS_SUCCESS;
3938 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3940 status = MGMT_STATUS_INVALID_PARAMS;
3942 status = MGMT_STATUS_SUCCESS;
3945 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3946 status, &cp->addr, sizeof(cp->addr));
3948 hci_dev_unlock(hdev);
3952 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
3954 struct hci_dev *hdev = req->hdev;
3955 struct hci_cp_inquiry cp;
3956 /* General inquiry access code (GIAC) */
3957 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3959 *status = mgmt_bredr_support(hdev);
3963 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
3964 *status = MGMT_STATUS_BUSY;
3968 hci_inquiry_cache_flush(hdev);
3970 memset(&cp, 0, sizeof(cp));
3971 memcpy(&cp.lap, lap, sizeof(cp.lap));
3972 cp.length = DISCOV_BREDR_INQUIRY_LEN;
3974 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3979 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
3981 struct hci_dev *hdev = req->hdev;
3982 struct hci_cp_le_set_scan_param param_cp;
3983 struct hci_cp_le_set_scan_enable enable_cp;
3987 *status = mgmt_le_support(hdev);
3991 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
3992 /* Don't let discovery abort an outgoing connection attempt
3993 * that's using directed advertising.
3995 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3996 *status = MGMT_STATUS_REJECTED;
4000 disable_advertising(req);
4003 /* If controller is scanning, it means the background scanning is
4004 * running. Thus, we should temporarily stop it in order to set the
4005 * discovery scanning parameters.
4007 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4008 hci_req_add_le_scan_disable(req);
4010 /* All active scans will be done with either a resolvable private
4011 * address (when privacy feature has been enabled) or non-resolvable
4014 err = hci_update_random_address(req, true, &own_addr_type);
4016 *status = MGMT_STATUS_FAILED;
4020 memset(¶m_cp, 0, sizeof(param_cp));
4021 param_cp.type = LE_SCAN_ACTIVE;
4022 param_cp.interval = cpu_to_le16(interval);
4023 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4024 param_cp.own_address_type = own_addr_type;
4026 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4029 memset(&enable_cp, 0, sizeof(enable_cp));
4030 enable_cp.enable = LE_SCAN_ENABLE;
4031 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4033 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
4039 static bool trigger_discovery(struct hci_request *req, u8 *status)
4041 struct hci_dev *hdev = req->hdev;
4043 switch (hdev->discovery.type) {
4044 case DISCOV_TYPE_BREDR:
4045 if (!trigger_bredr_inquiry(req, status))
4049 case DISCOV_TYPE_INTERLEAVED:
4050 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4052 /* During simultaneous discovery, we double LE scan
4053 * interval. We must leave some time for the controller
4054 * to do BR/EDR inquiry.
4056 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4060 if (!trigger_bredr_inquiry(req, status))
4066 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4067 *status = MGMT_STATUS_NOT_SUPPORTED;
4072 case DISCOV_TYPE_LE:
4073 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4078 *status = MGMT_STATUS_INVALID_PARAMS;
4085 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4088 struct mgmt_pending_cmd *cmd;
4089 unsigned long timeout;
4091 BT_DBG("status %d", status);
4095 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4097 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4100 cmd->cmd_complete(cmd, mgmt_status(status));
4101 mgmt_pending_remove(cmd);
4105 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4109 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4111 /* If the scan involves LE scan, pick proper timeout to schedule
4112 * hdev->le_scan_disable that will stop it.
4114 switch (hdev->discovery.type) {
4115 case DISCOV_TYPE_LE:
4116 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4118 case DISCOV_TYPE_INTERLEAVED:
4119 /* When running simultaneous discovery, the LE scanning time
4120 * should occupy the whole discovery time sine BR/EDR inquiry
4121 * and LE scanning are scheduled by the controller.
4123 * For interleaving discovery in comparison, BR/EDR inquiry
4124 * and LE scanning are done sequentially with separate
4127 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4128 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4130 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4132 case DISCOV_TYPE_BREDR:
4136 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4142 /* When service discovery is used and the controller has
4143 * a strict duplicate filter, it is important to remember
4144 * the start and duration of the scan. This is required
4145 * for restarting scanning during the discovery phase.
4147 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4149 hdev->discovery.result_filtering) {
4150 hdev->discovery.scan_start = jiffies;
4151 hdev->discovery.scan_duration = timeout;
4154 queue_delayed_work(hdev->workqueue,
4155 &hdev->le_scan_disable, timeout);
4159 hci_dev_unlock(hdev);
4162 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4163 void *data, u16 len)
4165 struct mgmt_cp_start_discovery *cp = data;
4166 struct mgmt_pending_cmd *cmd;
4167 struct hci_request req;
4171 BT_DBG("%s", hdev->name);
4175 if (!hdev_is_powered(hdev)) {
4176 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4177 MGMT_STATUS_NOT_POWERED,
4178 &cp->type, sizeof(cp->type));
4182 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4183 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4184 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4185 MGMT_STATUS_BUSY, &cp->type,
4190 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4196 cmd->cmd_complete = generic_cmd_complete;
4198 /* Clear the discovery filter first to free any previously
4199 * allocated memory for the UUID list.
4201 hci_discovery_filter_clear(hdev);
4203 hdev->discovery.type = cp->type;
4204 hdev->discovery.report_invalid_rssi = false;
4206 hci_req_init(&req, hdev);
4208 if (!trigger_discovery(&req, &status)) {
4209 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4210 status, &cp->type, sizeof(cp->type));
4211 mgmt_pending_remove(cmd);
4215 err = hci_req_run(&req, start_discovery_complete);
4217 mgmt_pending_remove(cmd);
4221 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4224 hci_dev_unlock(hdev);
4228 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4231 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4235 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4236 void *data, u16 len)
4238 struct mgmt_cp_start_service_discovery *cp = data;
4239 struct mgmt_pending_cmd *cmd;
4240 struct hci_request req;
4241 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4242 u16 uuid_count, expected_len;
4246 BT_DBG("%s", hdev->name);
4250 if (!hdev_is_powered(hdev)) {
4251 err = mgmt_cmd_complete(sk, hdev->id,
4252 MGMT_OP_START_SERVICE_DISCOVERY,
4253 MGMT_STATUS_NOT_POWERED,
4254 &cp->type, sizeof(cp->type));
4258 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4259 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4260 err = mgmt_cmd_complete(sk, hdev->id,
4261 MGMT_OP_START_SERVICE_DISCOVERY,
4262 MGMT_STATUS_BUSY, &cp->type,
4267 uuid_count = __le16_to_cpu(cp->uuid_count);
4268 if (uuid_count > max_uuid_count) {
4269 BT_ERR("service_discovery: too big uuid_count value %u",
4271 err = mgmt_cmd_complete(sk, hdev->id,
4272 MGMT_OP_START_SERVICE_DISCOVERY,
4273 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4278 expected_len = sizeof(*cp) + uuid_count * 16;
4279 if (expected_len != len) {
4280 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4282 err = mgmt_cmd_complete(sk, hdev->id,
4283 MGMT_OP_START_SERVICE_DISCOVERY,
4284 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4289 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4296 cmd->cmd_complete = service_discovery_cmd_complete;
4298 /* Clear the discovery filter first to free any previously
4299 * allocated memory for the UUID list.
4301 hci_discovery_filter_clear(hdev);
4303 hdev->discovery.result_filtering = true;
4304 hdev->discovery.type = cp->type;
4305 hdev->discovery.rssi = cp->rssi;
4306 hdev->discovery.uuid_count = uuid_count;
4308 if (uuid_count > 0) {
4309 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4311 if (!hdev->discovery.uuids) {
4312 err = mgmt_cmd_complete(sk, hdev->id,
4313 MGMT_OP_START_SERVICE_DISCOVERY,
4315 &cp->type, sizeof(cp->type));
4316 mgmt_pending_remove(cmd);
4321 hci_req_init(&req, hdev);
4323 if (!trigger_discovery(&req, &status)) {
4324 err = mgmt_cmd_complete(sk, hdev->id,
4325 MGMT_OP_START_SERVICE_DISCOVERY,
4326 status, &cp->type, sizeof(cp->type));
4327 mgmt_pending_remove(cmd);
4331 err = hci_req_run(&req, start_discovery_complete);
4333 mgmt_pending_remove(cmd);
4337 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4340 hci_dev_unlock(hdev);
4344 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4346 struct mgmt_pending_cmd *cmd;
4348 BT_DBG("status %d", status);
4352 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4354 cmd->cmd_complete(cmd, mgmt_status(status));
4355 mgmt_pending_remove(cmd);
4359 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4361 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler.
 *
 * Rejects the request when no discovery is active (MGMT_STATUS_REJECTED)
 * or when the requested discovery type does not match the one currently
 * running (MGMT_STATUS_INVALID_PARAMS). Otherwise queues a pending mgmt
 * command and runs an HCI request built by hci_stop_discovery(); the
 * result is delivered asynchronously via stop_discovery_complete.
 */
4364 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4367 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4368 struct mgmt_pending_cmd *cmd;
4369 struct hci_request req;
4372 BT_DBG("%s", hdev->name);
4376 if (!hci_discovery_active(hdev)) {
4377 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4378 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4379 sizeof(mgmt_cp->type));
4383 if (hdev->discovery.type != mgmt_cp->type) {
4384 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4385 MGMT_STATUS_INVALID_PARAMS,
4386 &mgmt_cp->type, sizeof(mgmt_cp->type));
4390 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4396 cmd->cmd_complete = generic_cmd_complete;
4398 hci_req_init(&req, hdev);
4400 hci_stop_discovery(&req);
4402 err = hci_req_run(&req, stop_discovery_complete);
4404 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4408 mgmt_pending_remove(cmd);
4410 /* If no HCI commands were sent we're done */
4411 if (err == -ENODATA) {
4412 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4413 &mgmt_cp->type, sizeof(mgmt_cp->type))
4414 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4418 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler.
 *
 * Only valid while discovery is active. Looks up the address in the
 * inquiry cache of entries with unknown names; when user space reports
 * the name as known the entry is marked NAME_KNOWN, otherwise it is
 * marked NAME_NEEDED and queued for name resolution.
 */
4422 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4425 struct mgmt_cp_confirm_name *cp = data;
4426 struct inquiry_entry *e;
4429 BT_DBG("%s", hdev->name);
4433 if (!hci_discovery_active(hdev)) {
4434 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4435 MGMT_STATUS_FAILED, &cp->addr,
4440 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4442 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4443 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4448 if (cp->name_known) {
4449 e->name_state = NAME_KNOWN;
4452 e->name_state = NAME_NEEDED;
4453 hci_inquiry_cache_update_resolve(hdev, e);
4456 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4457 &cp->addr, sizeof(cp->addr));
4460 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: validate the address type, add the
 * address to the controller blacklist and, on success, broadcast
 * MGMT_EV_DEVICE_BLOCKED to other mgmt sockets before replying.
 */
4464 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4467 struct mgmt_cp_block_device *cp = data;
4471 BT_DBG("%s", hdev->name);
4473 if (!bdaddr_type_is_valid(cp->addr.type))
4474 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4475 MGMT_STATUS_INVALID_PARAMS,
4476 &cp->addr, sizeof(cp->addr));
4480 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4483 status = MGMT_STATUS_FAILED;
4487 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4489 status = MGMT_STATUS_SUCCESS;
4492 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4493 &cp->addr, sizeof(cp->addr));
4495 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device(). Removes
 * the address from the blacklist; a failed delete is reported as
 * MGMT_STATUS_INVALID_PARAMS (the address was not blocked).
 */
4500 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4503 struct mgmt_cp_unblock_device *cp = data;
4507 BT_DBG("%s", hdev->name);
4509 if (!bdaddr_type_is_valid(cp->addr.type))
4510 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4511 MGMT_STATUS_INVALID_PARAMS,
4512 &cp->addr, sizeof(cp->addr));
4516 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4519 status = MGMT_STATUS_INVALID_PARAMS;
4523 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4525 status = MGMT_STATUS_SUCCESS;
4528 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4529 &cp->addr, sizeof(cp->addr));
4531 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record
 * (source/vendor/product/version) in hdev. Source values above 0x0002
 * are rejected. An HCI request is then run - presumably to refresh the
 * EIR/DI data on the controller (the request-building line is not
 * visible in this extraction; TODO confirm).
 */
4536 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4539 struct mgmt_cp_set_device_id *cp = data;
4540 struct hci_request req;
4544 BT_DBG("%s", hdev->name);
4546 source = __le16_to_cpu(cp->source);
4548 if (source > 0x0002)
4549 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4550 MGMT_STATUS_INVALID_PARAMS);
4554 hdev->devid_source = source;
4555 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4556 hdev->devid_product = __le16_to_cpu(cp->product);
4557 hdev->devid_version = __le16_to_cpu(cp->version);
4559 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4562 hci_req_init(&req, hdev);
4564 hci_req_run(&req, NULL);
4566 hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising;
 * only logs the HCI status.
 */
4571 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4574 BT_DBG("status %d", status);
/* Completion callback for the Set Advertising HCI request.
 *
 * On HCI error, fail all pending MGMT_OP_SET_ADVERTISING commands with
 * the translated status. On success, sync the mgmt HCI_ADVERTISING flag
 * with the controller-level HCI_LE_ADV state, answer the pending
 * commands via settings_rsp and broadcast New Settings. Finally, if Set
 * Advertising was just turned off but an advertising instance had been
 * configured, re-enable instance advertising with a fresh request.
 */
4577 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4580 struct cmd_lookup match = { NULL, hdev };
4581 struct hci_request req;
4586 u8 mgmt_err = mgmt_status(status);
4588 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4589 cmd_status_rsp, &mgmt_err);
4593 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4594 hci_dev_set_flag(hdev, HCI_ADVERTISING);
4596 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4598 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4601 new_settings(hdev, match.sk);
4606 /* If "Set Advertising" was just disabled and instance advertising was
4607 * set up earlier, then enable the advertising instance.
4609 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4610 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
4613 hci_req_init(&req, hdev);
4615 update_adv_data(&req);
4616 enable_advertising(&req);
4618 if (hci_req_run(&req, enable_advertising_instance) < 0)
4619 BT_ERR("Failed to re-configure advertising");
4622 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler.
 *
 * cp->val: 0x00 = off, 0x01 = advertising on, 0x02 = advertising on and
 * connectable (HCI_ADVERTISING_CONNECTABLE). Requires LE support.
 *
 * When no HCI traffic is needed - powered off, no effective change, an
 * LE connection exists, or an active LE scan is running - the flags are
 * toggled directly and a settings response (plus New Settings on change)
 * is sent. Otherwise a pending command is queued and an HCI request is
 * built to enable/disable advertising, completed by
 * set_advertising_complete. Instance "0" is used for the advertising
 * data of this setting.
 */
4625 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4628 struct mgmt_mode *cp = data;
4629 struct mgmt_pending_cmd *cmd;
4630 struct hci_request req;
4634 BT_DBG("request for %s", hdev->name);
4636 status = mgmt_le_support(hdev);
4638 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4641 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4642 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4643 MGMT_STATUS_INVALID_PARAMS);
4649 /* The following conditions are ones which mean that we should
4650 * not do any HCI communication but directly send a mgmt
4651 * response to user space (after toggling the flag if
4654 if (!hdev_is_powered(hdev) ||
4655 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4656 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4657 hci_conn_num(hdev, LE_LINK) > 0 ||
4658 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4659 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4663 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4664 if (cp->val == 0x02)
4665 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4667 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4669 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4670 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4673 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4678 err = new_settings(hdev, sk);
4683 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4684 pending_find(MGMT_OP_SET_LE, hdev)) {
4685 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4690 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4696 hci_req_init(&req, hdev);
4698 if (cp->val == 0x02)
4699 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4701 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4704 /* Switch to instance "0" for the Set Advertising setting. */
4705 update_adv_data_for_instance(&req, 0);
4706 update_scan_rsp_data_for_instance(&req, 0);
4707 enable_advertising(&req);
4709 disable_advertising(&req);
4712 err = hci_req_run(&req, set_advertising_complete);
4714 mgmt_pending_remove(cmd);
4717 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler.
 *
 * Only allowed on LE-capable controllers and only while powered off.
 * A non-ANY address must not be BDADDR_NONE and must have the two most
 * significant bits set, as required for LE static random addresses.
 * On success the address is stored and a New Settings event follows.
 */
4721 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4722 void *data, u16 len)
4724 struct mgmt_cp_set_static_address *cp = data;
4727 BT_DBG("%s", hdev->name);
4729 if (!lmp_le_capable(hdev))
4730 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4731 MGMT_STATUS_NOT_SUPPORTED);
4733 if (hdev_is_powered(hdev))
4734 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4735 MGMT_STATUS_REJECTED);
4737 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4738 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4739 return mgmt_cmd_status(sk, hdev->id,
4740 MGMT_OP_SET_STATIC_ADDRESS,
4741 MGMT_STATUS_INVALID_PARAMS);
4743 /* Two most significant bits shall be set */
4744 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4745 return mgmt_cmd_status(sk, hdev->id,
4746 MGMT_OP_SET_STATIC_ADDRESS,
4747 MGMT_STATUS_INVALID_PARAMS);
4752 bacpy(&hdev->static_addr, &cp->bdaddr);
4754 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4758 err = new_settings(hdev, sk);
4761 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler.
 *
 * Validates LE scan interval and window (both 0x0004..0x4000, window
 * must not exceed interval), stores them in hdev, and - if a passive
 * background scan is currently running with no discovery in progress -
 * restarts the scan so the new parameters take effect immediately.
 */
4765 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4766 void *data, u16 len)
4768 struct mgmt_cp_set_scan_params *cp = data;
4769 __u16 interval, window;
4772 BT_DBG("%s", hdev->name);
4774 if (!lmp_le_capable(hdev))
4775 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4776 MGMT_STATUS_NOT_SUPPORTED);
4778 interval = __le16_to_cpu(cp->interval);
4780 if (interval < 0x0004 || interval > 0x4000)
4781 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4782 MGMT_STATUS_INVALID_PARAMS);
4784 window = __le16_to_cpu(cp->window);
4786 if (window < 0x0004 || window > 0x4000)
4787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4788 MGMT_STATUS_INVALID_PARAMS);
4790 if (window > interval)
4791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4792 MGMT_STATUS_INVALID_PARAMS);
4796 hdev->le_scan_interval = interval;
4797 hdev->le_scan_window = window;
4799 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4802 /* If background scan is running, restart it so new parameters are
4805 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4806 hdev->discovery.state == DISCOVERY_STOPPED) {
4807 struct hci_request req;
4809 hci_req_init(&req, hdev);
4811 hci_req_add_le_scan_disable(&req);
4812 hci_req_add_le_passive_scan(&req);
4814 hci_req_run(&req, NULL);
4817 hci_dev_unlock(hdev);
/* Completion callback for the Set Fast Connectable HCI request: on
 * error send a command status; on success flip HCI_FAST_CONNECTABLE
 * according to the stored request parameter, answer the pending
 * command and broadcast New Settings.
 */
4822 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4825 struct mgmt_pending_cmd *cmd;
4827 BT_DBG("status 0x%02x", status);
4831 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4836 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4837 mgmt_status(status));
4839 struct mgmt_mode *cp = cmd->param;
4842 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4844 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4846 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4847 new_settings(hdev, cmd->sk);
4850 mgmt_pending_remove(cmd);
4853 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 *
 * Requires BR/EDR to be enabled and HCI version >= 1.2; val must be
 * 0x00 or 0x01. If the flag already matches, or the device is powered
 * off (flag flipped directly), only a settings response is sent.
 * Otherwise the page-scan parameters are reprogrammed via
 * write_fast_connectable() and completion is handled asynchronously.
 */
4856 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4857 void *data, u16 len)
4859 struct mgmt_mode *cp = data;
4860 struct mgmt_pending_cmd *cmd;
4861 struct hci_request req;
4864 BT_DBG("%s", hdev->name);
4866 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4867 hdev->hci_ver < BLUETOOTH_VER_1_2)
4868 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4869 MGMT_STATUS_NOT_SUPPORTED);
4871 if (cp->val != 0x00 && cp->val != 0x01)
4872 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4873 MGMT_STATUS_INVALID_PARAMS);
4877 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4878 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4883 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4884 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4889 if (!hdev_is_powered(hdev)) {
4890 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4891 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4893 new_settings(hdev, sk);
4897 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4904 hci_req_init(&req, hdev);
4906 write_fast_connectable(&req, cp->val);
4908 err = hci_req_run(&req, fast_connectable_complete);
4910 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4911 MGMT_STATUS_FAILED);
4912 mgmt_pending_remove(cmd);
4916 hci_dev_unlock(hdev);
/* Completion callback for the Set BR/EDR HCI request: on HCI error
 * roll back the optimistically-set HCI_BREDR_ENABLED flag and report
 * the error; on success answer the pending command and broadcast
 * New Settings.
 */
4921 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4923 struct mgmt_pending_cmd *cmd;
4925 BT_DBG("status 0x%02x", status);
4929 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4934 u8 mgmt_err = mgmt_status(status);
4936 /* We need to restore the flag if related HCI commands
4939 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4941 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4943 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4944 new_settings(hdev, cmd->sk);
4947 mgmt_pending_remove(cmd);
4950 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler - toggle BR/EDR support on a dual-mode
 * (BR/EDR + LE) controller.
 *
 * Requires both BR/EDR and LE capability and LE currently enabled.
 * Powered-off case: when disabling, clear the BR/EDR-dependent flags
 * (discoverable, SSP, link security, fast connectable, HS) and flip
 * HCI_BREDR_ENABLED directly. Powered-on case: disabling is rejected,
 * and re-enabling is rejected when a static address or Secure
 * Connections is in use (see the long comment below). Otherwise the
 * flag is set optimistically (so update_adv_data generates correct
 * flags), page scan is reconfigured and the advertising data updated;
 * set_bredr_complete finishes or rolls back.
 */
4953 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4955 struct mgmt_mode *cp = data;
4956 struct mgmt_pending_cmd *cmd;
4957 struct hci_request req;
4960 BT_DBG("request for %s", hdev->name);
4962 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4964 MGMT_STATUS_NOT_SUPPORTED);
4966 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4967 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4968 MGMT_STATUS_REJECTED);
4970 if (cp->val != 0x00 && cp->val != 0x01)
4971 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4972 MGMT_STATUS_INVALID_PARAMS);
4976 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4977 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4981 if (!hdev_is_powered(hdev)) {
4983 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4984 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4985 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4986 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4987 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4990 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4992 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4996 err = new_settings(hdev, sk);
5000 /* Reject disabling when powered on */
5002 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5003 MGMT_STATUS_REJECTED);
5006 /* When configuring a dual-mode controller to operate
5007 * with LE only and using a static address, then switching
5008 * BR/EDR back on is not allowed.
5010 * Dual-mode controllers shall operate with the public
5011 * address as its identity address for BR/EDR and LE. So
5012 * reject the attempt to create an invalid configuration.
5014 * The same restrictions applies when secure connections
5015 * has been enabled. For BR/EDR this is a controller feature
5016 * while for LE it is a host stack feature. This means that
5017 * switching BR/EDR back on when secure connections has been
5018 * enabled is not a supported transaction.
5020 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5021 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5022 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5024 MGMT_STATUS_REJECTED);
5029 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5030 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5035 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5041 /* We need to flip the bit already here so that update_adv_data
5042 * generates the correct flags.
5044 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5046 hci_req_init(&req, hdev);
5048 write_fast_connectable(&req, false);
5049 __hci_update_page_scan(&req);
5051 /* Since only the advertising data flags will change, there
5052 * is no need to update the scan response data.
5054 update_adv_data(&req);
5056 err = hci_req_run(&req, set_bredr_complete);
5058 mgmt_pending_remove(cmd);
5061 hci_dev_unlock(hdev);
/* Completion callback for the Write Secure Connections Support HCI
 * command: on error report the status; on success set HCI_SC_ENABLED /
 * HCI_SC_ONLY according to the requested mode (0x00 off, 0x01 enabled,
 * 0x02 SC-only - the switch lines themselves are not visible in this
 * extraction), then answer the pending command and broadcast settings.
 */
5065 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5067 struct mgmt_pending_cmd *cmd;
5068 struct mgmt_mode *cp;
5070 BT_DBG("%s status %u", hdev->name, status);
5074 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5079 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5080 mgmt_status(status));
5088 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5089 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5092 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5093 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5096 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5097 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5101 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5102 new_settings(hdev, cmd->sk);
5105 mgmt_pending_remove(cmd);
5107 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler.
 *
 * val: 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode. Supported
 * when the controller is SC-capable or LE is enabled; on an SC-capable
 * BR/EDR controller SSP must be enabled first. If the controller side
 * cannot be programmed (powered off, not SC-capable, or BR/EDR off)
 * the flags are toggled host-side only. Otherwise the mode is written
 * with HCI_OP_WRITE_SC_SUPPORT and finished in sc_enable_complete.
 */
5110 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5111 void *data, u16 len)
5113 struct mgmt_mode *cp = data;
5114 struct mgmt_pending_cmd *cmd;
5115 struct hci_request req;
5119 BT_DBG("request for %s", hdev->name);
5121 if (!lmp_sc_capable(hdev) &&
5122 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5123 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5124 MGMT_STATUS_NOT_SUPPORTED);
5126 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5127 lmp_sc_capable(hdev) &&
5128 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5129 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5130 MGMT_STATUS_REJECTED);
5132 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5133 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5134 MGMT_STATUS_INVALID_PARAMS);
5138 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5139 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5143 changed = !hci_dev_test_and_set_flag(hdev,
5145 if (cp->val == 0x02)
5146 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5148 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5150 changed = hci_dev_test_and_clear_flag(hdev,
5152 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5155 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5160 err = new_settings(hdev, sk);
5165 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5166 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5173 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5174 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5175 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5179 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5185 hci_req_init(&req, hdev);
5186 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5187 err = hci_req_run(&req, sc_enable_complete);
5189 mgmt_pending_remove(cmd);
5194 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.
 *
 * val: 0x00 = discard debug keys, 0x01 = keep debug keys
 * (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally use SSP debug mode
 * (HCI_USE_DEBUG_KEYS). When the "use" state changes on a powered
 * device with SSP enabled, the controller's SSP debug mode is updated
 * via HCI_OP_WRITE_SSP_DEBUG_MODE.
 */
5198 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5199 void *data, u16 len)
5201 struct mgmt_mode *cp = data;
5202 bool changed, use_changed;
5205 BT_DBG("request for %s", hdev->name);
5207 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5208 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5209 MGMT_STATUS_INVALID_PARAMS);
5214 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5216 changed = hci_dev_test_and_clear_flag(hdev,
5217 HCI_KEEP_DEBUG_KEYS);
5219 if (cp->val == 0x02)
5220 use_changed = !hci_dev_test_and_set_flag(hdev,
5221 HCI_USE_DEBUG_KEYS);
5223 use_changed = hci_dev_test_and_clear_flag(hdev,
5224 HCI_USE_DEBUG_KEYS);
5226 if (hdev_is_powered(hdev) && use_changed &&
5227 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5228 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5229 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5230 sizeof(mode), &mode);
5233 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5238 err = new_settings(hdev, sk);
5241 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler.
 *
 * Only allowed on LE-capable controllers and while powered off.
 * Enabling privacy stores the supplied IRK and marks the RPA expired
 * so a fresh resolvable private address gets generated; disabling
 * clears the IRK. Supporting this command implies user space handles
 * IRKs, so HCI_RPA_RESOLVING is set either way.
 */
5245 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5248 struct mgmt_cp_set_privacy *cp = cp_data;
5252 BT_DBG("request for %s", hdev->name);
5254 if (!lmp_le_capable(hdev))
5255 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5256 MGMT_STATUS_NOT_SUPPORTED);
5258 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5259 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5260 MGMT_STATUS_INVALID_PARAMS);
5262 if (hdev_is_powered(hdev))
5263 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5264 MGMT_STATUS_REJECTED);
5268 /* If user space supports this command it is also expected to
5269 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5271 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5274 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5275 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5276 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5278 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5279 memset(hdev->irk, 0, sizeof(hdev->irk));
5280 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5283 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5288 err = new_settings(hdev, sk);
5291 hci_dev_unlock(hdev);
/* Validate the address of a loaded IRK entry: LE public addresses are
 * always accepted; LE random addresses must be static (two most
 * significant bits set). Other address types are rejected.
 */
5295 static bool irk_is_valid(struct mgmt_irk_info *irk)
5297 switch (irk->addr.type) {
5298 case BDADDR_LE_PUBLIC:
5301 case BDADDR_LE_RANDOM:
5302 /* Two most significant bits shall be set */
5303 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Validates irk_count against the maximum that fits in a u16-sized
 * message (guarding the expected_len computation against overflow) and
 * checks that the payload length matches exactly. Every entry is
 * validated before the existing IRK list is cleared and replaced, so
 * a bad entry never destroys the current state. Finally sets
 * HCI_RPA_RESOLVING since user space clearly handles IRKs.
 */
5311 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5314 struct mgmt_cp_load_irks *cp = cp_data;
5315 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5316 sizeof(struct mgmt_irk_info));
5317 u16 irk_count, expected_len;
5320 BT_DBG("request for %s", hdev->name);
5322 if (!lmp_le_capable(hdev))
5323 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5324 MGMT_STATUS_NOT_SUPPORTED);
5326 irk_count = __le16_to_cpu(cp->irk_count);
5327 if (irk_count > max_irk_count) {
5328 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5330 MGMT_STATUS_INVALID_PARAMS);
5333 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5334 if (expected_len != len) {
5335 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5338 MGMT_STATUS_INVALID_PARAMS);
5341 BT_DBG("%s irk_count %u", hdev->name, irk_count);
5343 for (i = 0; i < irk_count; i++) {
5344 struct mgmt_irk_info *key = &cp->irks[i];
5346 if (!irk_is_valid(key))
5347 return mgmt_cmd_status(sk, hdev->id,
5349 MGMT_STATUS_INVALID_PARAMS);
5354 hci_smp_irks_clear(hdev);
5356 for (i = 0; i < irk_count; i++) {
5357 struct mgmt_irk_info *irk = &cp->irks[i];
5360 if (irk->addr.type == BDADDR_LE_PUBLIC)
5361 addr_type = ADDR_LE_DEV_PUBLIC;
5363 addr_type = ADDR_LE_DEV_RANDOM;
5365 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5369 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5371 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5373 hci_dev_unlock(hdev);
/* Validate a loaded LTK entry: the master field must be a boolean
 * (0x00/0x01) and the address must be LE public, or LE static random
 * (two most significant bits set).
 */
5378 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5380 if (key->master != 0x00 && key->master != 0x01)
5383 switch (key->addr.type) {
5384 case BDADDR_LE_PUBLIC:
5387 case BDADDR_LE_RANDOM:
5388 /* Two most significant bits shall be set */
5389 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Same shape as load_irks(): bound key_count so expected_len cannot
 * overflow a u16, verify the exact payload length, validate every
 * entry up front, then clear and repopulate the SMP LTK list. The
 * mgmt key type selects the SMP key type and authentication level:
 * legacy keys distinguish master/slave roles, P-256 keys do not, and
 * P-256 debug keys get their own SMP type.
 */
5397 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5398 void *cp_data, u16 len)
5400 struct mgmt_cp_load_long_term_keys *cp = cp_data;
5401 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5402 sizeof(struct mgmt_ltk_info));
5403 u16 key_count, expected_len;
5406 BT_DBG("request for %s", hdev->name);
5408 if (!lmp_le_capable(hdev))
5409 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5410 MGMT_STATUS_NOT_SUPPORTED);
5412 key_count = __le16_to_cpu(cp->key_count);
5413 if (key_count > max_key_count) {
5414 BT_ERR("load_ltks: too big key_count value %u", key_count);
5415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5416 MGMT_STATUS_INVALID_PARAMS);
5419 expected_len = sizeof(*cp) + key_count *
5420 sizeof(struct mgmt_ltk_info);
5421 if (expected_len != len) {
5422 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5425 MGMT_STATUS_INVALID_PARAMS);
5428 BT_DBG("%s key_count %u", hdev->name, key_count);
5430 for (i = 0; i < key_count; i++) {
5431 struct mgmt_ltk_info *key = &cp->keys[i];
5433 if (!ltk_is_valid(key))
5434 return mgmt_cmd_status(sk, hdev->id,
5435 MGMT_OP_LOAD_LONG_TERM_KEYS,
5436 MGMT_STATUS_INVALID_PARAMS);
5441 hci_smp_ltks_clear(hdev);
5443 for (i = 0; i < key_count; i++) {
5444 struct mgmt_ltk_info *key = &cp->keys[i];
5445 u8 type, addr_type, authenticated;
5447 if (key->addr.type == BDADDR_LE_PUBLIC)
5448 addr_type = ADDR_LE_DEV_PUBLIC;
5450 addr_type = ADDR_LE_DEV_RANDOM;
5452 switch (key->type) {
5453 case MGMT_LTK_UNAUTHENTICATED:
5454 authenticated = 0x00;
5455 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5457 case MGMT_LTK_AUTHENTICATED:
5458 authenticated = 0x01;
5459 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5461 case MGMT_LTK_P256_UNAUTH:
5462 authenticated = 0x00;
5463 type = SMP_LTK_P256;
5465 case MGMT_LTK_P256_AUTH:
5466 authenticated = 0x01;
5467 type = SMP_LTK_P256;
5469 case MGMT_LTK_P256_DEBUG:
5470 authenticated = 0x00;
5471 type = SMP_LTK_P256_DEBUG;
5476 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5477 authenticated, key->val, key->enc_size, key->ediv,
5481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5484 hci_dev_unlock(hdev);
/* Pending-command completer for MGMT_OP_GET_CONN_INFO: build the reply
 * from the cached hci_conn values on success, or from the INVALID
 * sentinels on failure, send it, and drop the connection reference
 * taken when the command was queued.
 */
5489 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5491 struct hci_conn *conn = cmd->user_data;
5492 struct mgmt_rp_get_conn_info rp;
5495 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5497 if (status == MGMT_STATUS_SUCCESS) {
5498 rp.rssi = conn->rssi;
5499 rp.tx_power = conn->tx_power;
5500 rp.max_tx_power = conn->max_tx_power;
5502 rp.rssi = HCI_RSSI_INVALID;
5503 rp.tx_power = HCI_TX_POWER_INVALID;
5504 rp.max_tx_power = HCI_TX_POWER_INVALID;
5507 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5508 status, &rp, sizeof(rp));
5510 hci_conn_drop(conn);
/* HCI request callback for the RSSI / TX power refresh issued by
 * get_conn_info(). Recovers the connection handle from whichever of
 * Read RSSI / Read Transmit Power Level was sent last (both start with
 * the handle), locates the matching pending command and completes it.
 */
5516 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5519 struct hci_cp_read_rssi *cp;
5520 struct mgmt_pending_cmd *cmd;
5521 struct hci_conn *conn;
5525 BT_DBG("status 0x%02x", hci_status);
5529 /* Commands sent in request are either Read RSSI or Read Transmit Power
5530 * Level so we check which one was last sent to retrieve connection
5531 * handle. Both commands have handle as first parameter so it's safe to
5532 * cast data on the same command struct.
5534 * First command sent is always Read RSSI and we fail only if it fails.
5535 * In other case we simply override error to indicate success as we
5536 * already remembered if TX power value is actually valid.
5538 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5540 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5541 status = MGMT_STATUS_SUCCESS;
5543 status = mgmt_status(hci_status);
5547 BT_ERR("invalid sent_cmd in conn_info response");
5551 handle = __le16_to_cpu(cp->handle);
5552 conn = hci_conn_hash_lookup_handle(hdev, handle);
5554 BT_ERR("unknown handle (%d) in conn_info response", handle);
5558 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5562 cmd->cmd_complete(cmd, status);
5563 mgmt_pending_remove(cmd);
5566 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler - report RSSI and TX power for an
 * existing connection.
 *
 * Fails early for invalid address type, powered-off adapter, missing
 * or not-connected link, or an already-pending query on the same
 * connection. If the cached values are younger than a randomized age
 * (between hdev->conn_info_min_age and conn_info_max_age, so clients
 * cannot predict the refresh moment), they are returned directly.
 * Otherwise an HCI request reading RSSI - and TX power where still
 * unknown - is queued and answered via conn_info_refresh_complete /
 * conn_info_cmd_complete. The pending command holds a connection
 * reference (hci_conn_hold/get) released in the completer.
 */
5569 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5572 struct mgmt_cp_get_conn_info *cp = data;
5573 struct mgmt_rp_get_conn_info rp;
5574 struct hci_conn *conn;
5575 unsigned long conn_info_age;
5578 BT_DBG("%s", hdev->name);
5580 memset(&rp, 0, sizeof(rp));
5581 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5582 rp.addr.type = cp->addr.type;
5584 if (!bdaddr_type_is_valid(cp->addr.type))
5585 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5586 MGMT_STATUS_INVALID_PARAMS,
5591 if (!hdev_is_powered(hdev)) {
5592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5593 MGMT_STATUS_NOT_POWERED, &rp,
5598 if (cp->addr.type == BDADDR_BREDR)
5599 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5602 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5604 if (!conn || conn->state != BT_CONNECTED) {
5605 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5606 MGMT_STATUS_NOT_CONNECTED, &rp,
5611 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5612 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5613 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5617 /* To avoid client trying to guess when to poll again for information we
5618 * calculate conn info age as random value between min/max set in hdev.
5620 conn_info_age = hdev->conn_info_min_age +
5621 prandom_u32_max(hdev->conn_info_max_age -
5622 hdev->conn_info_min_age);
5624 /* Query controller to refresh cached values if they are too old or were
5627 if (time_after(jiffies, conn->conn_info_timestamp +
5628 msecs_to_jiffies(conn_info_age)) ||
5629 !conn->conn_info_timestamp) {
5630 struct hci_request req;
5631 struct hci_cp_read_tx_power req_txp_cp;
5632 struct hci_cp_read_rssi req_rssi_cp;
5633 struct mgmt_pending_cmd *cmd;
5635 hci_req_init(&req, hdev);
5636 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5637 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5640 /* For LE links TX power does not change thus we don't need to
5641 * query for it once value is known.
5643 if (!bdaddr_type_is_le(cp->addr.type) ||
5644 conn->tx_power == HCI_TX_POWER_INVALID) {
5645 req_txp_cp.handle = cpu_to_le16(conn->handle);
5646 req_txp_cp.type = 0x00;
5647 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5648 sizeof(req_txp_cp), &req_txp_cp);
5651 /* Max TX power needs to be read only once per connection */
5652 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5653 req_txp_cp.handle = cpu_to_le16(conn->handle);
5654 req_txp_cp.type = 0x01;
5655 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5656 sizeof(req_txp_cp), &req_txp_cp);
5659 err = hci_req_run(&req, conn_info_refresh_complete);
5663 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
5670 hci_conn_hold(conn);
5671 cmd->user_data = hci_conn_get(conn);
5672 cmd->cmd_complete = conn_info_cmd_complete;
5674 conn->conn_info_timestamp = jiffies;
5676 /* Cache is valid, just reply with values cached in hci_conn */
5677 rp.rssi = conn->rssi;
5678 rp.tx_power = conn->tx_power;
5679 rp.max_tx_power = conn->max_tx_power;
5681 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5682 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5686 hci_dev_unlock(hdev);
/* Pending-command completer for MGMT_OP_GET_CLOCK_INFO: fill in the
 * local clock from hdev and, when a connection was involved, the
 * piconet clock and accuracy from the hci_conn, then send the reply
 * and drop the held connection reference.
 */
5690 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5692 struct hci_conn *conn = cmd->user_data;
5693 struct mgmt_rp_get_clock_info rp;
5694 struct hci_dev *hdev;
5697 memset(&rp, 0, sizeof(rp));
5698 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5703 hdev = hci_dev_get(cmd->index);
5705 rp.local_clock = cpu_to_le32(hdev->clock);
5710 rp.piconet_clock = cpu_to_le32(conn->clock);
5711 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5715 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5719 hci_conn_drop(conn);
/* HCI request callback for Read Clock issued by get_clock_info():
 * when the sent command asked for the piconet clock (which != 0),
 * resolve the hci_conn from the handle so the right pending command
 * is found, then complete and remove it.
 */
5726 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5728 struct hci_cp_read_clock *hci_cp;
5729 struct mgmt_pending_cmd *cmd;
5730 struct hci_conn *conn;
5732 BT_DBG("%s status %u", hdev->name, status);
5736 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5740 if (hci_cp->which) {
5741 u16 handle = __le16_to_cpu(hci_cp->handle);
5742 conn = hci_conn_hash_lookup_handle(hdev, handle);
5747 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5751 cmd->cmd_complete(cmd, mgmt_status(status));
5752 mgmt_pending_remove(cmd);
5755 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler - read the local clock and, for a
 * concrete BR/EDR address, the piconet clock of that connection.
 *
 * Only BDADDR_BREDR addresses are accepted; BDADDR_ANY means "local
 * clock only". Queues a Read Clock for the local clock (which = 0x00)
 * and, when a connected link exists, a second Read Clock with
 * which = 0x01 for the piconet clock, holding a connection reference
 * that clock_info_cmd_complete releases.
 */
5758 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5761 struct mgmt_cp_get_clock_info *cp = data;
5762 struct mgmt_rp_get_clock_info rp;
5763 struct hci_cp_read_clock hci_cp;
5764 struct mgmt_pending_cmd *cmd;
5765 struct hci_request req;
5766 struct hci_conn *conn;
5769 BT_DBG("%s", hdev->name);
5771 memset(&rp, 0, sizeof(rp));
5772 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5773 rp.addr.type = cp->addr.type;
5775 if (cp->addr.type != BDADDR_BREDR)
5776 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5777 MGMT_STATUS_INVALID_PARAMS,
5782 if (!hdev_is_powered(hdev)) {
5783 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5784 MGMT_STATUS_NOT_POWERED, &rp,
5789 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5790 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5792 if (!conn || conn->state != BT_CONNECTED) {
5793 err = mgmt_cmd_complete(sk, hdev->id,
5794 MGMT_OP_GET_CLOCK_INFO,
5795 MGMT_STATUS_NOT_CONNECTED,
5803 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5809 cmd->cmd_complete = clock_info_cmd_complete;
5811 hci_req_init(&req, hdev);
5813 memset(&hci_cp, 0, sizeof(hci_cp));
5814 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5817 hci_conn_hold(conn);
5818 cmd->user_data = hci_conn_get(conn);
5820 hci_cp.handle = cpu_to_le16(conn->handle);
5821 hci_cp.which = 0x01; /* Piconet clock */
5822 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5825 err = hci_req_run(&req, get_clock_info_complete);
5827 mgmt_pending_remove(cmd);
5830 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type is currently
 * established. NOTE(review): the return statements are elided in this chunk;
 * visible checks reject a dst_type mismatch and a non-BT_CONNECTED state.
 */
5834 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5836 struct hci_conn *conn;
5838 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5842 if (conn->dst_type != type)
5845 if (conn->state != BT_CONNECTED)
5851 /* This function requires the caller holds hdev->lock */
/* Create (if needed) the connection parameters for addr/addr_type and move
 * them onto the action list matching the requested auto_connect policy,
 * updating background scanning as a side effect.
 * NOTE(review): break statements between switch cases are elided here, and
 * "¶ms" lines look like mojibake for "&params" — verify against upstream.
 */
5852 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5853 u8 addr_type, u8 auto_connect)
5855 struct hci_dev *hdev = req->hdev;
5856 struct hci_conn_params *params;
5858 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do if the policy is unchanged. */
5862 if (params->auto_connect == auto_connect)
/* Detach from whichever pending list the params are currently on. */
5865 list_del_init(¶ms->action);
5867 switch (auto_connect) {
5868 case HCI_AUTO_CONN_DISABLED:
5869 case HCI_AUTO_CONN_LINK_LOSS:
5870 __hci_update_background_scan(req);
5872 case HCI_AUTO_CONN_REPORT:
5873 list_add(¶ms->action, &hdev->pend_le_reports);
5874 __hci_update_background_scan(req);
5876 case HCI_AUTO_CONN_DIRECT:
5877 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if we are not already connected. */
5878 if (!is_connected(hdev, addr, addr_type)) {
5879 list_add(¶ms->action, &hdev->pend_le_conns);
5880 __hci_update_background_scan(req);
5885 params->auto_connect = auto_connect;
5887 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event to all mgmt sockets except sk. */
5893 static void device_added(struct sock *sk, struct hci_dev *hdev,
5894 bdaddr_t *bdaddr, u8 type, u8 action)
5896 struct mgmt_ev_device_added ev;
5898 bacpy(&ev.addr.bdaddr, bdaddr);
5899 ev.addr.type = type;
5902 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Add Device: finish and remove the
 * pending MGMT_OP_ADD_DEVICE command with the translated status.
 */
5905 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5907 struct mgmt_pending_cmd *cmd;
5909 BT_DBG("status 0x%02x", status);
5913 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5917 cmd->cmd_complete(cmd, mgmt_status(status));
5918 mgmt_pending_remove(cmd);
5921 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_DEVICE: whitelist a BR/EDR device for incoming
 * connections, or configure an LE device's auto-connect policy.
 *
 * action: 0x00 = background scan (report), 0x01 = allow incoming/direct,
 * 0x02 = auto-connect. NOTE(review): several error-path returns and braces
 * are elided from this chunk; comments describe the visible logic only.
 */
5924 static int add_device(struct sock *sk, struct hci_dev *hdev,
5925 void *data, u16 len)
5927 struct mgmt_cp_add_device *cp = data;
5928 struct mgmt_pending_cmd *cmd;
5929 struct hci_request req;
5930 u8 auto_conn, addr_type;
5933 BT_DBG("%s", hdev->name);
/* Reject invalid address types and the any-address wildcard. */
5935 if (!bdaddr_type_is_valid(cp->addr.type) ||
5936 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5937 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5938 MGMT_STATUS_INVALID_PARAMS,
5939 &cp->addr, sizeof(cp->addr));
5941 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5942 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5943 MGMT_STATUS_INVALID_PARAMS,
5944 &cp->addr, sizeof(cp->addr));
5946 hci_req_init(&req, hdev);
5950 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5956 cmd->cmd_complete = addr_cmd_complete;
5958 if (cp->addr.type == BDADDR_BREDR) {
5959 /* Only incoming connections action is supported for now */
5960 if (cp->action != 0x01) {
5961 err = cmd->cmd_complete(cmd,
5962 MGMT_STATUS_INVALID_PARAMS);
5963 mgmt_pending_remove(cmd);
5967 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Whitelist changed: page scan state may need updating. */
5972 __hci_update_page_scan(&req);
/* LE path: translate mgmt address type to HCI address type. */
5977 if (cp->addr.type == BDADDR_LE_PUBLIC)
5978 addr_type = ADDR_LE_DEV_PUBLIC;
5980 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt action onto an auto-connect policy. */
5982 if (cp->action == 0x02)
5983 auto_conn = HCI_AUTO_CONN_ALWAYS;
5984 else if (cp->action == 0x01)
5985 auto_conn = HCI_AUTO_CONN_DIRECT;
5987 auto_conn = HCI_AUTO_CONN_REPORT;
5989 /* If the connection parameters don't exist for this device,
5990 * they will be created and configured with defaults.
5992 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5994 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5995 mgmt_pending_remove(cmd);
/* Notify other mgmt sockets about the new device entry. */
6000 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6002 err = hci_req_run(&req, add_device_complete);
6004 /* ENODATA means no HCI commands were needed (e.g. if
6005 * the adapter is powered off).
6007 if (err == -ENODATA)
6008 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6009 mgmt_pending_remove(cmd);
6013 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event to all mgmt sockets except sk. */
6017 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6018 bdaddr_t *bdaddr, u8 type)
6020 struct mgmt_ev_device_removed ev;
6022 bacpy(&ev.addr.bdaddr, bdaddr);
6023 ev.addr.type = type;
6025 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Remove Device: finish and remove the
 * pending MGMT_OP_REMOVE_DEVICE command with the translated status.
 */
6028 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6030 struct mgmt_pending_cmd *cmd;
6032 BT_DBG("status 0x%02x", status);
6036 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6040 cmd->cmd_complete(cmd, mgmt_status(status));
6041 mgmt_pending_remove(cmd);
6044 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_DEVICE: remove one device (BR/EDR whitelist
 * entry or LE connection parameters), or — when the address is BDADDR_ANY —
 * remove all whitelist entries and all non-disabled LE connection params.
 *
 * NOTE(review): error-path returns, braces and some list_del lines are
 * elided in this chunk, and "¶ms" looks like mojibake for "&params";
 * comments describe only the visible logic.
 */
6047 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6048 void *data, u16 len)
6050 struct mgmt_cp_remove_device *cp = data;
6051 struct mgmt_pending_cmd *cmd;
6052 struct hci_request req;
6055 BT_DBG("%s", hdev->name);
6057 hci_req_init(&req, hdev);
6061 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6067 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove one specific device. */
6069 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6070 struct hci_conn_params *params;
6073 if (!bdaddr_type_is_valid(cp->addr.type)) {
6074 err = cmd->cmd_complete(cmd,
6075 MGMT_STATUS_INVALID_PARAMS);
6076 mgmt_pending_remove(cmd);
/* BR/EDR: drop the whitelist entry and refresh page scanning. */
6080 if (cp->addr.type == BDADDR_BREDR) {
6081 err = hci_bdaddr_list_del(&hdev->whitelist,
6085 err = cmd->cmd_complete(cmd,
6086 MGMT_STATUS_INVALID_PARAMS);
6087 mgmt_pending_remove(cmd);
6091 __hci_update_page_scan(&req);
6093 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE: translate the mgmt address type to an HCI address type. */
6098 if (cp->addr.type == BDADDR_LE_PUBLIC)
6099 addr_type = ADDR_LE_DEV_PUBLIC;
6101 addr_type = ADDR_LE_DEV_RANDOM;
6103 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6106 err = cmd->cmd_complete(cmd,
6107 MGMT_STATUS_INVALID_PARAMS);
6108 mgmt_pending_remove(cmd);
/* Disabled params are not removable through this command. */
6112 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6113 err = cmd->cmd_complete(cmd,
6114 MGMT_STATUS_INVALID_PARAMS);
6115 mgmt_pending_remove(cmd);
6119 list_del(¶ms->action);
6120 list_del(¶ms->list);
6122 __hci_update_background_scan(&req);
6124 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: clear everything (type must be 0 for this form). */
6126 struct hci_conn_params *p, *tmp;
6127 struct bdaddr_list *b, *btmp;
6129 if (cp->addr.type) {
6130 err = cmd->cmd_complete(cmd,
6131 MGMT_STATUS_INVALID_PARAMS);
6132 mgmt_pending_remove(cmd);
6136 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6137 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6142 __hci_update_page_scan(&req);
/* Keep HCI_AUTO_CONN_DISABLED entries: they were set by SMP and not
 * through Add Device.
 */
6144 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6145 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6147 device_removed(sk, hdev, &p->addr, p->addr_type);
6148 list_del(&p->action);
6153 BT_DBG("All LE connection parameters were removed");
6155 __hci_update_background_scan(&req);
6159 err = hci_req_run(&req, remove_device_complete);
6161 /* ENODATA means no HCI commands were needed (e.g. if
6162 * the adapter is powered off).
6164 if (err == -ENODATA)
6165 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6166 mgmt_pending_remove(cmd);
6170 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list supplied by userspace. Invalid entries are
 * skipped with an error log rather than failing the whole command.
 *
 * NOTE(review): some continue/brace lines are elided in this chunk, and
 * "¶m" looks like mojibake for "&param"; comments describe the visible
 * logic only.
 */
6174 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6177 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on param_count such that the total length fits in u16. */
6178 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6179 sizeof(struct mgmt_conn_param));
6180 u16 param_count, expected_len;
6183 if (!lmp_le_capable(hdev))
6184 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6185 MGMT_STATUS_NOT_SUPPORTED);
6187 param_count = __le16_to_cpu(cp->param_count);
6188 if (param_count > max_param_count) {
6189 BT_ERR("load_conn_param: too big param_count value %u",
6191 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6192 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload length exactly. */
6195 expected_len = sizeof(*cp) + param_count *
6196 sizeof(struct mgmt_conn_param);
6197 if (expected_len != len) {
6198 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6200 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6201 MGMT_STATUS_INVALID_PARAMS);
6204 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Start from a clean slate of non-disabled params. */
6208 hci_conn_params_clear_disabled(hdev);
6210 for (i = 0; i < param_count; i++) {
6211 struct mgmt_conn_param *param = &cp->params[i];
6212 struct hci_conn_params *hci_param;
6213 u16 min, max, latency, timeout;
6216 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
6219 if (param->addr.type == BDADDR_LE_PUBLIC) {
6220 addr_type = ADDR_LE_DEV_PUBLIC;
6221 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6222 addr_type = ADDR_LE_DEV_RANDOM;
6224 BT_ERR("Ignoring invalid connection parameters");
6228 min = le16_to_cpu(param->min_interval);
6229 max = le16_to_cpu(param->max_interval);
6230 latency = le16_to_cpu(param->latency);
6231 timeout = le16_to_cpu(param->timeout);
6233 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6234 min, max, latency, timeout);
/* Range-check against the spec limits before storing. */
6236 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6237 BT_ERR("Ignoring invalid connection parameters");
6241 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6244 BT_ERR("Failed to add connection parameters");
6248 hci_param->conn_min_interval = min;
6249 hci_param->conn_max_interval = max;
6250 hci_param->conn_latency = latency;
6251 hci_param->supervision_timeout = timeout;
6254 hci_dev_unlock(hdev);
6256 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle whether the controller's
 * configuration is handled externally. Only allowed when the device is not
 * powered and declares the EXTERNAL_CONFIG quirk. May cause the controller
 * to move between the configured and unconfigured index lists.
 */
6260 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6261 void *data, u16 len)
6263 struct mgmt_cp_set_external_config *cp = data;
6267 BT_DBG("%s", hdev->name);
6269 if (hdev_is_powered(hdev))
6270 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6271 MGMT_STATUS_REJECTED);
6273 if (cp->config != 0x00 && cp->config != 0x01)
6274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6275 MGMT_STATUS_INVALID_PARAMS);
6277 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6278 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6279 MGMT_STATUS_NOT_SUPPORTED);
/* Track whether the EXT_CONFIGURED flag actually changed. */
6284 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6286 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6288 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6295 err = new_options(hdev, sk);
/* If the configured state changed, re-announce the index so userspace
 * sees it move between the configured/unconfigured lists.
 */
6297 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6298 mgmt_index_removed(hdev);
6300 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6301 hci_dev_set_flag(hdev, HCI_CONFIG);
6302 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
/* Trigger power-on so the configuration takes effect. */
6304 queue_work(hdev->req_workqueue, &hdev->power_on);
6306 set_bit(HCI_RAW, &hdev->flags);
6307 mgmt_index_added(hdev);
6312 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: program the controller's public
 * address via the driver's set_bdaddr hook. Only permitted while powered
 * off; becoming configured this way re-announces the controller index.
 */
6316 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6317 void *data, u16 len)
6319 struct mgmt_cp_set_public_address *cp = data;
6323 BT_DBG("%s", hdev->name);
6325 if (hdev_is_powered(hdev))
6326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6327 MGMT_STATUS_REJECTED);
/* The all-zero address is not a valid public address. */
6329 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6331 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook there is no way to program the address. */
6333 if (!hdev->set_bdaddr)
6334 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6335 MGMT_STATUS_NOT_SUPPORTED);
6339 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6340 bacpy(&hdev->public_addr, &cp->bdaddr);
6342 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6349 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6350 err = new_options(hdev, sk);
/* If setting the address completed configuration, move the controller
 * to the configured list and power it on.
 */
6352 if (is_configured(hdev)) {
6353 mgmt_index_removed(hdev);
6355 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6357 hci_dev_set_flag(hdev, HCI_CONFIG);
6358 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6360 queue_work(hdev->req_workqueue, &hdev->power_on);
6364 hci_dev_unlock(hdev);
/* Append one EIR/AD field (length byte, type byte, then data) at offset
 * eir_len in the eir buffer. NOTE(review): the trailing parameter, braces
 * and the return of the updated length are elided in this chunk. The
 * caller must guarantee the buffer has room for data_len + 2 bytes.
 */
6368 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6371 eir[eir_len++] = sizeof(type) + data_len;
6372 eir[eir_len++] = type;
6373 memcpy(&eir[eir_len], data, data_len);
6374 eir_len += data_len;
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: build the extended OOB data
 * blob (EIR-formatted) for either BR/EDR or LE, reply to the caller and
 * broadcast it to sockets that subscribed to OOB data events.
 *
 * NOTE(review): switch statement headers, braces and some cleanup lines
 * are elided in this chunk; comments describe the visible logic only.
 */
6379 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6380 void *data, u16 data_len)
6382 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6383 struct mgmt_rp_read_local_oob_ext_data *rp;
6386 u8 status, flags, role, addr[7], hash[16], rand[16];
6389 BT_DBG("%s", hdev->name);
6391 if (!hdev_is_powered(hdev))
6392 return mgmt_cmd_complete(sk, hdev->id,
6393 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6394 MGMT_STATUS_NOT_POWERED,
6395 &cp->type, sizeof(cp->type));
/* Validate the requested transport and compute the EIR length needed. */
6398 case BIT(BDADDR_BREDR):
6399 status = mgmt_bredr_support(hdev);
6401 return mgmt_cmd_complete(sk, hdev->id,
6402 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6407 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6408 status = mgmt_le_support(hdev);
6410 return mgmt_cmd_complete(sk, hdev->id,
6411 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
/* LE payload: address (9) + role (3) + SC confirm (18) + SC random (18)
 * + flags (3) — presumably; sizes inferred from the fields added below.
 */
6414 eir_len = 9 + 3 + 18 + 18 + 3;
6417 return mgmt_cmd_complete(sk, hdev->id,
6418 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6419 MGMT_STATUS_INVALID_PARAMS,
6420 &cp->type, sizeof(cp->type));
6425 rp_len = sizeof(*rp) + eir_len;
6426 rp = kmalloc(rp_len, GFP_ATOMIC);
6428 hci_dev_unlock(hdev);
/* Fill in the EIR payload for the selected transport. */
6434 case BIT(BDADDR_BREDR):
6435 eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
6436 hdev->dev_class, 3);
6438 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
/* With Secure Connections enabled, generate fresh OOB hash/random. */
6439 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6440 smp_generate_oob(hdev, hash, rand) < 0) {
6441 hci_dev_unlock(hdev);
6442 err = mgmt_cmd_complete(sk, hdev->id,
6443 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6445 &cp->type, sizeof(cp->type));
/* Pick the address to advertise: RPA if privacy is on, otherwise the
 * static address when forced/required, otherwise the public address.
 */
6449 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6450 memcpy(addr, &hdev->rpa, 6);
6452 } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6453 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6454 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6455 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6456 memcpy(addr, &hdev->static_addr, 6);
6459 memcpy(addr, &hdev->bdaddr, 6);
6463 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6464 addr, sizeof(addr));
/* Role selection depends on whether we are currently advertising. */
6466 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6471 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6472 &role, sizeof(role));
6474 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6475 eir_len = eir_append_data(rp->eir, eir_len,
6477 hash, sizeof(hash));
6479 eir_len = eir_append_data(rp->eir, eir_len,
6481 rand, sizeof(rand));
6484 flags = get_adv_discov_flags(hdev);
6486 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6487 flags |= LE_AD_NO_BREDR;
6489 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6490 &flags, sizeof(flags));
6494 rp->type = cp->type;
6495 rp->eir_len = cpu_to_le16(eir_len);
6497 hci_dev_unlock(hdev);
/* Make sure the requester also receives future OOB data updates. */
6499 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6501 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6502 MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
/* Broadcast the updated OOB data to subscribed sockets (excluding sk). */
6506 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6507 rp, sizeof(*rp) + eir_len,
6508 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the advertising instance flags this controller supports; TX power
 * is only advertised when the controller reports a valid adv_tx_power.
 */
6516 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6520 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6521 flags |= MGMT_ADV_FLAG_DISCOV;
6522 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6523 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6525 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6526 flags |= MGMT_ADV_FLAG_TX_POWER;
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-length limits and the (currently at most one) configured
 * advertising instance.
 */
6531 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6532 void *data, u16 data_len)
6534 struct mgmt_rp_read_adv_features *rp;
6538 u32 supported_flags;
6540 BT_DBG("%s", hdev->name);
6542 if (!lmp_le_capable(hdev))
6543 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6544 MGMT_STATUS_REJECTED);
6548 rp_len = sizeof(*rp);
6550 /* Currently only one instance is supported, so just add 1 to the
/* instance doubles as the count of configured instances (0 or 1). */
6553 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6557 rp = kmalloc(rp_len, GFP_ATOMIC);
6559 hci_dev_unlock(hdev);
6563 supported_flags = get_supported_adv_flags(hdev);
6565 rp->supported_flags = cpu_to_le32(supported_flags);
6566 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6567 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6568 rp->max_instances = 1;
6570 /* Currently only one instance is supported, so simply return the
6571 * current instance number.
6574 rp->num_instances = 1;
6575 rp->instance[0] = 1;
6577 rp->num_instances = 0;
6580 hci_dev_unlock(hdev);
6582 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6583 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Validate TLV-formatted advertising or scan response data supplied by
 * userspace: each field's length byte must stay within the buffer, and
 * fields the kernel manages itself (EIR_FLAGS when flags are managed,
 * EIR_TX_POWER when TX power is managed) must not appear in adv data.
 *
 * NOTE(review): the return statements, max_len adjustments and the final
 * length check are elided in this chunk.
 */
6590 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6591 u8 len, bool is_adv_data)
6593 u8 max_len = HCI_MAX_AD_LENGTH;
6595 bool flags_managed = false;
6596 bool tx_power_managed = false;
6597 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6598 MGMT_ADV_FLAG_MANAGED_FLAGS;
/* When the kernel manages the flags field, userspace may not set it. */
6600 if (is_adv_data && (adv_flags & flags_params)) {
6601 flags_managed = true;
6605 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6606 tx_power_managed = true;
6613 /* Make sure that the data is correctly formatted. */
6614 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6617 if (flags_managed && data[i + 1] == EIR_FLAGS)
6620 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6623 /* If the current field length would exceed the total data
6624 * length, then it's invalid.
6626 if (i + cur_len >= len)
/* HCI request completion callback for Add Advertising: on failure, tear
 * down the advertising instance and notify listeners it was removed; then
 * complete the pending command accordingly.
 */
6633 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6636 struct mgmt_pending_cmd *cmd;
6637 struct mgmt_rp_add_advertising rp;
6639 BT_DBG("status %d", status);
6643 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
/* Failure path: wipe the single supported instance (id 1). */
6646 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6647 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6648 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6657 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6658 mgmt_status(status));
6660 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6661 mgmt_status(status), &rp, sizeof(rp));
6663 mgmt_pending_remove(cmd);
6666 hci_dev_unlock(hdev);
/* Delayed work: the advertising instance's timeout expired, so clear the
 * timeout and remove the instance (under hdev->lock).
 */
6669 static void adv_timeout_expired(struct work_struct *work)
6671 struct hci_dev *hdev = container_of(work, struct hci_dev,
6672 adv_instance.timeout_exp.work);
6674 hdev->adv_instance.timeout = 0;
6677 clear_adv_instance(hdev);
6678 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_ADVERTISING: configure the single supported
 * advertising instance (instance 1) with the supplied flags, adv data,
 * scan response data and optional timeout, then program the controller.
 *
 * NOTE(review): several error-path returns and braces are elided in this
 * chunk; comments describe only the visible logic.
 */
6681 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6682 void *data, u16 data_len)
6684 struct mgmt_cp_add_advertising *cp = data;
6685 struct mgmt_rp_add_advertising rp;
6687 u32 supported_flags;
6691 struct mgmt_pending_cmd *cmd;
6692 struct hci_request req;
6694 BT_DBG("%s", hdev->name);
6696 status = mgmt_le_support(hdev);
6698 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6701 flags = __le32_to_cpu(cp->flags);
6702 timeout = __le16_to_cpu(cp->timeout);
6704 /* The current implementation only supports adding one instance and only
6705 * a subset of the specified flags.
6707 supported_flags = get_supported_adv_flags(hdev);
6708 if (cp->instance != 0x01 || (flags & ~supported_flags))
6709 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6710 MGMT_STATUS_INVALID_PARAMS,);
/* A timeout requires a powered adapter to be meaningful. */
6714 if (timeout && !hdev_is_powered(hdev)) {
6715 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6716 MGMT_STATUS_REJECTED);
/* Reject when a conflicting advertising/LE command is in flight. */
6720 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6721 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6722 pending_find(MGMT_OP_SET_LE, hdev)) {
6723 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Validate both the adv data and the scan response TLVs. */
6728 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6729 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6730 cp->scan_rsp_len, false)) {
6731 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6732 MGMT_STATUS_INVALID_PARAMS);
6736 INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
6738 hdev->adv_instance.flags = flags;
6739 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6740 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6742 if (cp->adv_data_len)
6743 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
6745 if (cp->scan_rsp_len)
6746 memcpy(hdev->adv_instance.scan_rsp_data,
6747 cp->data + cp->adv_data_len, cp->scan_rsp_len);
/* Restart the expiry timer if a previous timeout was pending. */
6749 if (hdev->adv_instance.timeout)
6750 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6752 hdev->adv_instance.timeout = timeout;
6755 queue_delayed_work(hdev->workqueue,
6756 &hdev->adv_instance.timeout_exp,
6757 msecs_to_jiffies(timeout * 1000));
/* Announce the instance if it was not already configured. */
6759 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6760 advertising_added(sk, hdev, 1);
6762 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6763 * we have no HCI communication to make. Simply return.
6765 if (!hdev_is_powered(hdev) ||
6766 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6768 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6769 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6773 /* We're good to go, update advertising data, parameters, and start
6776 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6783 hci_req_init(&req, hdev);
6785 update_adv_data(&req);
6786 update_scan_rsp_data(&req);
6787 enable_advertising(&req);
6789 err = hci_req_run(&req, add_advertising_complete);
6791 mgmt_pending_remove(cmd);
6794 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising. The instance
 * was already removed synchronously, so always report success here.
 */
6799 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6802 struct mgmt_pending_cmd *cmd;
6803 struct mgmt_rp_remove_advertising rp;
6805 BT_DBG("status %d", status);
6809 /* A failure status here only means that we failed to disable
6810 * advertising. Otherwise, the advertising instance has been removed,
6811 * so report success.
6813 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6819 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6821 mgmt_pending_remove(cmd);
6824 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: tear down the single supported
 * advertising instance (instance 1, or 0 meaning "all") and, when the
 * controller is powered and advertising via instances, disable it on the
 * controller as well.
 *
 * NOTE(review): some error-path returns and braces are elided in this
 * chunk; comments describe only the visible logic.
 */
6827 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6828 void *data, u16 data_len)
6830 struct mgmt_cp_remove_advertising *cp = data;
6831 struct mgmt_rp_remove_advertising rp;
6833 struct mgmt_pending_cmd *cmd;
6834 struct hci_request req;
6836 BT_DBG("%s", hdev->name);
6838 /* The current implementation only allows modifying instance no 1. A
6839 * value of 0 indicates that all instances should be cleared.
6841 if (cp->instance > 1)
6842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6843 MGMT_STATUS_INVALID_PARAMS);
/* Reject when a conflicting advertising/LE command is in flight. */
6847 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6848 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6849 pending_find(MGMT_OP_SET_LE, hdev)) {
6850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
/* Nothing to remove if no instance was configured. */
6855 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
6856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6857 MGMT_STATUS_INVALID_PARAMS);
6861 if (hdev->adv_instance.timeout)
6862 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6864 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6866 advertising_removed(sk, hdev, 1);
6868 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6870 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6871 * we have no HCI communication to make. Simply return.
6873 if (!hdev_is_powered(hdev) ||
6874 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6876 err = mgmt_cmd_complete(sk, hdev->id,
6877 MGMT_OP_REMOVE_ADVERTISING,
6878 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6882 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6889 hci_req_init(&req, hdev);
6890 disable_advertising(&req);
6892 err = hci_req_run(&req, remove_advertising_complete);
6894 mgmt_pending_remove(cmd);
6897 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by MGMT_OP_* opcode. Each
 * entry holds the handler, the expected (minimum) parameter size, and
 * optional HCI_MGMT_* flags (variable length, untrusted-socket access,
 * unconfigured-controller access). Entry order must match the opcode
 * numbering in mgmt.h.
 */
6902 static const struct hci_mgmt_handler mgmt_handlers[] = {
6903 { NULL }, /* 0x0000 (no command) */
6904 { read_version, MGMT_READ_VERSION_SIZE,
6906 HCI_MGMT_UNTRUSTED },
6907 { read_commands, MGMT_READ_COMMANDS_SIZE,
6909 HCI_MGMT_UNTRUSTED },
6910 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6912 HCI_MGMT_UNTRUSTED },
6913 { read_controller_info, MGMT_READ_INFO_SIZE,
6914 HCI_MGMT_UNTRUSTED },
6915 { set_powered, MGMT_SETTING_SIZE },
6916 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6917 { set_connectable, MGMT_SETTING_SIZE },
6918 { set_fast_connectable, MGMT_SETTING_SIZE },
6919 { set_bondable, MGMT_SETTING_SIZE },
6920 { set_link_security, MGMT_SETTING_SIZE },
6921 { set_ssp, MGMT_SETTING_SIZE },
6922 { set_hs, MGMT_SETTING_SIZE },
6923 { set_le, MGMT_SETTING_SIZE },
6924 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6925 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6926 { add_uuid, MGMT_ADD_UUID_SIZE },
6927 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6928 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6930 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6932 { disconnect, MGMT_DISCONNECT_SIZE },
6933 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6934 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6935 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6936 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6937 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6938 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6939 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6940 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6941 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6942 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6943 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6944 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6945 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6947 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6948 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6949 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6950 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6951 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6952 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6953 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6954 { set_advertising, MGMT_SETTING_SIZE },
6955 { set_bredr, MGMT_SETTING_SIZE },
6956 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6957 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6958 { set_secure_conn, MGMT_SETTING_SIZE },
6959 { set_debug_keys, MGMT_SETTING_SIZE },
6960 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6961 { load_irks, MGMT_LOAD_IRKS_SIZE,
6963 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6964 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6965 { add_device, MGMT_ADD_DEVICE_SIZE },
6966 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6967 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6969 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6971 HCI_MGMT_UNTRUSTED },
6972 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6973 HCI_MGMT_UNCONFIGURED |
6974 HCI_MGMT_UNTRUSTED },
6975 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6976 HCI_MGMT_UNCONFIGURED },
6977 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6978 HCI_MGMT_UNCONFIGURED },
6979 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6981 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6982 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6984 HCI_MGMT_UNTRUSTED },
6985 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
6986 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
6988 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Announce a newly registered controller: send the legacy (configured or
 * unconfigured) index-added event to matching listeners, then the extended
 * index-added event. Raw-only devices are never announced.
 */
6991 void mgmt_index_added(struct hci_dev *hdev)
6993 struct mgmt_ev_ext_index ev;
6995 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6998 switch (hdev->dev_type) {
7000 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7001 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7002 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7005 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7006 HCI_MGMT_INDEX_EVENTS);
7019 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7020 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal: fail all pending mgmt commands with
 * INVALID_INDEX, send the legacy (configured or unconfigured)
 * index-removed event, then the extended index-removed event. Raw-only
 * devices are never announced.
 */
7023 void mgmt_index_removed(struct hci_dev *hdev)
7025 struct mgmt_ev_ext_index ev;
7026 u8 status = MGMT_STATUS_INVALID_INDEX;
7028 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7031 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command. */
7033 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7035 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7036 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7037 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7040 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7041 HCI_MGMT_INDEX_EVENTS);
7054 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7055 HCI_MGMT_EXT_INDEX_EVENTS);
7058 /* This function requires the caller holds hdev->lock */
/* Re-sort every stored LE connection parameter entry onto the pending-
 * connection or pending-report list according to its auto_connect policy,
 * then refresh background scanning. Used when powering (back) on.
 */
7059 static void restart_le_actions(struct hci_request *req)
7061 struct hci_dev *hdev = req->hdev;
7062 struct hci_conn_params *p;
7064 list_for_each_entry(p, &hdev->le_conn_params, list) {
7065 /* Needed for AUTO_OFF case where might not "really"
7066 * have been powered off.
7068 list_del_init(&p->action);
7070 switch (p->auto_connect) {
7071 case HCI_AUTO_CONN_DIRECT:
7072 case HCI_AUTO_CONN_ALWAYS:
7073 list_add(&p->action, &hdev->pend_le_conns);
7075 case HCI_AUTO_CONN_REPORT:
7076 list_add(&p->action, &hdev->pend_le_reports);
7083 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence: answer all
 * pending Set Powered commands and emit New Settings if anything changed.
 */
7086 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7088 struct cmd_lookup match = { NULL, hdev };
7090 BT_DBG("status 0x%02x", status);
7093 /* Register the available SMP channels (BR/EDR and LE) only
7094 * when successfully powering on the controller. This late
7095 * registration is required so that LE SMP can clearly
7096 * decide if the public address or static address is used.
7103 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7105 new_settings(hdev, match.sk);
7107 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes the controller's state
 * with the mgmt flags after power-on: SSP/SC host support, LE host
 * support, advertising data, authentication, fast connectable and
 * page scan. Returns the result of hci_req_run().
 */
7113 static int powered_update_hci(struct hci_dev *hdev)
7115 struct hci_request req;
7118 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host feature
 * bit is not yet set.
 */
7120 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7121 !lmp_host_ssp_capable(hdev)) {
7124 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7126 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7129 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7130 sizeof(support), &support);
7134 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7135 lmp_bredr_capable(hdev)) {
7136 struct hci_cp_write_le_host_supported cp;
7141 /* Check first if we already have the right
7142 * host state (host features set)
7144 if (cp.le != lmp_host_le_capable(hdev) ||
7145 cp.simul != lmp_host_le_br_capable(hdev))
7146 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7150 if (lmp_le_capable(hdev)) {
7151 /* Make sure the controller has a good default for
7152 * advertising data. This also applies to the case
7153 * where BR/EDR was toggled during the AUTO_OFF phase.
7155 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7156 update_adv_data(&req);
7157 update_scan_rsp_data(&req);
7160 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7161 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7162 enable_advertising(&req);
7164 restart_le_actions(&req);
/* Sync link-level authentication with the mgmt setting. */
7167 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7168 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7169 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7170 sizeof(link_sec), &link_sec);
7172 if (lmp_bredr_capable(hdev)) {
7173 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7174 write_fast_connectable(&req, true);
7176 write_fast_connectable(&req, false);
7177 __hci_update_page_scan(&req);
7183 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a power state change. On power-on, run the HCI sync
 * request (powered_update_hci); on power-off, fail all pending commands
 * with the appropriate status, clear the class of device and emit New
 * Settings.
 */
7186 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7188 struct cmd_lookup match = { NULL, hdev };
7189 u8 status, zero_cod[] = { 0, 0, 0 };
7192 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* Power-on path: 0 means HCI commands were queued and the completion
 * callback will answer the pending commands instead.
 */
7196 if (powered_update_hci(hdev) == 0)
7199 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path below. */
7204 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7206 /* If the power off is because of hdev unregistration let
7207 * use the appropriate INVALID_INDEX status. Otherwise use
7208 * NOT_POWERED. We cover both scenarios here since later in
7209 * mgmt_index_removed() any hci_conn callbacks will have already
7210 * been triggered, potentially causing misleading DISCONNECTED
7213 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7214 status = MGMT_STATUS_INVALID_INDEX;
7216 status = MGMT_STATUS_NOT_POWERED;
7218 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Announce a zeroed class of device if one had been set. */
7220 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7221 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7222 zero_cod, sizeof(zero_cod), NULL);
7225 err = new_settings(hdev, match.sk);
7233 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
/* Fail a pending Set Powered command. RFKILL gets its dedicated mgmt
 * status so userspace can distinguish it from a generic failure.
 * NOTE(review): some lines (locking, NULL check on cmd) are elided here.
 */
7235 struct mgmt_pending_cmd *cmd;
7238 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7242 if (err == -ERFKILL)
7243 status = MGMT_STATUS_RFKILLED;
7245 status = MGMT_STATUS_FAILED;
7247 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7249 mgmt_pending_remove(cmd);
7252 void mgmt_discoverable_timeout(struct hci_dev *hdev)
/* Expire the discoverable mode timer: clear the discoverable flags,
 * disable inquiry scan (keeping page scan) for BR/EDR, refresh the
 * advertising data if Set Advertising is in effect, and emit
 * New Settings.
 */
7254 struct hci_request req;
7258 /* When discoverable timeout triggers, then just make sure
7259 * the limited discoverable flag is cleared. Even in the case
7260 * of a timeout triggered from general discoverable, it is
7261 * safe to unconditionally clear the flag.
7263 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7264 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7266 hci_req_init(&req, hdev);
7267 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* Drop SCAN_INQUIRY, keep SCAN_PAGE so the device stays connectable. */
7268 u8 scan = SCAN_PAGE;
7269 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7270 sizeof(scan), &scan);
7274 /* Advertising instances don't use the global discoverable setting, so
7275 * only update AD if advertising was enabled using Set Advertising.
7277 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7278 update_adv_data(&req);
7280 hci_req_run(&req, NULL);
7282 hdev->discov_timeout = 0;
7284 new_settings(hdev, NULL);
7286 hci_dev_unlock(hdev);
7289 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * The store hint tells userspace whether the key should be persisted.
 */
7292 struct mgmt_ev_new_link_key ev;
7294 memset(&ev, 0, sizeof(ev));
7296 ev.store_hint = persistent;
7297 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7298 ev.key.addr.type = BDADDR_BREDR;
7299 ev.key.type = key->type;
7300 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7301 ev.key.pin_len = key->pin_len;
7303 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7306 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
/* Map an SMP LTK type/authentication combination to the corresponding
 * mgmt LTK type constant.  Falls back to MGMT_LTK_UNAUTHENTICATED for
 * unknown types.  NOTE(review): the case labels for the first two
 * groups are elided in this view.
 */
7308 switch (ltk->type) {
7311 if (ltk->authenticated)
7312 return MGMT_LTK_AUTHENTICATED;
7313 return MGMT_LTK_UNAUTHENTICATED;
7315 if (ltk->authenticated)
7316 return MGMT_LTK_P256_AUTH;
7317 return MGMT_LTK_P256_UNAUTH;
7318 case SMP_LTK_P256_DEBUG:
7319 return MGMT_LTK_P256_DEBUG;
7322 return MGMT_LTK_UNAUTHENTICATED;
7325 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
/* Emit a New Long Term Key event.  Keys belonging to non-identity
 * (resolvable/non-resolvable private) random addresses are flagged as
 * not worth storing, since the address will change.
 */
7327 struct mgmt_ev_new_long_term_key ev;
7329 memset(&ev, 0, sizeof(ev));
7331 /* Devices using resolvable or non-resolvable random addresses
7332 * without providing an identity resolving key don't require
7333 * to store long term keys. Their addresses will change the
7336 * Only when a remote device provides an identity address
7337 * make sure the long term key is stored. If the remote
7338 * identity is known, the long term keys are internally
7339 * mapped to the identity address. So allow static random
7340 * and public addresses here.
/* Top two address bits != 11b means a non-static random address. */
7342 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7343 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7344 ev.store_hint = 0x00;
7346 ev.store_hint = persistent;
7348 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7349 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7350 ev.key.type = mgmt_ltk_type(key);
7351 ev.key.enc_size = key->enc_size;
7352 ev.key.ediv = key->ediv;
7353 ev.key.rand = key->rand;
7355 if (key->type == SMP_LTK)
7358 memcpy(ev.key.val, key->val, sizeof(key->val));
7360 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
7363 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
/* Emit a New Identity Resolving Key event.  Storage is only suggested
 * when the device actually uses a resolvable private address (non-zero
 * RPA), since that is the only case where the IRK is needed later.
 */
7365 struct mgmt_ev_new_irk ev;
7367 memset(&ev, 0, sizeof(ev));
7369 /* For identity resolving keys from devices that are already
7370 * using a public address or static random address, do not
7371 * ask for storing this key. The identity resolving key really
7372 * is only mandatory for devices using resolvable random
7375 * Storing all identity resolving keys has the downside that
7376 * they will be also loaded on next boot of the system. More
7377 * identity resolving keys, means more time during scanning is
7378 * needed to actually resolve these addresses.
7380 if (bacmp(&irk->rpa, BDADDR_ANY))
7381 ev.store_hint = 0x01;
7383 ev.store_hint = 0x00;
7385 bacpy(&ev.rpa, &irk->rpa);
7386 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7387 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7388 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7390 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7393 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
/* Emit a New Signature Resolving Key event.  Same storage policy as
 * for LTKs: keys tied to non-identity random addresses get a zero
 * store hint.
 */
7396 struct mgmt_ev_new_csrk ev;
7398 memset(&ev, 0, sizeof(ev));
7400 /* Devices using resolvable or non-resolvable random addresses
7401 * without providing an identity resolving key don't require
7402 * to store signature resolving keys. Their addresses will change
7403 * the next time around.
7405 * Only when a remote device provides an identity address
7406 * make sure the signature resolving key is stored. So allow
7407 * static random and public addresses here.
/* Top two address bits != 11b means a non-static random address. */
7409 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7410 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7411 ev.store_hint = 0x00;
7413 ev.store_hint = persistent;
7415 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7416 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7417 ev.key.type = csrk->type;
7418 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7420 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
7423 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7424 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7425 u16 max_interval, u16 latency, u16 timeout)
/* Emit a New Connection Parameter event for an LE device.  Parameters
 * for non-identity addresses are not reported, since they cannot be
 * meaningfully stored.  Multi-byte fields are converted to little
 * endian as required by the mgmt wire format.
 */
7427 struct mgmt_ev_new_conn_param ev;
7429 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7432 memset(&ev, 0, sizeof(ev));
7433 bacpy(&ev.addr.bdaddr, bdaddr);
7434 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7435 ev.store_hint = store_hint;
7436 ev.min_interval = cpu_to_le16(min_interval);
7437 ev.max_interval = cpu_to_le16(max_interval);
7438 ev.latency = cpu_to_le16(latency);
7439 ev.timeout = cpu_to_le16(timeout);
7441 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7444 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7445 u32 flags, u8 *name, u8 name_len)
/* Emit a Device Connected event.  The variable-length EIR portion
 * carries either the LE advertising data captured at connection time,
 * or (for BR/EDR) the remote name and class of device.
 * NOTE(review): the buf declaration and eir_len initialization are
 * elided in this view.
 */
7448 struct mgmt_ev_device_connected *ev = (void *) buf;
7451 bacpy(&ev->addr.bdaddr, &conn->dst);
7452 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7454 ev->flags = __cpu_to_le32(flags);
7456 /* We must ensure that the EIR Data fields are ordered and
7457 * unique. Keep it simple for now and avoid the problem by not
7458 * adding any BR/EDR data to the LE adv.
7460 if (conn->le_adv_data_len > 0) {
7461 memcpy(&ev->eir[eir_len],
7462 conn->le_adv_data, conn->le_adv_data_len);
7463 eir_len = conn->le_adv_data_len;
7466 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include class of device if it is non-zero. */
7469 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7470 eir_len = eir_append_data(ev->eir, eir_len,
7472 conn->dev_class, 3);
7475 ev->eir_len = cpu_to_le16(eir_len);
7477 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7478 sizeof(*ev) + eir_len, NULL);
7481 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and hand its socket back to the caller via the
 * struct sock ** cookie.  NOTE(review): the lines storing cmd->sk into
 * *sk are elided in this view.
 */
7483 struct sock **sk = data;
7485 cmd->cmd_complete(cmd, 0);
7490 mgmt_pending_remove(cmd);
7493 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
/* mgmt_pending_foreach() callback: emit Device Unpaired for the
 * address in the pending Unpair Device command, then complete and
 * remove the command.
 */
7495 struct hci_dev *hdev = data;
7496 struct mgmt_cp_unpair_device *cp = cmd->param;
7498 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7500 cmd->cmd_complete(cmd, 0);
7501 mgmt_pending_remove(cmd);
7504 bool mgmt_powering_down(struct hci_dev *hdev)
/* Report whether a power-down is in progress, i.e. whether a pending
 * Set Powered command exists.  NOTE(review): the remainder of this
 * function (inspecting cmd->param and the return statements) is elided
 * in this view.
 */
7506 struct mgmt_pending_cmd *cmd;
7507 struct mgmt_mode *cp;
7509 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7520 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7521 u8 link_type, u8 addr_type, u8 reason,
7522 bool mgmt_connected)
/* Emit a Device Disconnected event and complete any pending
 * Disconnect / Unpair Device commands for this connection.  If we are
 * powering down and this was the last connection, expedite the queued
 * power-off work.
 */
7524 struct mgmt_ev_device_disconnected ev;
7525 struct sock *sk = NULL;
7527 /* The connection is still in hci_conn_hash so test for 1
7528 * instead of 0 to know if this is the last one.
7530 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7531 cancel_delayed_work(&hdev->power_off);
7532 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Suppress the event if mgmt never saw this device as connected. */
7535 if (!mgmt_connected)
7538 if (link_type != ACL_LINK && link_type != LE_LINK)
7541 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7543 bacpy(&ev.addr.bdaddr, bdaddr);
7544 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7547 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7552 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7556 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7557 u8 link_type, u8 addr_type, u8 status)
/* A requested disconnect failed: complete any pending Unpair Device
 * commands, then fail the matching pending Disconnect command (matched
 * by address and address type) with the translated mgmt status.
 */
7559 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7560 struct mgmt_cp_disconnect *cp;
7561 struct mgmt_pending_cmd *cmd;
7563 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7566 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7572 if (bacmp(bdaddr, &cp->addr.bdaddr))
7575 if (cp->addr.type != bdaddr_type)
7578 cmd->cmd_complete(cmd, mgmt_status(status));
7579 mgmt_pending_remove(cmd);
7582 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7583 u8 addr_type, u8 status)
/* Emit a Connect Failed event.  As with disconnection, if a power-down
 * is pending and this was the last tracked connection, expedite the
 * power-off work.
 */
7585 struct mgmt_ev_connect_failed ev;
7587 /* The connection is still in hci_conn_hash so test for 1
7588 * instead of 0 to know if this is the last one.
7590 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7591 cancel_delayed_work(&hdev->power_off);
7592 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7595 bacpy(&ev.addr.bdaddr, bdaddr);
7596 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7597 ev.status = mgmt_status(status);
7599 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7602 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
/* Emit a PIN Code Request event for a BR/EDR device.  NOTE(review):
 * the line copying the 'secure' flag into the event is elided in this
 * view.
 */
7604 struct mgmt_ev_pin_code_request ev;
7606 bacpy(&ev.addr.bdaddr, bdaddr);
7607 ev.addr.type = BDADDR_BREDR;
7610 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7613 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Complete a pending PIN Code Reply command with the translated HCI
 * status.  NOTE(review): the NULL check on cmd is elided in this view.
 */
7616 struct mgmt_pending_cmd *cmd;
7618 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7622 cmd->cmd_complete(cmd, mgmt_status(status));
7623 mgmt_pending_remove(cmd);
7626 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status.  NOTE(review): the NULL check on cmd is
 * elided in this view.
 */
7629 struct mgmt_pending_cmd *cmd;
7631 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7635 cmd->cmd_complete(cmd, mgmt_status(status));
7636 mgmt_pending_remove(cmd);
7639 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7640 u8 link_type, u8 addr_type, u32 value,
/* Emit a User Confirmation Request event so userspace can confirm the
 * displayed numeric value during SSP/LE pairing.
 */
7643 struct mgmt_ev_user_confirm_request ev;
7645 BT_DBG("%s", hdev->name);
7647 bacpy(&ev.addr.bdaddr, bdaddr);
7648 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7649 ev.confirm_hint = confirm_hint;
7650 ev.value = cpu_to_le32(value);
7652 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7656 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7657 u8 link_type, u8 addr_type)
/* Emit a User Passkey Request event so userspace can supply a passkey
 * during pairing.
 */
7659 struct mgmt_ev_user_passkey_request ev;
7661 BT_DBG("%s", hdev->name);
7663 bacpy(&ev.addr.bdaddr, bdaddr);
7664 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7666 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7670 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7671 u8 link_type, u8 addr_type, u8 status,
/* Shared helper for the user confirm/passkey reply completions: find
 * the pending command for the given opcode and complete it with the
 * translated HCI status.  NOTE(review): the NULL check on cmd and the
 * return statements are elided in this view.
 */
7674 struct mgmt_pending_cmd *cmd;
7676 cmd = pending_find(opcode, hdev);
7680 cmd->cmd_complete(cmd, mgmt_status(status));
7681 mgmt_pending_remove(cmd);
/* Thin wrappers around user_pairing_resp_complete(), one per mgmt
 * opcode for the four user confirmation / passkey reply variants.
 */
7686 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7687 u8 link_type, u8 addr_type, u8 status)
7689 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7690 status, MGMT_OP_USER_CONFIRM_REPLY);
7693 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7694 u8 link_type, u8 addr_type, u8 status)
7696 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7698 MGMT_OP_USER_CONFIRM_NEG_REPLY);
7701 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7702 u8 link_type, u8 addr_type, u8 status)
7704 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7705 status, MGMT_OP_USER_PASSKEY_REPLY);
7708 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7709 u8 link_type, u8 addr_type, u8 status)
7711 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7713 MGMT_OP_USER_PASSKEY_NEG_REPLY);
7716 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7717 u8 link_type, u8 addr_type, u32 passkey,
/* Emit a Passkey Notify event: the passkey being displayed on this
 * side, plus how many digits the remote has entered so far.
 */
7720 struct mgmt_ev_passkey_notify ev;
7722 BT_DBG("%s", hdev->name);
7724 bacpy(&ev.addr.bdaddr, bdaddr);
7725 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7726 ev.passkey = __cpu_to_le32(passkey);
7727 ev.entered = entered;
7729 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7732 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
/* Emit an Authentication Failed event for a connection and, if a Pair
 * Device command is pending on it, complete that command with the
 * translated status.  The event is sent to all sockets except the one
 * that initiated the pairing (which gets the command response instead).
 */
7734 struct mgmt_ev_auth_failed ev;
7735 struct mgmt_pending_cmd *cmd;
7736 u8 status = mgmt_status(hci_status);
7738 bacpy(&ev.addr.bdaddr, &conn->dst);
7739 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7742 cmd = find_pairing(conn);
7744 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7745 cmd ? cmd->sk : NULL);
7748 cmd->cmd_complete(cmd, status);
7749 mgmt_pending_remove(cmd);
7753 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
/* Completion handler for Write Authentication Enable.  On failure,
 * fail all pending Set Link Security commands; on success, sync the
 * HCI_LINK_SECURITY flag with the HCI_AUTH state and emit New Settings
 * if it changed.
 */
7755 struct cmd_lookup match = { NULL, hdev };
7759 u8 mgmt_err = mgmt_status(status);
7760 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7761 cmd_status_rsp, &mgmt_err);
7765 if (test_bit(HCI_AUTH, &hdev->flags))
7766 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7768 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7770 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7774 new_settings(hdev, match.sk);
7780 static void clear_eir(struct hci_request *req)
/* Queue a Write Extended Inquiry Response command that zeroes the EIR
 * data, and clear the cached copy in hdev->eir.  No-op if the
 * controller lacks extended inquiry support.
 */
7782 struct hci_dev *hdev = req->hdev;
7783 struct hci_cp_write_eir cp;
7785 if (!lmp_ext_inq_capable(hdev))
7788 memset(hdev->eir, 0, sizeof(hdev->eir));
7790 memset(&cp, 0, sizeof(cp));
7792 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7795 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
/* Completion handler for Write Simple Pairing Mode.  On failure, roll
 * back the SSP/HS flags and fail pending Set SSP commands; on success,
 * sync the flags, answer pending commands, emit New Settings if
 * anything changed, and update debug-key mode / EIR as needed.
 */
7797 struct cmd_lookup match = { NULL, hdev };
7798 struct hci_request req;
7799 bool changed = false;
7802 u8 mgmt_err = mgmt_status(status);
/* Failure path: undo the optimistic flag changes made when the
 * command was issued. */
7804 if (enable && hci_dev_test_and_clear_flag(hdev,
7806 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7807 new_settings(hdev, NULL);
7810 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7816 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7818 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7820 changed = hci_dev_test_and_clear_flag(hdev,
7823 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7826 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7829 new_settings(hdev, match.sk);
7834 hci_req_init(&req, hdev);
7836 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7837 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7838 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7839 sizeof(enable), &enable);
7845 hci_req_run(&req, NULL);
7848 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
/* mgmt_pending_foreach() callback: capture (and hold a reference to)
 * the first pending command's socket in the cmd_lookup cookie.
 */
7850 struct cmd_lookup *match = data;
7852 if (match->sk == NULL) {
7853 match->sk = cmd->sk;
7854 sock_hold(match->sk);
7858 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
/* Completion handler for a class-of-device update: locate the socket
 * of any pending Set Device Class / Add UUID / Remove UUID command and
 * broadcast the Class Of Device Changed event (skipping that socket).
 * NOTE(review): the status check and sock_put() are elided here.
 */
7861 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7863 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7864 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7865 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7868 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7869 dev_class, 3, NULL);
7875 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
/* Completion handler for a local name change: update the cached name,
 * and emit Local Name Changed unless the write was part of the
 * power-on sequence (in which case userspace did not request it).
 */
7877 struct mgmt_cp_set_local_name ev;
7878 struct mgmt_pending_cmd *cmd;
7883 memset(&ev, 0, sizeof(ev));
7884 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7885 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7887 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7889 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7891 /* If this is a HCI command related to powering on the
7892 * HCI dev don't send any mgmt signals.
7894 if (pending_find(MGMT_OP_SET_POWERED, hdev))
7898 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7899 cmd ? cmd->sk : NULL);
7902 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7903 u8 *rand192, u8 *hash256, u8 *rand256,
/* Completion handler for Read Local OOB Data: answer the pending
 * command with either an error status or the OOB hash/randomizer
 * values.  The 256-bit (Secure Connections) values are only included
 * when BR/EDR SC is enabled and the controller provided them; the
 * response is shrunk otherwise.
 */
7906 struct mgmt_pending_cmd *cmd;
7908 BT_DBG("%s status %u", hdev->name, status);
7910 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7915 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7916 mgmt_status(status));
7918 struct mgmt_rp_read_local_oob_data rp;
7919 size_t rp_size = sizeof(rp);
7921 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7922 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7924 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7925 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7926 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
/* No SC values: trim the response to the 192-bit fields only. */
7928 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7931 mgmt_cmd_complete(cmd->sk, hdev->id,
7932 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7936 mgmt_pending_remove(cmd);
7939 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
/* Linear search: return whether the 128-bit uuid appears in the uuids
 * array.  NOTE(review): the loop-variable declaration and the return
 * statements are elided in this view.
 */
7943 for (i = 0; i < uuid_count; i++) {
7944 if (!memcmp(uuid, uuids[i], 16))
7951 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
/* Walk the EIR/advertising data TLV structures and report whether any
 * advertised service UUID (16-, 32- or 128-bit, complete or partial
 * lists) matches an entry in uuids.  16/32-bit UUIDs are expanded to
 * 128-bit form via the Bluetooth base UUID before comparison.
 */
7955 while (parsed < eir_len) {
7956 u8 field_len = eir[0];
/* Guard against a field that claims to extend past the buffer. */
7963 if (eir_len - parsed < field_len + 1)
7967 case EIR_UUID16_ALL:
7968 case EIR_UUID16_SOME:
/* Each 16-bit UUID occupies bytes [i+2], [i+3] (little endian);
 * splice it into bytes 12-13 of the base UUID. */
7969 for (i = 0; i + 3 <= field_len; i += 2) {
7970 memcpy(uuid, bluetooth_base_uuid, 16);
7971 uuid[13] = eir[i + 3];
7972 uuid[12] = eir[i + 2];
7973 if (has_uuid(uuid, uuid_count, uuids))
7977 case EIR_UUID32_ALL:
7978 case EIR_UUID32_SOME:
7979 for (i = 0; i + 5 <= field_len; i += 4) {
7980 memcpy(uuid, bluetooth_base_uuid, 16);
7981 uuid[15] = eir[i + 5];
7982 uuid[14] = eir[i + 4];
7983 uuid[13] = eir[i + 3];
7984 uuid[12] = eir[i + 2];
7985 if (has_uuid(uuid, uuid_count, uuids))
7989 case EIR_UUID128_ALL:
7990 case EIR_UUID128_SOME:
7991 for (i = 0; i + 17 <= field_len; i += 16) {
7992 memcpy(uuid, eir + i + 2, 16);
7993 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: one length byte plus field_len bytes. */
7999 parsed += field_len + 1;
8000 eir += field_len + 1;
8006 static void restart_le_scan(struct hci_dev *hdev)
/* Schedule a delayed LE scan restart so that duplicate filtering is
 * reset and updated RSSI values can be observed.  Skipped when not
 * scanning or when the restart would not fit inside the remaining
 * scan window.
 */
8008 /* If controller is not scanning we are done. */
8009 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8012 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8013 hdev->discovery.scan_start +
8014 hdev->discovery.scan_duration))
8017 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
8018 DISCOV_LE_RESTART_DELAY);
8021 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8022 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
/* Apply the Start Service Discovery filter to a found device: enforce
 * the RSSI threshold and, if a UUID list was given, require at least
 * one matching UUID in the EIR or scan response data.  Returns whether
 * the result should be reported.  NOTE(review): the return statements
 * themselves are elided in this view.
 */
8024 /* If a RSSI threshold has been specified, and
8025 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8026 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8027 * is set, let it through for further processing, as we might need to
8030 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8031 * the results are also dropped.
8033 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8034 (rssi == HCI_RSSI_INVALID ||
8035 (rssi < hdev->discovery.rssi &&
8036 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8039 if (hdev->discovery.uuid_count != 0) {
8040 /* If a list of UUIDs is provided in filter, results with no
8041 * matching UUID should be dropped.
8043 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8044 hdev->discovery.uuids) &&
8045 !eir_has_uuids(scan_rsp, scan_rsp_len,
8046 hdev->discovery.uuid_count,
8047 hdev->discovery.uuids))
8051 /* If duplicate filtering does not report RSSI changes, then restart
8052 * scanning to ensure updated result with updated RSSI values.
8054 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8055 restart_le_scan(hdev);
8057 /* Validate RSSI value against the RSSI threshold once more. */
8058 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8059 rssi < hdev->discovery.rssi)
8066 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8067 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8068 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
/* Emit a Device Found event for an inquiry/scan result: apply the
 * discovery filter, then pack EIR/advertising data, an optional class
 * of device field, and any scan response data into one event buffer.
 * NOTE(review): the buf declaration and several early-return lines are
 * elided in this view.
 */
8071 struct mgmt_ev_device_found *ev = (void *)buf;
8074 /* Don't send events for a non-kernel initiated discovery. With
8075 * LE one exception is if we have pend_le_reports > 0 in which
8076 * case we're doing passive scanning and want these events.
8078 if (!hci_discovery_active(hdev)) {
8079 if (link_type == ACL_LINK)
8081 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8085 if (hdev->discovery.result_filtering) {
8086 /* We are using service discovery */
8087 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8092 /* Make sure that the buffer is big enough. The 5 extra bytes
8093 * are for the potential CoD field.
8095 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8098 memset(buf, 0, sizeof(buf));
8100 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8101 * RSSI value was reported as 0 when not available. This behavior
8102 * is kept when using device discovery. This is required for full
8103 * backwards compatibility with the API.
8105 * However when using service discovery, the value 127 will be
8106 * returned when the RSSI is not available.
8108 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8109 link_type == ACL_LINK)
8112 bacpy(&ev->addr.bdaddr, bdaddr);
8113 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8115 ev->flags = cpu_to_le32(flags);
8118 /* Copy EIR or advertising data into event */
8119 memcpy(ev->eir, eir, eir_len);
/* Append class of device only if the EIR does not already carry one. */
8121 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8122 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8125 if (scan_rsp_len > 0)
8126 /* Append scan response data to event */
8127 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8129 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8130 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8132 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
8135 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8136 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
/* Emit a Device Found event carrying only the remote name (as an
 * EIR_NAME_COMPLETE field), used when a name resolution completes
 * during discovery.
 */
8138 struct mgmt_ev_device_found *ev;
8139 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8142 ev = (struct mgmt_ev_device_found *) buf;
8144 memset(buf, 0, sizeof(buf));
8146 bacpy(&ev->addr.bdaddr, bdaddr);
8147 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8150 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8153 ev->eir_len = cpu_to_le16(eir_len);
8155 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8158 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
/* Emit a Discovering event announcing that discovery of the current
 * type has started or stopped.
 */
8160 struct mgmt_ev_discovering ev;
8162 BT_DBG("%s discovering %u", hdev->name, discovering);
8164 memset(&ev, 0, sizeof(ev));
8165 ev.type = hdev->discovery.type;
8166 ev.discovering = discovering;
8168 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8171 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
/* hci_req_run() completion callback for re-enabling advertising; only
 * logs the status. */
8173 BT_DBG("%s status %u", hdev->name, status);
8176 void mgmt_reenable_advertising(struct hci_dev *hdev)
/* Re-enable advertising if either the global Set Advertising setting
 * or an advertising instance requires it (e.g. after a connection
 * consumed the advertising state).
 */
8178 struct hci_request req;
8180 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8181 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8184 hci_req_init(&req, hdev);
8185 enable_advertising(&req);
8186 hci_req_run(&req, adv_enable_complete);
/* HCI control channel descriptor: routes mgmt commands received on
 * HCI_CHANNEL_CONTROL sockets to the mgmt_handlers table.
 */
8189 static struct hci_mgmt_chan chan = {
8190 .channel = HCI_CHANNEL_CONTROL,
8191 .handler_count = ARRAY_SIZE(mgmt_handlers),
8192 .handlers = mgmt_handlers,
8193 .hdev_init = mgmt_init_hdev,
8198 return hci_mgmt_chan_register(&chan);
8201 void mgmt_exit(void)
/* Tear down the management interface by unregistering the control
 * channel registered in mgmt_init(). */
8203 hci_mgmt_chan_unregister(&chan);