2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
/* Management interface version reported via MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	9
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
/* How long the UUID/class service cache stays valid after power-on */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key, used to reject obviously bogus link/LTK keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_index_list *rp;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
360 read_unlock(&hci_dev_list_lock);
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
420 read_unlock(&hci_dev_list_lock);
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
479 read_unlock(&hci_dev_list_lock);
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
546 static __le32 get_missing_options(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
583 BT_DBG("sock %p %s", sk, hdev->name);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
605 static u32 get_supported_settings(struct hci_dev *hdev)
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
640 settings |= MGMT_SETTING_CONFIGURATION;
645 static u32 get_current_settings(struct hci_dev *hdev)
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
713 #define PNP_INFO_SVCLASS_ID 0x1200
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 16)
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
739 uuids_start[1] = EIR_UUID16_ALL;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
772 uuids_start[1] = EIR_UUID32_ALL;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
784 uuids_start[0] += sizeof(u32);
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
805 uuids_start[1] = EIR_UUID128_ALL;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
815 memcpy(ptr, uuid->uuid, 16);
817 uuids_start[0] += 16;
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
835 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
840 name_len = strlen(hdev->dev_name);
842 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
844 if (name_len > max_len) {
846 ptr[1] = EIR_NAME_SHORT;
848 ptr[1] = EIR_NAME_COMPLETE;
850 ptr[0] = name_len + 1;
852 memcpy(ptr + 2, hdev->dev_name, name_len);
854 ad_len += (name_len + 2);
855 ptr += (name_len + 2);
861 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
863 /* TODO: Set the appropriate entries based on advertising instance flags
864 * here once flags other than 0 are supported.
866 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
867 hdev->adv_instance.scan_rsp_len);
869 return hdev->adv_instance.scan_rsp_len;
872 static void update_scan_rsp_data_for_instance(struct hci_request *req,
875 struct hci_dev *hdev = req->hdev;
876 struct hci_cp_le_set_scan_rsp_data cp;
879 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 memset(&cp, 0, sizeof(cp));
885 len = create_instance_scan_rsp_data(hdev, cp.data);
887 len = create_default_scan_rsp_data(hdev, cp.data);
889 if (hdev->scan_rsp_data_len == len &&
890 !memcmp(cp.data, hdev->scan_rsp_data, len))
893 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
894 hdev->scan_rsp_data_len = len;
898 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
901 static void update_scan_rsp_data(struct hci_request *req)
903 struct hci_dev *hdev = req->hdev;
906 /* The "Set Advertising" setting supersedes the "Add Advertising"
907 * setting. Here we set the scan response data based on which
908 * setting was set. When neither apply, default to the global settings,
909 * represented by instance "0".
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
917 update_scan_rsp_data_for_instance(req, instance);
920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
922 struct mgmt_pending_cmd *cmd;
924 /* If there's a pending mgmt command the flags will not yet have
925 * their final values, so check for this first.
927 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
929 struct mgmt_mode *cp = cmd->param;
931 return LE_AD_GENERAL;
932 else if (cp->val == 0x02)
933 return LE_AD_LIMITED;
935 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 return LE_AD_LIMITED;
937 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 return LE_AD_GENERAL;
944 static u8 create_default_adv_data(struct hci_dev *hdev, u8 *ptr)
946 u8 ad_len = 0, flags = 0;
948 flags |= get_adv_discov_flags(hdev);
950 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
951 flags |= LE_AD_NO_BREDR;
954 BT_DBG("adv flags 0x%02x", flags);
964 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
966 ptr[1] = EIR_TX_POWER;
967 ptr[2] = (u8) hdev->adv_tx_power;
976 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 *ptr)
978 u8 ad_len = 0, flags = 0;
980 /* The Add Advertising command allows userspace to set both the general
981 * and limited discoverable flags.
983 if (hdev->adv_instance.flags & MGMT_ADV_FLAG_DISCOV)
984 flags |= LE_AD_GENERAL;
986 if (hdev->adv_instance.flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
987 flags |= LE_AD_LIMITED;
989 if (flags || (hdev->adv_instance.flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
990 /* If a discovery flag wasn't provided, simply use the global
994 flags |= get_adv_discov_flags(hdev);
996 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
997 flags |= LE_AD_NO_BREDR;
1007 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1008 (hdev->adv_instance.flags & MGMT_ADV_FLAG_TX_POWER)) {
1010 ptr[1] = EIR_TX_POWER;
1011 ptr[2] = (u8)hdev->adv_tx_power;
1017 memcpy(ptr, hdev->adv_instance.adv_data,
1018 hdev->adv_instance.adv_data_len);
1019 ad_len += hdev->adv_instance.adv_data_len;
1024 static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
1026 struct hci_dev *hdev = req->hdev;
1027 struct hci_cp_le_set_adv_data cp;
1030 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1033 memset(&cp, 0, sizeof(cp));
1036 len = create_instance_adv_data(hdev, cp.data);
1038 len = create_default_adv_data(hdev, cp.data);
1040 /* There's nothing to do if the data hasn't changed */
1041 if (hdev->adv_data_len == len &&
1042 memcmp(cp.data, hdev->adv_data, len) == 0)
1045 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1046 hdev->adv_data_len = len;
1050 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1053 static u8 get_current_adv_instance(struct hci_dev *hdev)
1055 /* The "Set Advertising" setting supersedes the "Add Advertising"
1056 * setting. Here we set the advertising data based on which
1057 * setting was set. When neither apply, default to the global settings,
1058 * represented by instance "0".
1060 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
1061 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
1067 static bool get_connectable(struct hci_dev *hdev)
1069 struct mgmt_pending_cmd *cmd;
1071 /* If there's a pending mgmt command the flag will not yet have
1072 * it's final value, so check for this first.
1074 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1076 struct mgmt_mode *cp = cmd->param;
1081 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1084 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1088 if (instance > 0x01)
1092 return hdev->adv_instance.flags;
1096 /* For instance 0, assemble the flags from global settings */
1097 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE) ||
1098 get_connectable(hdev))
1099 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1101 /* TODO: Add the rest of the flags */
1106 static void update_adv_data(struct hci_request *req)
1108 struct hci_dev *hdev = req->hdev;
1109 u8 instance = get_current_adv_instance(hdev);
1111 update_adv_data_for_instance(req, instance);
1114 int mgmt_update_adv_data(struct hci_dev *hdev)
1116 struct hci_request req;
1118 hci_req_init(&req, hdev);
1119 update_adv_data(&req);
1121 return hci_req_run(&req, NULL);
1124 static void create_eir(struct hci_dev *hdev, u8 *data)
1129 name_len = strlen(hdev->dev_name);
1133 if (name_len > 48) {
1135 ptr[1] = EIR_NAME_SHORT;
1137 ptr[1] = EIR_NAME_COMPLETE;
1139 /* EIR Data length */
1140 ptr[0] = name_len + 1;
1142 memcpy(ptr + 2, hdev->dev_name, name_len);
1144 ptr += (name_len + 2);
1147 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1149 ptr[1] = EIR_TX_POWER;
1150 ptr[2] = (u8) hdev->inq_tx_power;
1155 if (hdev->devid_source > 0) {
1157 ptr[1] = EIR_DEVICE_ID;
1159 put_unaligned_le16(hdev->devid_source, ptr + 2);
1160 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1161 put_unaligned_le16(hdev->devid_product, ptr + 6);
1162 put_unaligned_le16(hdev->devid_version, ptr + 8);
1167 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1168 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1169 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1172 static void update_eir(struct hci_request *req)
1174 struct hci_dev *hdev = req->hdev;
1175 struct hci_cp_write_eir cp;
1177 if (!hdev_is_powered(hdev))
1180 if (!lmp_ext_inq_capable(hdev))
1183 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1186 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1189 memset(&cp, 0, sizeof(cp));
1191 create_eir(hdev, cp.data);
1193 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1196 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1198 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1201 static u8 get_service_classes(struct hci_dev *hdev)
1203 struct bt_uuid *uuid;
1206 list_for_each_entry(uuid, &hdev->uuids, list)
1207 val |= uuid->svc_hint;
1212 static void update_class(struct hci_request *req)
1214 struct hci_dev *hdev = req->hdev;
1217 BT_DBG("%s", hdev->name);
1219 if (!hdev_is_powered(hdev))
1222 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1225 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1228 cod[0] = hdev->minor_class;
1229 cod[1] = hdev->major_class;
1230 cod[2] = get_service_classes(hdev);
1232 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1235 if (memcmp(cod, hdev->dev_class, 3) == 0)
1238 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1241 static void disable_advertising(struct hci_request *req)
1245 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1248 static void enable_advertising(struct hci_request *req)
1250 struct hci_dev *hdev = req->hdev;
1251 struct hci_cp_le_set_adv_param cp;
1252 u8 own_addr_type, enable = 0x01;
1257 if (hci_conn_num(hdev, LE_LINK) > 0)
1260 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1261 disable_advertising(req);
1263 /* Clear the HCI_LE_ADV bit temporarily so that the
1264 * hci_update_random_address knows that it's safe to go ahead
1265 * and write a new random address. The flag will be set back on
1266 * as soon as the SET_ADV_ENABLE HCI command completes.
1268 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1270 instance = get_current_adv_instance(hdev);
1271 flags = get_adv_instance_flags(hdev, instance);
1272 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE);
1274 /* Set require_privacy to true only when non-connectable
1275 * advertising is used. In that case it is fine to use a
1276 * non-resolvable private address.
1278 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1281 memset(&cp, 0, sizeof(cp));
1282 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1283 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1284 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1285 cp.own_address_type = own_addr_type;
1286 cp.channel_map = hdev->le_adv_channel_map;
1288 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1290 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1293 static void service_cache_off(struct work_struct *work)
1295 struct hci_dev *hdev = container_of(work, struct hci_dev,
1296 service_cache.work);
1297 struct hci_request req;
1299 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1302 hci_req_init(&req, hdev);
1309 hci_dev_unlock(hdev);
1311 hci_req_run(&req, NULL);
1314 static void rpa_expired(struct work_struct *work)
1316 struct hci_dev *hdev = container_of(work, struct hci_dev,
1318 struct hci_request req;
1322 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1324 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1327 /* The generation of a new RPA and programming it into the
1328 * controller happens in the enable_advertising() function.
1330 hci_req_init(&req, hdev);
1331 enable_advertising(&req);
1332 hci_req_run(&req, NULL);
1335 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1337 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1340 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1341 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1343 /* Non-mgmt controlled devices get this bit set
1344 * implicitly so that pairing works for them, however
1345 * for mgmt we require user-space to explicitly enable
1348 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: fills a mgmt_rp_read_info with the
 * controller address, HCI version/manufacturer, supported and current
 * settings, class of device and names, then replies with status 0.
 * data/data_len are unused (the command has no parameters).
 */
1351 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1352 void *data, u16 data_len)
1354 struct mgmt_rp_read_info rp;
1356 BT_DBG("sock %p %s", sk, hdev->name);
1360 memset(&rp, 0, sizeof(rp));
1362 bacpy(&rp.bdaddr, &hdev->bdaddr);
1364 rp.version = hdev->hci_ver;
1365 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1367 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1368 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1370 memcpy(rp.dev_class, hdev->dev_class, 3);
1372 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1373 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1375 hci_dev_unlock(hdev);
1377 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Send a successful command-complete for @opcode carrying the current
 * settings bitmask (little endian) as the response payload.
 */
1381 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1383 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1385 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* HCI request completion callback for clean_up_hci_state(): once no
 * connections remain, short-circuit the pending power_off delayed work
 * and run it immediately on the request workqueue.
 */
1389 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1391 BT_DBG("%s status 0x%02x", hdev->name, status);
1393 if (hci_conn_count(hdev) == 0) {
1394 cancel_delayed_work(&hdev->power_off);
1395 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Append the HCI commands needed to stop any ongoing discovery to @req:
 * inquiry cancel and/or LE scan disable while finding, remote name
 * request cancel while resolving, and LE scan disable for passive
 * scanning.  Returns whether any stop command was queued (return
 * statements are elided in this extract).
 */
1399 static bool hci_stop_discovery(struct hci_request *req)
1401 struct hci_dev *hdev = req->hdev;
1402 struct hci_cp_remote_name_req_cancel cp;
1403 struct inquiry_entry *e;
1405 switch (hdev->discovery.state) {
1406 case DISCOVERY_FINDING:
1407 if (test_bit(HCI_INQUIRY, &hdev->flags))
1408 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1410 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1411 cancel_delayed_work(&hdev->le_scan_disable);
1412 hci_req_add_le_scan_disable(req);
1417 case DISCOVERY_RESOLVING:
1418 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1423 bacpy(&cp.bdaddr, &e->data.bdaddr);
1424 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1430 /* Passive scanning */
1431 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1432 hci_req_add_le_scan_disable(req);
/* Emit MGMT_EV_ADVERTISING_ADDED for the given instance, skipping the
 * socket @sk that triggered the change.
 */
1442 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1445 struct mgmt_ev_advertising_added ev;
1447 ev.instance = instance;
1449 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADVERTISING_REMOVED for the given instance, skipping the
 * socket @sk that triggered the change (NULL skips nobody).
 */
1452 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1455 struct mgmt_ev_advertising_removed ev;
1457 ev.instance = instance;
1459 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Tear down the single advertising instance: cancel its timeout, wipe
 * the instance state, notify user space with advertising_removed(),
 * clear HCI_ADVERTISING_INSTANCE and — when the device is powered and
 * software advertising is not active — disable advertising on the
 * controller.
 */
1462 static void clear_adv_instance(struct hci_dev *hdev)
1464 struct hci_request req;
1466 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1469 if (hdev->adv_instance.timeout)
1470 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
1472 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
1473 advertising_removed(NULL, hdev, 1);
1474 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
1476 if (!hdev_is_powered(hdev) ||
1477 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1480 hci_req_init(&req, hdev);
1481 disable_advertising(&req);
1482 hci_req_run(&req, NULL);
/* Quiesce the controller before powering off: disable page/inquiry
 * scan, drop the advertising instance, disable advertising, stop
 * discovery, and disconnect/cancel/reject every connection depending
 * on its state.  Runs the accumulated request with
 * clean_up_hci_complete() and, if discovery was stopped, moves it to
 * DISCOVERY_STOPPING.  Returns the hci_req_run() result.
 */
1485 static int clean_up_hci_state(struct hci_dev *hdev)
1487 struct hci_request req;
1488 struct hci_conn *conn;
1489 bool discov_stopped;
1492 hci_req_init(&req, hdev);
1494 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1495 test_bit(HCI_PSCAN, &hdev->flags)) {
1497 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1500 if (hdev->adv_instance.timeout)
1501 clear_adv_instance(hdev);
1503 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1504 disable_advertising(&req);
1506 discov_stopped = hci_stop_discovery(&req);
1508 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1509 struct hci_cp_disconnect dc;
1510 struct hci_cp_reject_conn_req rej;
1512 switch (conn->state) {
1515 dc.handle = cpu_to_le16(conn->handle);
1516 dc.reason = 0x15; /* Terminated due to Power Off */
1517 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1520 if (conn->type == LE_LINK)
1521 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1523 else if (conn->type == ACL_LINK)
1524 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1528 bacpy(&rej.bdaddr, &conn->dst);
1529 rej.reason = 0x15; /* Terminated due to Power Off */
1530 if (conn->type == ACL_LINK)
1531 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1533 else if (conn->type == SCO_LINK)
1534 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1540 err = hci_req_run(&req, clean_up_hci_complete);
1541 if (!err && discov_stopped)
1542 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler.  Validates the on/off value, rejects a
 * second concurrent SET_POWERED, handles the auto-off shortcut, returns
 * the current settings when the requested state already matches, and
 * otherwise queues power_on work or runs clean_up_hci_state() followed
 * by a delayed power_off (immediate when no HCI commands were queued,
 * i.e. -ENODATA).
 */
1547 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1550 struct mgmt_mode *cp = data;
1551 struct mgmt_pending_cmd *cmd;
1554 BT_DBG("request for %s", hdev->name);
1556 if (cp->val != 0x00 && cp->val != 0x01)
1557 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1558 MGMT_STATUS_INVALID_PARAMS);
1562 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1563 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1568 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1569 cancel_delayed_work(&hdev->power_off);
1572 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1574 err = mgmt_powered(hdev, 1);
1579 if (!!cp->val == hdev_is_powered(hdev)) {
1580 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1584 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1591 queue_work(hdev->req_workqueue, &hdev->power_on);
1594 /* Disconnect connections, stop scans, etc */
1595 err = clean_up_hci_state(hdev);
1597 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1598 HCI_POWER_OFF_TIMEOUT);
1600 /* ENODATA means there were no HCI commands queued */
1601 if (err == -ENODATA) {
1602 cancel_delayed_work(&hdev->power_off);
1603 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1609 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all mgmt sockets except @skip.
 */
1613 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1615 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1617 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
/* Public wrapper: broadcast the new-settings event to every mgmt
 * socket (no socket is skipped).
 */
1621 int mgmt_new_settings(struct hci_dev *hdev)
1623 return new_settings(hdev, NULL);
1628 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer the pending command with the
 * current settings, unlink and free it, and remember the first
 * command's socket in the cmd_lookup match (with a reference held) so
 * the caller can skip it when broadcasting.
 */
1632 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1634 struct cmd_lookup *match = data;
1636 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1638 list_del(&cmd->list);
1640 if (match->sk == NULL) {
1641 match->sk = cmd->sk;
1642 sock_hold(match->sk);
1645 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data (a u8 *) and remove it.
 */
1648 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1652 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1653 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: prefer the command's own
 * cmd_complete handler when one is set, otherwise fall back to a plain
 * status response via cmd_status_rsp().
 */
1656 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1658 if (cmd->cmd_complete) {
1661 cmd->cmd_complete(cmd, *status);
1662 mgmt_pending_remove(cmd);
1667 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the response payload.
 */
1670 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1672 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1673 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1676 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1678 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1679 cmd->param, sizeof(struct mgmt_addr_info));
1682 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1684 if (!lmp_bredr_capable(hdev))
1685 return MGMT_STATUS_NOT_SUPPORTED;
1686 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1687 return MGMT_STATUS_REJECTED;
1689 return MGMT_STATUS_SUCCESS;
1692 static u8 mgmt_le_support(struct hci_dev *hdev)
1694 if (!lmp_le_capable(hdev))
1695 return MGMT_STATUS_NOT_SUPPORTED;
1696 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1697 return MGMT_STATUS_REJECTED;
1699 return MGMT_STATUS_SUCCESS;
/* HCI completion handler for SET_DISCOVERABLE.  On failure it reports
 * the mapped status and clears HCI_LIMITED_DISCOVERABLE; on success it
 * updates HCI_DISCOVERABLE, arms the discoverable timeout when one is
 * configured, answers the pending command, broadcasts new settings if
 * anything changed, and refreshes class of device / page scan.
 */
1702 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1705 struct mgmt_pending_cmd *cmd;
1706 struct mgmt_mode *cp;
1707 struct hci_request req;
1710 BT_DBG("status 0x%02x", status);
1714 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1719 u8 mgmt_err = mgmt_status(status);
1720 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1721 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1727 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1729 if (hdev->discov_timeout > 0) {
1730 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1731 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1735 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1738 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1741 new_settings(hdev, cmd->sk);
1743 /* When the discoverable mode gets changed, make sure
1744 * that class of device has the limited discoverable
1745 * bit correctly set. Also update page scan based on whitelist
1748 hci_req_init(&req, hdev);
1749 __hci_update_page_scan(&req);
1751 hci_req_run(&req, NULL);
1754 mgmt_pending_remove(cmd);
1757 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable (limited requires a timeout, off forbids
 * one).  Handles the powered-off case by flipping flags only, the
 * "same mode, new timeout" case without HCI traffic, and otherwise
 * queues the IAC LAP / scan-enable (BR/EDR) or advertising-data (LE)
 * commands, completed by set_discoverable_complete().
 */
1760 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1763 struct mgmt_cp_set_discoverable *cp = data;
1764 struct mgmt_pending_cmd *cmd;
1765 struct hci_request req;
1770 BT_DBG("request for %s", hdev->name);
1772 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1773 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1774 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1775 MGMT_STATUS_REJECTED);
1777 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1778 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1779 MGMT_STATUS_INVALID_PARAMS);
1781 timeout = __le16_to_cpu(cp->timeout);
1783 /* Disabling discoverable requires that no timeout is set,
1784 * and enabling limited discoverable requires a timeout.
1786 if ((cp->val == 0x00 && timeout > 0) ||
1787 (cp->val == 0x02 && timeout == 0))
1788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1789 MGMT_STATUS_INVALID_PARAMS);
1793 if (!hdev_is_powered(hdev) && timeout > 0) {
1794 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1795 MGMT_STATUS_NOT_POWERED);
1799 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1800 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1801 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1806 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1807 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1808 MGMT_STATUS_REJECTED);
1812 if (!hdev_is_powered(hdev)) {
1813 bool changed = false;
1815 /* Setting limited discoverable when powered off is
1816 * not a valid operation since it requires a timeout
1817 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1819 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1820 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1824 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1829 err = new_settings(hdev, sk);
1834 /* If the current mode is the same, then just update the timeout
1835 * value with the new value. And if only the timeout gets updated,
1836 * then no need for any HCI transactions.
1838 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1839 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1840 HCI_LIMITED_DISCOVERABLE)) {
1841 cancel_delayed_work(&hdev->discov_off);
1842 hdev->discov_timeout = timeout;
1844 if (cp->val && hdev->discov_timeout > 0) {
1845 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1846 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1850 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1854 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1860 /* Cancel any potential discoverable timeout that might be
1861 * still active and store new timeout value. The arming of
1862 * the timeout happens in the complete handler.
1864 cancel_delayed_work(&hdev->discov_off);
1865 hdev->discov_timeout = timeout;
1867 /* Limited discoverable mode */
1868 if (cp->val == 0x02)
1869 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1871 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1873 hci_req_init(&req, hdev);
1875 /* The procedure for LE-only controllers is much simpler - just
1876 * update the advertising data.
1878 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1884 struct hci_cp_write_current_iac_lap hci_cp;
1886 if (cp->val == 0x02) {
1887 /* Limited discoverable mode */
1888 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1889 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1890 hci_cp.iac_lap[1] = 0x8b;
1891 hci_cp.iac_lap[2] = 0x9e;
1892 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1893 hci_cp.iac_lap[4] = 0x8b;
1894 hci_cp.iac_lap[5] = 0x9e;
1896 /* General discoverable mode */
1898 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1899 hci_cp.iac_lap[1] = 0x8b;
1900 hci_cp.iac_lap[2] = 0x9e;
1903 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1904 (hci_cp.num_iac * 3) + 1, &hci_cp);
1906 scan |= SCAN_INQUIRY;
1908 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1911 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1914 update_adv_data(&req);
1916 err = hci_req_run(&req, set_discoverable_complete);
1918 mgmt_pending_remove(cmd);
1921 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing fast
 * connectable: interlaced scan with a 160 ms interval when enabled,
 * standard scan at the 1.28 s default otherwise.  Commands are only
 * added when the values actually differ from the current ones; no-op
 * for non-BR/EDR or pre-1.2 controllers.
 */
1925 static void write_fast_connectable(struct hci_request *req, bool enable)
1927 struct hci_dev *hdev = req->hdev;
1928 struct hci_cp_write_page_scan_activity acp;
1931 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1934 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1938 type = PAGE_SCAN_TYPE_INTERLACED;
1940 /* 160 msec page scan interval */
1941 acp.interval = cpu_to_le16(0x0100);
1943 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1945 /* default 1.28 sec page scan */
1946 acp.interval = cpu_to_le16(0x0800);
1949 acp.window = cpu_to_le16(0x0012);
1951 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1952 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1953 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1956 if (hdev->page_scan_type != type)
1957 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI completion handler for SET_CONNECTABLE.  On error, reports the
 * mapped status; on success, updates HCI_CONNECTABLE (clearing
 * HCI_DISCOVERABLE as a side effect of disabling), answers the pending
 * command, and refreshes settings, page scan, advertising data and
 * background scan when anything changed.
 */
1960 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1963 struct mgmt_pending_cmd *cmd;
1964 struct mgmt_mode *cp;
1965 bool conn_changed, discov_changed;
1967 BT_DBG("status 0x%02x", status);
1971 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1976 u8 mgmt_err = mgmt_status(status);
1977 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1983 conn_changed = !hci_dev_test_and_set_flag(hdev,
1985 discov_changed = false;
1987 conn_changed = hci_dev_test_and_clear_flag(hdev,
1989 discov_changed = hci_dev_test_and_clear_flag(hdev,
1993 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1995 if (conn_changed || discov_changed) {
1996 new_settings(hdev, cmd->sk);
1997 hci_update_page_scan(hdev);
1999 mgmt_update_adv_data(hdev);
2000 hci_update_background_scan(hdev);
2004 mgmt_pending_remove(cmd);
2007 hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when powered off or when no
 * HCI traffic was needed): toggle HCI_CONNECTABLE, drop
 * HCI_DISCOVERABLE when disabling, answer the caller and — if anything
 * changed — refresh page/background scan and broadcast new settings.
 */
2010 static int set_connectable_update_settings(struct hci_dev *hdev,
2011 struct sock *sk, u8 val)
2013 bool changed = false;
2016 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2020 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2022 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2023 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2026 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2031 hci_update_page_scan(hdev);
2032 hci_update_background_scan(hdev);
2033 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  Validates input, falls back to the
 * flag-only path when powered off, rejects a concurrent
 * SET_DISCOVERABLE/SET_CONNECTABLE, then builds an HCI request:
 * LE-only controllers just update advertising data/params, BR/EDR
 * controllers adjust scan enable based on the whitelist and cancel any
 * discoverable timeout when disabling.  -ENODATA from hci_req_run()
 * means no commands were needed and the settings path is used instead.
 */
2039 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2042 struct mgmt_mode *cp = data;
2043 struct mgmt_pending_cmd *cmd;
2044 struct hci_request req;
2048 BT_DBG("request for %s", hdev->name);
2050 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2051 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2052 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2053 MGMT_STATUS_REJECTED);
2055 if (cp->val != 0x00 && cp->val != 0x01)
2056 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2057 MGMT_STATUS_INVALID_PARAMS);
2061 if (!hdev_is_powered(hdev)) {
2062 err = set_connectable_update_settings(hdev, sk, cp->val);
2066 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2067 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2068 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2073 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2079 hci_req_init(&req, hdev);
2081 /* If BR/EDR is not enabled and we disable advertising as a
2082 * by-product of disabling connectable, we need to update the
2083 * advertising flags.
2085 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2087 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2088 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2090 update_adv_data(&req);
2091 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2095 /* If we don't have any whitelist entries just
2096 * disable all scanning. If there are entries
2097 * and we had both page and inquiry scanning
2098 * enabled then fall back to only page scanning.
2099 * Otherwise no changes are needed.
2101 if (list_empty(&hdev->whitelist))
2102 scan = SCAN_DISABLED;
2103 else if (test_bit(HCI_ISCAN, &hdev->flags))
2106 goto no_scan_update;
2108 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2109 hdev->discov_timeout > 0)
2110 cancel_delayed_work(&hdev->discov_off);
2113 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2117 /* Update the advertising parameters if necessary */
2118 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2119 enable_advertising(&req);
2121 err = hci_req_run(&req, set_connectable_complete);
2123 mgmt_pending_remove(cmd);
2124 if (err == -ENODATA)
2125 err = set_connectable_update_settings(hdev, sk,
2131 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: pure flag toggle of HCI_BONDABLE (no
 * HCI traffic), replying with the current settings and broadcasting
 * new settings when the flag actually changed.
 */
2135 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2138 struct mgmt_mode *cp = data;
2142 BT_DBG("request for %s", hdev->name);
2144 if (cp->val != 0x00 && cp->val != 0x01)
2145 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2146 MGMT_STATUS_INVALID_PARAMS);
2151 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2153 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2155 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2160 err = new_settings(hdev, sk);
2163 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler.  Requires BR/EDR support; when
 * powered off it only toggles HCI_LINK_SECURITY, otherwise — if the
 * HCI_AUTH state differs from the requested value — it issues
 * HCI_OP_WRITE_AUTH_ENABLE with a pending command tracking the reply.
 */
2167 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2170 struct mgmt_mode *cp = data;
2171 struct mgmt_pending_cmd *cmd;
2175 BT_DBG("request for %s", hdev->name);
2177 status = mgmt_bredr_support(hdev);
2179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2182 if (cp->val != 0x00 && cp->val != 0x01)
2183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2184 MGMT_STATUS_INVALID_PARAMS);
2188 if (!hdev_is_powered(hdev)) {
2189 bool changed = false;
2191 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2192 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2196 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2201 err = new_settings(hdev, sk);
2206 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2207 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2214 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2215 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2219 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2225 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2227 mgmt_pending_remove(cmd);
2232 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler.  Requires BR/EDR and SSP-capable hardware.
 * Powered off: toggles HCI_SSP_ENABLED (disabling also drops
 * HCI_HS_ENABLED).  Powered on: skips if the flag already matches,
 * otherwise sends HCI_OP_WRITE_SSP_MODE (disabling debug mode first
 * when HCI_USE_DEBUG_KEYS is set) with a pending command.
 */
2236 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2238 struct mgmt_mode *cp = data;
2239 struct mgmt_pending_cmd *cmd;
2243 BT_DBG("request for %s", hdev->name);
2245 status = mgmt_bredr_support(hdev);
2247 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2249 if (!lmp_ssp_capable(hdev))
2250 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2251 MGMT_STATUS_NOT_SUPPORTED);
2253 if (cp->val != 0x00 && cp->val != 0x01)
2254 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2255 MGMT_STATUS_INVALID_PARAMS);
2259 if (!hdev_is_powered(hdev)) {
2263 changed = !hci_dev_test_and_set_flag(hdev,
2266 changed = hci_dev_test_and_clear_flag(hdev,
2269 changed = hci_dev_test_and_clear_flag(hdev,
2272 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2275 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2280 err = new_settings(hdev, sk);
2285 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2286 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2291 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2292 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2296 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2302 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2303 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2304 sizeof(cp->val), &cp->val);
2306 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2308 mgmt_pending_remove(cmd);
2313 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: flag-only toggle of
 * HCI_HS_ENABLED.  Requires BR/EDR, SSP hardware support and SSP being
 * enabled; rejects a concurrent SET_SSP, and disallows disabling while
 * the device is powered (visible extract shows the powered check on
 * the disable path).
 */
2317 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2319 struct mgmt_mode *cp = data;
2324 BT_DBG("request for %s", hdev->name);
2326 status = mgmt_bredr_support(hdev);
2328 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2330 if (!lmp_ssp_capable(hdev))
2331 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2332 MGMT_STATUS_NOT_SUPPORTED);
2334 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2335 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2336 MGMT_STATUS_REJECTED);
2338 if (cp->val != 0x00 && cp->val != 0x01)
2339 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2340 MGMT_STATUS_INVALID_PARAMS);
2344 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2345 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2351 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2353 if (hdev_is_powered(hdev)) {
2354 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2355 MGMT_STATUS_REJECTED);
2359 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2362 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2367 err = new_settings(hdev, sk);
2370 hci_dev_unlock(hdev);
/* HCI completion handler for SET_LE.  On error, fails every pending
 * SET_LE command; on success, answers them via settings_rsp (cmd_lookup
 * collects the originating socket), broadcasts new settings and — when
 * LE ended up enabled — refreshes advertising data, scan response data
 * and background scanning.
 */
2374 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2376 struct cmd_lookup match = { NULL, hdev };
2381 u8 mgmt_err = mgmt_status(status);
2383 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2388 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2390 new_settings(hdev, match.sk);
2395 /* Make sure the controller has a good default for
2396 * advertising data. Restrict the update to when LE
2397 * has actually been enabled. During power on, the
2398 * update in powered_update_hci will take care of it.
2400 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2401 struct hci_request req;
2403 hci_req_init(&req, hdev);
2404 update_adv_data(&req);
2405 update_scan_rsp_data(&req);
2406 __hci_update_background_scan(&req);
2407 hci_req_run(&req, NULL);
2411 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler.  LE-only configurations cannot switch LE off
 * (enable is answered gracefully, disable rejected).  When powered off
 * or already in the requested host state, only flags are toggled
 * (disabling also clears HCI_ADVERTISING).  Otherwise a
 * Write LE Host Supported command is queued — preceded by disabling
 * advertising when turning LE off — completed by le_enable_complete().
 */
2414 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2416 struct mgmt_mode *cp = data;
2417 struct hci_cp_write_le_host_supported hci_cp;
2418 struct mgmt_pending_cmd *cmd;
2419 struct hci_request req;
2423 BT_DBG("request for %s", hdev->name);
2425 if (!lmp_le_capable(hdev))
2426 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2427 MGMT_STATUS_NOT_SUPPORTED);
2429 if (cp->val != 0x00 && cp->val != 0x01)
2430 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2431 MGMT_STATUS_INVALID_PARAMS);
2433 /* Bluetooth single mode LE only controllers or dual-mode
2434 * controllers configured as LE only devices, do not allow
2435 * switching LE off. These have either LE enabled explicitly
2436 * or BR/EDR has been previously switched off.
2438 * When trying to enable an already enabled LE, then gracefully
2439 * send a positive response. Trying to disable it however will
2440 * result into rejection.
2442 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2443 if (cp->val == 0x01)
2444 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2447 MGMT_STATUS_REJECTED);
2453 enabled = lmp_host_le_capable(hdev);
2455 if (!hdev_is_powered(hdev) || val == enabled) {
2456 bool changed = false;
2458 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2459 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2463 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2464 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2468 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2473 err = new_settings(hdev, sk);
2478 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2479 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2480 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2485 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2491 hci_req_init(&req, hdev);
2493 memset(&hci_cp, 0, sizeof(hci_cp));
2497 hci_cp.simul = 0x00;
2499 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2500 disable_advertising(&req);
2503 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2506 err = hci_req_run(&req, le_enable_complete);
2508 mgmt_pending_remove(cmd);
2511 hci_dev_unlock(hdev);
2515 /* This is a helper function to test for pending mgmt commands that can
2516 * cause CoD or EIR HCI commands. We can only allow one such pending
2517 * mgmt command at a time since otherwise we cannot easily track what
2518 * the current values are, will be, and based on that calculate if a new
2519 * HCI command needs to be sent and if yes with what value.
2521 static bool pending_eir_or_class(struct hci_dev *hdev)
2523 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that may rewrite class of
 * device or EIR data (return statements are elided in this extract).
 */
2525 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2526 switch (cmd->opcode) {
2527 case MGMT_OP_ADD_UUID:
2528 case MGMT_OP_REMOVE_UUID:
2529 case MGMT_OP_SET_DEV_CLASS:
2530 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are embedded in its
 * last four bytes (see get_uuid_size()).
 */
2538 static const u8 bluetooth_base_uuid[] = {
2539 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2540 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes differ from the
 * Bluetooth Base UUID it is a full 128-bit UUID; otherwise the value
 * embedded at offset 12 decides 16 vs 32 bits (return statements are
 * elided in this extract).
 */
2543 static u8 get_uuid_size(const u8 *uuid)
2547 if (memcmp(uuid, bluetooth_base_uuid, 12))
2550 val = get_unaligned_le32(&uuid[12]);
/* Common completion path for class/EIR-affecting commands
 * (ADD_UUID, REMOVE_UUID, SET_DEV_CLASS): answer the pending @mgmt_op
 * with the mapped status and the current 3-byte class of device.
 */
2557 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2559 struct mgmt_pending_cmd *cmd;
2563 cmd = pending_find(mgmt_op, hdev);
2567 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2568 mgmt_status(status), hdev->dev_class, 3);
2570 mgmt_pending_remove(cmd);
2573 hci_dev_unlock(hdev);
/* HCI completion callback for add_uuid(): delegate to the shared
 * class-complete handler.
 */
2576 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2578 BT_DBG("status 0x%02x", status);
2580 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: rejects while another class/EIR command is
 * pending, allocates and appends the UUID (with service hint and
 * detected size) to hdev->uuids, then runs the class/EIR update
 * request.  -ENODATA (nothing to send) is answered immediately with
 * the current class; otherwise a pending command awaits
 * add_uuid_complete().
 */
2583 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2585 struct mgmt_cp_add_uuid *cp = data;
2586 struct mgmt_pending_cmd *cmd;
2587 struct hci_request req;
2588 struct bt_uuid *uuid;
2591 BT_DBG("request for %s", hdev->name);
2595 if (pending_eir_or_class(hdev)) {
2596 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2601 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2607 memcpy(uuid->uuid, cp->uuid, 16);
2608 uuid->svc_hint = cp->svc_hint;
2609 uuid->size = get_uuid_size(cp->uuid);
2611 list_add_tail(&uuid->list, &hdev->uuids);
2613 hci_req_init(&req, hdev);
2618 err = hci_req_run(&req, add_uuid_complete);
2620 if (err != -ENODATA)
2623 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2624 hdev->dev_class, 3);
2628 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2637 hci_dev_unlock(hdev);
/* Arm the service cache on a powered device: set HCI_SERVICE_CACHE and
 * schedule service_cache_off() if the flag was not already set (return
 * statements are elided in this extract).
 */
2641 static bool enable_service_cache(struct hci_dev *hdev)
2643 if (!hdev_is_powered(hdev))
2646 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2647 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI completion callback for remove_uuid(): delegate to the shared
 * class-complete handler.
 */
2655 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2657 BT_DBG("status 0x%02x", status);
2659 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler.  The all-zero UUID clears the whole
 * list (optionally deferring the update via the service cache); a
 * specific UUID removes every matching entry and fails with
 * INVALID_PARAMS if none matched.  The subsequent class/EIR request
 * follows the same -ENODATA / pending-command pattern as add_uuid().
 */
2662 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2665 struct mgmt_cp_remove_uuid *cp = data;
2666 struct mgmt_pending_cmd *cmd;
2667 struct bt_uuid *match, *tmp;
2668 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2669 struct hci_request req;
2672 BT_DBG("request for %s", hdev->name);
2676 if (pending_eir_or_class(hdev)) {
2677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2682 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2683 hci_uuids_clear(hdev);
2685 if (enable_service_cache(hdev)) {
2686 err = mgmt_cmd_complete(sk, hdev->id,
2687 MGMT_OP_REMOVE_UUID,
2688 0, hdev->dev_class, 3);
2697 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2698 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2701 list_del(&match->list);
2707 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2708 MGMT_STATUS_INVALID_PARAMS);
2713 hci_req_init(&req, hdev);
2718 err = hci_req_run(&req, remove_uuid_complete);
2720 if (err != -ENODATA)
2723 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2724 hdev->dev_class, 3);
2728 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2737 hci_dev_unlock(hdev);
/* HCI completion callback for set_dev_class(): delegate to the shared
 * class-complete handler.
 */
2741 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2743 BT_DBG("status 0x%02x", status);
2745 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler.  BR/EDR only; validates that the
 * reserved bits of minor (low 2) and major (high 3) are zero, stores
 * the new class, and — when powered — flushes the service cache
 * (dropping the lock around cancel_delayed_work_sync) before running
 * the class/EIR request with the usual -ENODATA / pending-command
 * handling.
 */
2748 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2751 struct mgmt_cp_set_dev_class *cp = data;
2752 struct mgmt_pending_cmd *cmd;
2753 struct hci_request req;
2756 BT_DBG("request for %s", hdev->name);
2758 if (!lmp_bredr_capable(hdev))
2759 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2760 MGMT_STATUS_NOT_SUPPORTED);
2764 if (pending_eir_or_class(hdev)) {
2765 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2770 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2771 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2772 MGMT_STATUS_INVALID_PARAMS);
2776 hdev->major_class = cp->major;
2777 hdev->minor_class = cp->minor;
2779 if (!hdev_is_powered(hdev)) {
2780 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2781 hdev->dev_class, 3);
2785 hci_req_init(&req, hdev);
2787 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2788 hci_dev_unlock(hdev);
2789 cancel_delayed_work_sync(&hdev->service_cache);
2796 err = hci_req_run(&req, set_class_complete);
2798 if (err != -ENODATA)
2801 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2802 hdev->dev_class, 3);
2806 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2815 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler.  Validates key_count against the
 * u16-payload maximum and the exact expected length, validates
 * debug_keys and each key's address type / key type, then clears the
 * existing store, updates HCI_KEEP_DEBUG_KEYS accordingly, and adds
 * every key except debug-combination keys (which always require a new
 * pairing).
 */
2819 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2822 struct mgmt_cp_load_link_keys *cp = data;
2823 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2824 sizeof(struct mgmt_link_key_info));
2825 u16 key_count, expected_len;
2829 BT_DBG("request for %s", hdev->name);
2831 if (!lmp_bredr_capable(hdev))
2832 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2833 MGMT_STATUS_NOT_SUPPORTED);
2835 key_count = __le16_to_cpu(cp->key_count);
2836 if (key_count > max_key_count) {
2837 BT_ERR("load_link_keys: too big key_count value %u",
2839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2840 MGMT_STATUS_INVALID_PARAMS);
2843 expected_len = sizeof(*cp) + key_count *
2844 sizeof(struct mgmt_link_key_info);
2845 if (expected_len != len) {
2846 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2848 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2849 MGMT_STATUS_INVALID_PARAMS);
2852 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2853 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2854 MGMT_STATUS_INVALID_PARAMS);
2856 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2859 for (i = 0; i < key_count; i++) {
2860 struct mgmt_link_key_info *key = &cp->keys[i];
2862 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2863 return mgmt_cmd_status(sk, hdev->id,
2864 MGMT_OP_LOAD_LINK_KEYS,
2865 MGMT_STATUS_INVALID_PARAMS);
2870 hci_link_keys_clear(hdev);
2873 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2875 changed = hci_dev_test_and_clear_flag(hdev,
2876 HCI_KEEP_DEBUG_KEYS);
2879 new_settings(hdev, NULL);
2881 for (i = 0; i < key_count; i++) {
2882 struct mgmt_link_key_info *key = &cp->keys[i];
2884 /* Always ignore debug keys and require a new pairing if
2885 * the user wants to use them.
2887 if (key->type == HCI_LK_DEBUG_COMBINATION)
2890 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2891 key->type, key->pin_len, NULL);
2894 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2896 hci_dev_unlock(hdev);
/* Broadcast the Device Unpaired management event for the given address,
 * skipping delivery to @skip_sk (normally the socket that issued the
 * unpair command, which gets a command-complete instead).
 */
2901 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2902 u8 addr_type, struct sock *skip_sk)
2904 struct mgmt_ev_device_unpaired ev;
2906 bacpy(&ev.addr.bdaddr, bdaddr);
2907 ev.addr.type = addr_type;
2909 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: removes stored pairing data (link key
 * for BR/EDR; IRK and LTK for LE) and optionally terminates an active
 * connection when cp->disconnect is set.
 * NOTE(review): partial extraction — interior lines are missing here.
 */
2913 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2916 struct mgmt_cp_unpair_device *cp = data;
2917 struct mgmt_rp_unpair_device rp;
2918 struct hci_cp_disconnect dc;
2919 struct mgmt_pending_cmd *cmd;
2920 struct hci_conn *conn;
/* Echo the target address back in the response. */
2923 memset(&rp, 0, sizeof(rp));
2924 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2925 rp.addr.type = cp->addr.type;
2927 if (!bdaddr_type_is_valid(cp->addr.type))
2928 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2929 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a boolean on the wire; only 0x00/0x01 are valid. */
2932 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2933 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2934 MGMT_STATUS_INVALID_PARAMS,
2939 if (!hdev_is_powered(hdev)) {
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2941 MGMT_STATUS_NOT_POWERED, &rp,
2946 if (cp->addr.type == BDADDR_BREDR) {
2947 /* If disconnection is requested, then look up the
2948 * connection. If the remote device is connected, it
2949 * will be later used to terminate the link.
2951 * Setting it to NULL explicitly will cause no
2952 * termination of the link.
2955 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2960 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE path: look up any active LE link to the same address. */
2964 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2967 /* Defer clearing up the connection parameters
2968 * until closing to give a chance of keeping
2969 * them if a repairing happens.
2971 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2973 /* If disconnection is not requested, then
2974 * clear the connection variable so that the
2975 * link is not terminated.
2977 if (!cp->disconnect)
/* Map the management address type onto the HCI LE address type. */
2981 if (cp->addr.type == BDADDR_LE_PUBLIC)
2982 addr_type = ADDR_LE_DEV_PUBLIC;
2984 addr_type = ADDR_LE_DEV_RANDOM;
2986 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2988 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No stored keys were found for this address. */
2992 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2993 MGMT_STATUS_NOT_PAIRED, &rp,
2998 /* If the connection variable is set, then termination of the
2999 * link is requested.
3002 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3004 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Link termination needed: queue a pending command and send the
 * HCI Disconnect; completion is reported asynchronously.
 */
3008 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3015 cmd->cmd_complete = addr_cmd_complete;
3017 dc.handle = cpu_to_le16(conn->handle);
3018 dc.reason = 0x13; /* Remote User Terminated Connection */
3019 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
3021 mgmt_pending_remove(cmd);
3024 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminates the BR/EDR or LE connection to
 * the given address. Completion is delivered asynchronously via the
 * pending-command machinery once the link actually goes down.
 */
3028 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3031 struct mgmt_cp_disconnect *cp = data;
3032 struct mgmt_rp_disconnect rp;
3033 struct mgmt_pending_cmd *cmd;
3034 struct hci_conn *conn;
/* Echo the target address back in the response. */
3039 memset(&rp, 0, sizeof(rp));
3040 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3041 rp.addr.type = cp->addr.type;
3043 if (!bdaddr_type_is_valid(cp->addr.type))
3044 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3045 MGMT_STATUS_INVALID_PARAMS,
3050 if (!test_bit(HCI_UP, &hdev->flags)) {
3051 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3052 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be in flight per controller. */
3057 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3058 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3059 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Resolve the connection object for the requested transport. */
3063 if (cp->addr.type == BDADDR_BREDR)
3064 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3067 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
3069 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3070 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3071 MGMT_STATUS_NOT_CONNECTED, &rp,
3076 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3082 cmd->cmd_complete = generic_cmd_complete;
3084 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3086 mgmt_pending_remove(cmd);
3089 hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type pair into the single
 * management-interface BDADDR_* address type.
 */
3093 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3095 switch (link_type) {
3097 switch (addr_type) {
3098 case ADDR_LE_DEV_PUBLIC:
3099 return BDADDR_LE_PUBLIC;
3102 /* Fallback to LE Random address type */
3103 return BDADDR_LE_RANDOM;
3107 /* Fallback to BR/EDR type */
3108 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: returns the list of addresses for
 * all connections currently marked MGMT-connected, excluding SCO/eSCO
 * links (which are not reported to the management interface).
 */
3112 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3115 struct mgmt_rp_get_connections *rp;
3125 if (!hdev_is_powered(hdev)) {
3126 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3127 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply. */
3132 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3133 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3137 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3138 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address entries. */
3145 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3146 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3148 bacpy(&rp->addr[i].bdaddr, &c->dst);
3149 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3150 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3155 rp->conn_count = cpu_to_le16(i);
3157 /* Recalculate length in case of filtered SCO connections, etc */
3158 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3160 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3166 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Negative Reply command and send the
 * corresponding HCI command; the pending entry is dropped on send
 * failure.
 */
3170 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3171 struct mgmt_cp_pin_code_neg_reply *cp)
3173 struct mgmt_pending_cmd *cmd;
3176 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3181 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3182 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3184 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forwards a user-supplied PIN code to
 * the controller for the pending BR/EDR pairing. If high security is
 * required, the PIN must be a full 16 bytes; otherwise the reply is
 * converted into a negative reply.
 */
3189 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3192 struct hci_conn *conn;
3193 struct mgmt_cp_pin_code_reply *cp = data;
3194 struct hci_cp_pin_code_reply reply;
3195 struct mgmt_pending_cmd *cmd;
3202 if (!hdev_is_powered(hdev)) {
3203 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3204 MGMT_STATUS_NOT_POWERED);
/* A PIN reply only makes sense for an existing ACL connection. */
3208 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3210 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3211 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a 16-byte PIN; reject shorter ones by
 * sending a negative reply to the controller instead.
 */
3215 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3216 struct mgmt_cp_pin_code_neg_reply ncp;
3218 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3220 BT_ERR("PIN code is not 16 bytes long");
3222 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3225 MGMT_STATUS_INVALID_PARAMS);
3230 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3236 cmd->cmd_complete = addr_cmd_complete;
3238 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3239 reply.pin_len = cp->pin_len;
3240 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3242 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3244 mgmt_pending_remove(cmd);
3247 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: stores the IO capability used for
 * future pairings after validating it against the SMP maximum.
 */
3251 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3254 struct mgmt_cp_set_io_capability *cp = data;
3258 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3259 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3260 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3264 hdev->io_capability = cp->io_capability;
3266 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3267 hdev->io_capability);
3269 hci_dev_unlock(hdev);
3271 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is this
 * connection, if any.
 */
3275 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3277 struct hci_dev *hdev = conn->hdev;
3278 struct mgmt_pending_cmd *cmd;
3280 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3281 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3284 if (cmd->user_data != conn)
/* Completion handler for a Pair Device command: sends the final
 * command-complete with the peer address, detaches the pairing
 * callbacks from the connection and drops the reference taken when the
 * pairing was started.
 */
3293 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3295 struct mgmt_rp_pair_device rp;
3296 struct hci_conn *conn = cmd->user_data;
3299 bacpy(&rp.addr.bdaddr, &conn->dst);
3300 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3302 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3303 status, &rp, sizeof(rp));
3305 /* So we don't get further callbacks for this connection */
3306 conn->connect_cfm_cb = NULL;
3307 conn->security_cfm_cb = NULL;
3308 conn->disconn_cfm_cb = NULL;
3310 hci_conn_drop(conn);
3312 /* The device is paired so there is no need to remove
3313 * its connection parameters anymore.
3315 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by SMP when pairing finishes: resolve the pending Pair Device
 * command (if one exists for this connection) with success or failure.
 */
3322 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3324 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3325 struct mgmt_pending_cmd *cmd;
3327 cmd = find_pairing(conn);
3329 cmd->cmd_complete(cmd, status);
3330 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback: finish the pending
 * pairing command with the HCI status translated to a mgmt status.
 */
3334 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3336 struct mgmt_pending_cmd *cmd;
3338 BT_DBG("status %u", status);
3340 cmd = find_pairing(conn);
3342 BT_DBG("Unable to find a pending command");
3346 cmd->cmd_complete(cmd, mgmt_status(status));
3347 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb. NOTE(review): lines between 3354
 * and 3359 are missing from this extraction — presumably an early
 * return on success, since for LE a connect alone does not complete
 * pairing (see pair_device); confirm against the full source.
 */
3350 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3352 struct mgmt_pending_cmd *cmd;
3354 BT_DBG("status %u", status);
3359 cmd = find_pairing(conn);
3361 BT_DBG("Unable to find a pending command");
3365 cmd->cmd_complete(cmd, mgmt_status(status));
3366 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiates dedicated bonding with the
 * given BR/EDR or LE address. Creates (or reuses) the connection,
 * installs pairing callbacks, and completes asynchronously unless the
 * link is already connected and secure.
 * NOTE(review): partial extraction — interior lines are missing here.
 */
3369 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3372 struct mgmt_cp_pair_device *cp = data;
3373 struct mgmt_rp_pair_device rp;
3374 struct mgmt_pending_cmd *cmd;
3375 u8 sec_level, auth_type;
3376 struct hci_conn *conn;
/* Echo the target address back in the response. */
3381 memset(&rp, 0, sizeof(rp));
3382 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3383 rp.addr.type = cp->addr.type;
3385 if (!bdaddr_type_is_valid(cp->addr.type))
3386 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3387 MGMT_STATUS_INVALID_PARAMS,
3390 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3391 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3392 MGMT_STATUS_INVALID_PARAMS,
3397 if (!hdev_is_powered(hdev)) {
3398 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3399 MGMT_STATUS_NOT_POWERED, &rp,
3404 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3405 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3406 MGMT_STATUS_ALREADY_PAIRED, &rp,
3411 sec_level = BT_SECURITY_MEDIUM;
3412 auth_type = HCI_AT_DEDICATED_BONDING;
3414 if (cp->addr.type == BDADDR_BREDR) {
3415 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3420 /* Convert from L2CAP channel address type to HCI address type
3422 if (cp->addr.type == BDADDR_LE_PUBLIC)
3423 addr_type = ADDR_LE_DEV_PUBLIC;
3425 addr_type = ADDR_LE_DEV_RANDOM;
3427 /* When pairing a new device, it is expected to remember
3428 * this device for future connections. Adding the connection
3429 * parameter information ahead of time allows tracking
3430 * of the slave preferred values and will speed up any
3431 * further connection establishment.
3433 * If connection parameters already exist, then they
3434 * will be kept and this function does nothing.
3436 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3438 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3439 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map the connection-attempt errno onto a mgmt status code. */
3446 if (PTR_ERR(conn) == -EBUSY)
3447 status = MGMT_STATUS_BUSY;
3448 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3449 status = MGMT_STATUS_NOT_SUPPORTED;
3450 else if (PTR_ERR(conn) == -ECONNREFUSED)
3451 status = MGMT_STATUS_REJECTED;
3453 status = MGMT_STATUS_CONNECT_FAILED;
3455 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3456 status, &rp, sizeof(rp));
/* A connect-confirm callback already installed means another
 * pairing is in progress on this connection.
 */
3460 if (conn->connect_cfm_cb) {
3461 hci_conn_drop(conn);
3462 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3463 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3467 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3470 hci_conn_drop(conn);
3474 cmd->cmd_complete = pairing_complete;
3476 /* For LE, just connecting isn't a proof that the pairing finished */
3477 if (cp->addr.type == BDADDR_BREDR) {
3478 conn->connect_cfm_cb = pairing_complete_cb;
3479 conn->security_cfm_cb = pairing_complete_cb;
3480 conn->disconn_cfm_cb = pairing_complete_cb;
3482 conn->connect_cfm_cb = le_pairing_complete_cb;
3483 conn->security_cfm_cb = le_pairing_complete_cb;
3484 conn->disconn_cfm_cb = le_pairing_complete_cb;
3487 conn->io_capability = cp->io_cap;
/* Hold a reference on the connection for the pending command. */
3488 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: complete synchronously. */
3490 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3491 hci_conn_security(conn, sec_level, auth_type, true)) {
3492 cmd->cmd_complete(cmd, 0);
3493 mgmt_pending_remove(cmd);
3499 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: aborts the pending Pair Device
 * command for the given address, completing it with a Cancelled status.
 */
3503 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3506 struct mgmt_addr_info *addr = data;
3507 struct mgmt_pending_cmd *cmd;
3508 struct hci_conn *conn;
3515 if (!hdev_is_powered(hdev)) {
3516 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3517 MGMT_STATUS_NOT_POWERED);
3521 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3523 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3524 MGMT_STATUS_INVALID_PARAMS);
3528 conn = cmd->user_data;
/* The supplied address must match the pairing actually in flight. */
3530 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3531 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3532 MGMT_STATUS_INVALID_PARAMS);
3536 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3537 mgmt_pending_remove(cmd);
3539 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3540 addr, sizeof(*addr));
3542 hci_dev_unlock(hdev);
/* Common helper for the user confirm/passkey (neg-)reply commands:
 * routes LE replies through SMP directly, and BR/EDR replies through
 * the given HCI opcode via a pending command.
 */
3546 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3547 struct mgmt_addr_info *addr, u16 mgmt_op,
3548 u16 hci_op, __le32 passkey)
3550 struct mgmt_pending_cmd *cmd;
3551 struct hci_conn *conn;
3556 if (!hdev_is_powered(hdev)) {
3557 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3558 MGMT_STATUS_NOT_POWERED, addr,
/* Resolve the connection on the transport implied by addr->type. */
3563 if (addr->type == BDADDR_BREDR)
3564 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3566 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3569 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3570 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies are handled entirely by SMP, no HCI command. */
3575 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3576 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3578 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3579 MGMT_STATUS_SUCCESS, addr,
3582 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3583 MGMT_STATUS_FAILED, addr,
3589 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3595 cmd->cmd_complete = addr_cmd_complete;
3597 /* Continue with pairing via HCI */
3598 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3599 struct hci_cp_user_passkey_reply cp;
3601 bacpy(&cp.bdaddr, &addr->bdaddr);
3602 cp.passkey = passkey;
3603 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3605 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3609 mgmt_pending_remove(cmd);
3612 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp with the PIN-code negative-reply opcodes.
 */
3616 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3617 void *data, u16 len)
3619 struct mgmt_cp_pin_code_neg_reply *cp = data;
3623 return user_pairing_resp(sk, hdev, &cp->addr,
3624 MGMT_OP_PIN_CODE_NEG_REPLY,
3625 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the fixed-size payload
 * then delegates to user_pairing_resp.
 */
3628 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3631 struct mgmt_cp_user_confirm_reply *cp = data;
3635 if (len != sizeof(*cp))
3636 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3637 MGMT_STATUS_INVALID_PARAMS);
3639 return user_pairing_resp(sk, hdev, &cp->addr,
3640 MGMT_OP_USER_CONFIRM_REPLY,
3641 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp with the confirm negative-reply opcodes.
 */
3644 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3645 void *data, u16 len)
3647 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3651 return user_pairing_resp(sk, hdev, &cp->addr,
3652 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3653 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user's passkey via
 * user_pairing_resp.
 */
3656 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3659 struct mgmt_cp_user_passkey_reply *cp = data;
3663 return user_pairing_resp(sk, hdev, &cp->addr,
3664 MGMT_OP_USER_PASSKEY_REPLY,
3665 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp with the passkey negative-reply opcodes.
 */
3668 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3669 void *data, u16 len)
3671 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3675 return user_pairing_resp(sk, hdev, &cp->addr,
3676 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3677 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (copying hdev->dev_name) to
 * the given request.
 */
3680 static void update_name(struct hci_request *req)
3682 struct hci_dev *hdev = req->hdev;
3683 struct hci_cp_write_local_name cp;
3685 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3687 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: resolves the
 * pending mgmt command with either an error status or success.
 */
3690 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3692 struct mgmt_cp_set_local_name *cp;
3693 struct mgmt_pending_cmd *cmd;
3695 BT_DBG("status 0x%02x", status);
3699 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3706 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3707 mgmt_status(status));
3709 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3712 mgmt_pending_remove(cmd);
3715 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: updates the local device name and
 * short name. Completes immediately when the name is unchanged or the
 * controller is powered off; otherwise pushes the name to the
 * controller (and LE scan response data) via an HCI request.
 */
3718 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3721 struct mgmt_cp_set_local_name *cp = data;
3722 struct mgmt_pending_cmd *cmd;
3723 struct hci_request req;
3730 /* If the old values are the same as the new ones just return a
3731 * direct command complete event.
3733 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3734 !memcmp(hdev->short_name, cp->short_name,
3735 sizeof(hdev->short_name))) {
3736 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3741 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: just store the name and notify; no HCI traffic. */
3743 if (!hdev_is_powered(hdev)) {
3744 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3746 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3751 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3757 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3763 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3765 hci_req_init(&req, hdev);
3767 if (lmp_bredr_capable(hdev)) {
3772 /* The name is stored in the scan response data and so
3773 * no need to udpate the advertising data here.
3775 if (lmp_le_capable(hdev))
3776 update_scan_rsp_data(&req);
3778 err = hci_req_run(&req, set_name_complete);
3780 mgmt_pending_remove(cmd);
3783 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: asks the controller for its OOB
 * pairing data, using the extended (Secure Connections) variant when
 * BR/EDR SC is enabled. Result is delivered asynchronously.
 */
3787 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3788 void *data, u16 data_len)
3790 struct mgmt_pending_cmd *cmd;
3793 BT_DBG("%s", hdev->name);
3797 if (!hdev_is_powered(hdev)) {
3798 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3799 MGMT_STATUS_NOT_POWERED);
/* OOB data requires Simple Secure Pairing support. */
3803 if (!lmp_ssp_capable(hdev)) {
3804 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3805 MGMT_STATUS_NOT_SUPPORTED);
3809 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3810 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3815 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3821 if (bredr_sc_enabled(hdev))
3822 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3825 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3828 mgmt_pending_remove(cmd);
3831 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores remote OOB pairing data.
 * Dispatches on payload length: the short form carries P-192 hash/rand
 * for BR/EDR only, the extended form carries both P-192 and P-256
 * values (with zero-valued pairs meaning "disable that set").
 * NOTE(review): partial extraction — interior lines are missing here.
 */
3835 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3836 void *data, u16 len)
3838 struct mgmt_addr_info *addr = data;
3841 BT_DBG("%s ", hdev->name);
3843 if (!bdaddr_type_is_valid(addr->type))
3844 return mgmt_cmd_complete(sk, hdev->id,
3845 MGMT_OP_ADD_REMOTE_OOB_DATA,
3846 MGMT_STATUS_INVALID_PARAMS,
3847 addr, sizeof(*addr));
/* Legacy (P-192 only) payload. */
3851 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3852 struct mgmt_cp_add_remote_oob_data *cp = data;
/* Legacy OOB is a BR/EDR-only mechanism. */
3855 if (cp->addr.type != BDADDR_BREDR) {
3856 err = mgmt_cmd_complete(sk, hdev->id,
3857 MGMT_OP_ADD_REMOTE_OOB_DATA,
3858 MGMT_STATUS_INVALID_PARAMS,
3859 &cp->addr, sizeof(cp->addr));
3863 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3864 cp->addr.type, cp->hash,
3865 cp->rand, NULL, NULL);
3867 status = MGMT_STATUS_FAILED;
3869 status = MGMT_STATUS_SUCCESS;
3871 err = mgmt_cmd_complete(sk, hdev->id,
3872 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3873 &cp->addr, sizeof(cp->addr));
/* Extended (P-192 + P-256) payload. */
3874 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3875 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3876 u8 *rand192, *hash192, *rand256, *hash256;
3879 if (bdaddr_type_is_le(cp->addr.type)) {
3880 /* Enforce zero-valued 192-bit parameters as
3881 * long as legacy SMP OOB isn't implemented.
3883 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3884 memcmp(cp->hash192, ZERO_KEY, 16)) {
3885 err = mgmt_cmd_complete(sk, hdev->id,
3886 MGMT_OP_ADD_REMOTE_OOB_DATA,
3887 MGMT_STATUS_INVALID_PARAMS,
3888 addr, sizeof(*addr));
3895 /* In case one of the P-192 values is set to zero,
3896 * then just disable OOB data for P-192.
3898 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3899 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3903 rand192 = cp->rand192;
3904 hash192 = cp->hash192;
3908 /* In case one of the P-256 values is set to zero, then just
3909 * disable OOB data for P-256.
3911 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3912 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3916 rand256 = cp->rand256;
3917 hash256 = cp->hash256;
3920 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3921 cp->addr.type, hash192, rand192,
3924 status = MGMT_STATUS_FAILED;
3926 status = MGMT_STATUS_SUCCESS;
3928 err = mgmt_cmd_complete(sk, hdev->id,
3929 MGMT_OP_ADD_REMOTE_OOB_DATA,
3930 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command. */
3932 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3933 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3934 MGMT_STATUS_INVALID_PARAMS);
3938 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: deletes stored remote OOB
 * data for one BR/EDR address, or all of it when BDADDR_ANY is given.
 */
3942 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3943 void *data, u16 len)
3945 struct mgmt_cp_remove_remote_oob_data *cp = data;
3949 BT_DBG("%s", hdev->name);
3951 if (cp->addr.type != BDADDR_BREDR)
3952 return mgmt_cmd_complete(sk, hdev->id,
3953 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3954 MGMT_STATUS_INVALID_PARAMS,
3955 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: clear the entire OOB store. */
3959 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3960 hci_remote_oob_data_clear(hdev);
3961 status = MGMT_STATUS_SUCCESS;
3965 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3967 status = MGMT_STATUS_INVALID_PARAMS;
3969 status = MGMT_STATUS_SUCCESS;
3972 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3973 status, &cp->addr, sizeof(cp->addr));
3975 hci_dev_unlock(hdev);
/* Append a BR/EDR general inquiry to the request. Returns false and
 * sets *status when BR/EDR is unsupported or an inquiry is already
 * running; flushes the inquiry cache before starting.
 */
3979 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
3981 struct hci_dev *hdev = req->hdev;
3982 struct hci_cp_inquiry cp;
3983 /* General inquiry access code (GIAC) */
3984 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3986 *status = mgmt_bredr_support(hdev);
3990 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
3991 *status = MGMT_STATUS_BUSY;
3995 hci_inquiry_cache_flush(hdev);
3997 memset(&cp, 0, sizeof(cp));
3998 memcpy(&cp.lap, lap, sizeof(cp.lap));
3999 cp.length = DISCOV_BREDR_INQUIRY_LEN;
4001 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Append an active LE scan (at the given interval) to the request:
 * stops conflicting advertising/background scanning first, picks an
 * own-address type, then sets scan parameters and enables scanning.
 * Returns false and sets *status on any precondition failure.
 */
4006 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
4008 struct hci_dev *hdev = req->hdev;
4009 struct hci_cp_le_set_scan_param param_cp;
4010 struct hci_cp_le_set_scan_enable enable_cp;
4014 *status = mgmt_le_support(hdev);
4018 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4019 /* Don't let discovery abort an outgoing connection attempt
4020 * that's using directed advertising.
4022 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
4023 *status = MGMT_STATUS_REJECTED;
4027 disable_advertising(req);
4030 /* If controller is scanning, it means the background scanning is
4031 * running. Thus, we should temporarily stop it in order to set the
4032 * discovery scanning parameters.
4034 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4035 hci_req_add_le_scan_disable(req);
4037 /* All active scans will be done with either a resolvable private
4038 * address (when privacy feature has been enabled) or non-resolvable
4041 err = hci_update_random_address(req, true, &own_addr_type);
4043 *status = MGMT_STATUS_FAILED;
4047 memset(&param_cp, 0, sizeof(param_cp));
4048 param_cp.type = LE_SCAN_ACTIVE;
4049 param_cp.interval = cpu_to_le16(interval);
4050 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4051 param_cp.own_address_type = own_addr_type;
4053 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4056 memset(&enable_cp, 0, sizeof(enable_cp));
4057 enable_cp.enable = LE_SCAN_ENABLE;
4058 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4060 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Build the HCI request that starts discovery according to
 * hdev->discovery.type: BR/EDR inquiry, LE scan, or interleaved
 * (simultaneous when the controller quirk allows it). Returns false
 * and sets *status when the type cannot be serviced.
 */
4066 static bool trigger_discovery(struct hci_request *req, u8 *status)
4068 struct hci_dev *hdev = req->hdev;
4070 switch (hdev->discovery.type) {
4071 case DISCOV_TYPE_BREDR:
4072 if (!trigger_bredr_inquiry(req, status))
4076 case DISCOV_TYPE_INTERLEAVED:
4077 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4079 /* During simultaneous discovery, we double LE scan
4080 * interval. We must leave some time for the controller
4081 * to do BR/EDR inquiry.
4083 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4087 if (!trigger_bredr_inquiry(req, status))
/* Interleaved discovery requires BR/EDR to be enabled. */
4093 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4094 *status = MGMT_STATUS_NOT_SUPPORTED;
4099 case DISCOV_TYPE_LE:
4100 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4105 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery: resolves the pending command, updates discovery
 * state, and schedules the LE scan-disable timeout for scan-based
 * discovery types.
 */
4112 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4115 struct mgmt_pending_cmd *cmd;
4116 unsigned long timeout;
4118 BT_DBG("status %d", status);
/* Either of the two start-discovery opcodes may be pending. */
4122 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4124 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4127 cmd->cmd_complete(cmd, mgmt_status(status));
4128 mgmt_pending_remove(cmd);
4132 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4136 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4138 /* If the scan involves LE scan, pick proper timeout to schedule
4139 * hdev->le_scan_disable that will stop it.
4141 switch (hdev->discovery.type) {
4142 case DISCOV_TYPE_LE:
4143 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4145 case DISCOV_TYPE_INTERLEAVED:
4146 /* When running simultaneous discovery, the LE scanning time
4147 * should occupy the whole discovery time sine BR/EDR inquiry
4148 * and LE scanning are scheduled by the controller.
4150 * For interleaving discovery in comparison, BR/EDR inquiry
4151 * and LE scanning are done sequentially with separate
4154 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4155 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4157 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4159 case DISCOV_TYPE_BREDR:
4163 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4169 /* When service discovery is used and the controller has
4170 * a strict duplicate filter, it is important to remember
4171 * the start and duration of the scan. This is required
4172 * for restarting scanning during the discovery phase.
4174 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4176 hdev->discovery.result_filtering) {
4177 hdev->discovery.scan_start = jiffies;
4178 hdev->discovery.scan_duration = timeout;
4181 queue_delayed_work(hdev->workqueue,
4182 &hdev->le_scan_disable, timeout);
4186 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: kicks off device discovery of the
 * requested type. Rejects when powered off or discovery is already in
 * progress; completes asynchronously via start_discovery_complete.
 */
4189 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4190 void *data, u16 len)
4192 struct mgmt_cp_start_discovery *cp = data;
4193 struct mgmt_pending_cmd *cmd;
4194 struct hci_request req;
4198 BT_DBG("%s", hdev->name);
4202 if (!hdev_is_powered(hdev)) {
4203 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4204 MGMT_STATUS_NOT_POWERED,
4205 &cp->type, sizeof(cp->type));
4209 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4210 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4211 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4212 MGMT_STATUS_BUSY, &cp->type,
4217 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4223 cmd->cmd_complete = generic_cmd_complete;
4225 /* Clear the discovery filter first to free any previously
4226 * allocated memory for the UUID list.
4228 hci_discovery_filter_clear(hdev);
4230 hdev->discovery.type = cp->type;
4231 hdev->discovery.report_invalid_rssi = false;
4233 hci_req_init(&req, hdev);
4235 if (!trigger_discovery(&req, &status)) {
4236 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4237 status, &cp->type, sizeof(cp->type));
4238 mgmt_pending_remove(cmd);
4242 err = hci_req_run(&req, start_discovery_complete);
4244 mgmt_pending_remove(cmd);
4248 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4251 hci_dev_unlock(hdev);
/* cmd_complete hook for Start Service Discovery: replies with the
 * payload stored on the pending command.
 */
4255 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4258 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with result filtering by RSSI threshold and an optional UUID list,
 * which is length-validated and copied into hdev->discovery.
 * NOTE(review): partial extraction — interior lines are missing here.
 */
4262 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4263 void *data, u16 len)
4265 struct mgmt_cp_start_service_discovery *cp = data;
4266 struct mgmt_pending_cmd *cmd;
4267 struct hci_request req;
/* Largest UUID count (16 bytes each) that fits in a u16 length. */
4268 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4269 u16 uuid_count, expected_len;
4273 BT_DBG("%s", hdev->name);
4277 if (!hdev_is_powered(hdev)) {
4278 err = mgmt_cmd_complete(sk, hdev->id,
4279 MGMT_OP_START_SERVICE_DISCOVERY,
4280 MGMT_STATUS_NOT_POWERED,
4281 &cp->type, sizeof(cp->type));
4285 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4286 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4287 err = mgmt_cmd_complete(sk, hdev->id,
4288 MGMT_OP_START_SERVICE_DISCOVERY,
4289 MGMT_STATUS_BUSY, &cp->type,
/* Guard against a uuid_count that would overflow the length math. */
4294 uuid_count = __le16_to_cpu(cp->uuid_count);
4295 if (uuid_count > max_uuid_count) {
4296 BT_ERR("service_discovery: too big uuid_count value %u",
4298 err = mgmt_cmd_complete(sk, hdev->id,
4299 MGMT_OP_START_SERVICE_DISCOVERY,
4300 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The wire length must exactly match header + UUID list. */
4305 expected_len = sizeof(*cp) + uuid_count * 16;
4306 if (expected_len != len) {
4307 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4309 err = mgmt_cmd_complete(sk, hdev->id,
4310 MGMT_OP_START_SERVICE_DISCOVERY,
4311 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4316 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4323 cmd->cmd_complete = service_discovery_cmd_complete;
4325 /* Clear the discovery filter first to free any previously
4326 * allocated memory for the UUID list.
4328 hci_discovery_filter_clear(hdev);
4330 hdev->discovery.result_filtering = true;
4331 hdev->discovery.type = cp->type;
4332 hdev->discovery.rssi = cp->rssi;
4333 hdev->discovery.uuid_count = uuid_count;
4335 if (uuid_count > 0) {
4336 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4338 if (!hdev->discovery.uuids) {
4339 err = mgmt_cmd_complete(sk, hdev->id,
4340 MGMT_OP_START_SERVICE_DISCOVERY,
4342 &cp->type, sizeof(cp->type));
4343 mgmt_pending_remove(cmd);
4348 hci_req_init(&req, hdev);
4350 if (!trigger_discovery(&req, &status)) {
4351 err = mgmt_cmd_complete(sk, hdev->id,
4352 MGMT_OP_START_SERVICE_DISCOVERY,
4353 status, &cp->type, sizeof(cp->type));
4354 mgmt_pending_remove(cmd);
4358 err = hci_req_run(&req, start_discovery_complete);
4360 mgmt_pending_remove(cmd);
4364 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4367 hci_dev_unlock(hdev);
/* HCI request completion callback for Stop Discovery: resolves the
 * pending command and marks discovery as stopped.
 */
4371 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4373 struct mgmt_pending_cmd *cmd;
4375 BT_DBG("status %d", status);
4379 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4381 cmd->cmd_complete(cmd, mgmt_status(status));
4382 mgmt_pending_remove(cmd);
4386 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4388 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: aborts an active discovery of the
 * given type. If stopping requires no HCI commands (-ENODATA from
 * hci_req_run) the command is completed synchronously.
 */
4391 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4394 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4395 struct mgmt_pending_cmd *cmd;
4396 struct hci_request req;
4399 BT_DBG("%s", hdev->name);
4403 if (!hci_discovery_active(hdev)) {
4404 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4405 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4406 sizeof(mgmt_cp->type));
/* The caller must name the same discovery type that is running. */
4410 if (hdev->discovery.type != mgmt_cp->type) {
4411 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4412 MGMT_STATUS_INVALID_PARAMS,
4413 &mgmt_cp->type, sizeof(mgmt_cp->type));
4417 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4423 cmd->cmd_complete = generic_cmd_complete;
4425 hci_req_init(&req, hdev);
4427 hci_stop_discovery(&req);
4429 err = hci_req_run(&req, stop_discovery_complete);
4431 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4435 mgmt_pending_remove(cmd);
4437 /* If no HCI commands were sent we're done */
4438 if (err == -ENODATA) {
4439 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4440 &mgmt_cp->type, sizeof(mgmt_cp->type));
4441 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4445 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, user space confirms
 * whether a remote device's name is already known.  Updates the inquiry
 * cache entry state (NAME_KNOWN vs NAME_NEEDED) accordingly.
 */
4449 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4452 	struct mgmt_cp_confirm_name *cp = data;
4453 	struct inquiry_entry *e;
4456 	BT_DBG("%s", hdev->name);
	/* Only meaningful while a discovery procedure is running */
4460 	if (!hci_discovery_active(hdev)) {
4461 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4462 					MGMT_STATUS_FAILED, &cp->addr,
4467 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4469 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4470 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4475 	if (cp->name_known) {
4476 		e->name_state = NAME_KNOWN;
4479 		e->name_state = NAME_NEEDED;
		/* Re-queue entry for name resolution */
4480 		hci_inquiry_cache_update_resolve(hdev, e);
4483 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4484 				&cp->addr, sizeof(cp->addr));
4487 	hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the given address to the adapter
 * blacklist and emit a Device Blocked event to other mgmt sockets.
 */
4491 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4494 	struct mgmt_cp_block_device *cp = data;
4498 	BT_DBG("%s", hdev->name);
4500 	if (!bdaddr_type_is_valid(cp->addr.type))
4501 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4502 					 MGMT_STATUS_INVALID_PARAMS,
4503 					 &cp->addr, sizeof(cp->addr));
4507 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4510 		status = MGMT_STATUS_FAILED;
	/* Notify all other mgmt sockets (skip the requester: sk) */
4514 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4516 	status = MGMT_STATUS_SUCCESS;
4519 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4520 				&cp->addr, sizeof(cp->addr));
4522 	hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device() — remove the
 * address from the blacklist and emit a Device Unblocked event.  A miss
 * in the list is reported as INVALID_PARAMS.
 */
4527 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4530 	struct mgmt_cp_unblock_device *cp = data;
4534 	BT_DBG("%s", hdev->name);
4536 	if (!bdaddr_type_is_valid(cp->addr.type))
4537 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4538 					 MGMT_STATUS_INVALID_PARAMS,
4539 					 &cp->addr, sizeof(cp->addr));
4543 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4546 		status = MGMT_STATUS_INVALID_PARAMS;
4550 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4552 	status = MGMT_STATUS_SUCCESS;
4555 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4556 				&cp->addr, sizeof(cp->addr));
4558 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (source, vendor,
 * product, version) on hdev and run an HCI request so the updated EIR
 * data reaches the controller.  Source values above 0x0002 are invalid
 * per the DI profile value range.
 */
4563 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4566 	struct mgmt_cp_set_device_id *cp = data;
4567 	struct hci_request req;
4571 	BT_DBG("%s", hdev->name);
4573 	source = __le16_to_cpu(cp->source);
4575 	if (source > 0x0002)
4576 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4577 				       MGMT_STATUS_INVALID_PARAMS);
4581 	hdev->devid_source = source;
4582 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4583 	hdev->devid_product = __le16_to_cpu(cp->product);
4584 	hdev->devid_version = __le16_to_cpu(cp->version);
4586 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
	/* Fire-and-forget request; no completion callback needed */
4589 	hci_req_init(&req, hdev);
4591 	hci_req_run(&req, NULL);
4593 	hci_dev_unlock(hdev);
/* Completion callback used when instance advertising is re-enabled
 * after Set Advertising is turned off; only logs the status.
 */
4598 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4601 	BT_DBG("status %d", status);
/* HCI request completion for MGMT_OP_SET_ADVERTISING: synchronize the
 * HCI_ADVERTISING flag with the controller state (HCI_LE_ADV), answer
 * all pending Set Advertising commands, and — if the setting was just
 * disabled while an advertising instance exists — re-enable instance
 * advertising.
 */
4604 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4607 	struct cmd_lookup match = { NULL, hdev };
4608 	struct hci_request req;
	/* On failure, fail every pending Set Advertising command */
4613 	u8 mgmt_err = mgmt_status(status);
4615 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4616 			     cmd_status_rsp, &mgmt_err);
4620 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4621 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
4623 	hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4625 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4628 	new_settings(hdev, match.sk);
4633 	/* If "Set Advertising" was just disabled and instance advertising was
4634 	 * set up earlier, then enable the advertising instance.
4636 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4637 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
4640 	hci_req_init(&req, hdev);
4642 	update_adv_data(&req);
4643 	enable_advertising(&req);
4645 	if (hci_req_run(&req, enable_advertising_instance) < 0)
4646 		BT_ERR("Failed to re-configure advertising");
4649 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler.  val 0x00 = off, 0x01 = on,
 * 0x02 = on + connectable.  When no HCI traffic is required (powered
 * off, no state change, LE link up, or active LE scan in progress) the
 * flags are toggled directly and a settings response is sent; otherwise
 * an HCI request is queued and answered in set_advertising_complete().
 */
4652 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4655 	struct mgmt_mode *cp = data;
4656 	struct mgmt_pending_cmd *cmd;
4657 	struct hci_request req;
4661 	BT_DBG("request for %s", hdev->name);
4663 	status = mgmt_le_support(hdev);
4665 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4668 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4669 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4670 				       MGMT_STATUS_INVALID_PARAMS);
4676 	/* The following conditions are ones which mean that we should
4677 	 * not do any HCI communication but directly send a mgmt
4678 	 * response to user space (after toggling the flag if
4681 	if (!hdev_is_powered(hdev) ||
4682 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4683 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4684 	    hci_conn_num(hdev, LE_LINK) > 0 ||
4685 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4686 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4690 		changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4691 		if (cp->val == 0x02)
4692 			hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4694 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4696 		changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4697 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4700 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4705 		err = new_settings(hdev, sk);
	/* Only one advertising/LE toggle may be in flight at a time */
4710 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4711 	    pending_find(MGMT_OP_SET_LE, hdev)) {
4712 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4717 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4723 	hci_req_init(&req, hdev);
4725 	if (cp->val == 0x02)
4726 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4728 	hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4731 	/* Switch to instance "0" for the Set Advertising setting. */
4732 	update_adv_data_for_instance(&req, 0);
4733 	update_scan_rsp_data_for_instance(&req, 0);
4734 	enable_advertising(&req);
4736 	disable_advertising(&req);
4739 	err = hci_req_run(&req, set_advertising_complete);
4741 	mgmt_pending_remove(cmd);
4744 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: store an LE static random address
 * on the adapter.  Only allowed while powered off; a non-ANY address
 * must not be BDADDR_NONE and must have its two most significant bits
 * set, as required for static random addresses.
 */
4748 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4749 			      void *data, u16 len)
4751 	struct mgmt_cp_set_static_address *cp = data;
4754 	BT_DBG("%s", hdev->name);
4756 	if (!lmp_le_capable(hdev))
4757 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4758 				       MGMT_STATUS_NOT_SUPPORTED);
4760 	if (hdev_is_powered(hdev))
4761 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4762 				       MGMT_STATUS_REJECTED);
4764 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4765 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4766 			return mgmt_cmd_status(sk, hdev->id,
4767 					       MGMT_OP_SET_STATIC_ADDRESS,
4768 					       MGMT_STATUS_INVALID_PARAMS);
4770 		/* Two most significant bits shall be set */
4771 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4772 			return mgmt_cmd_status(sk, hdev->id,
4773 					       MGMT_OP_SET_STATIC_ADDRESS,
4774 					       MGMT_STATUS_INVALID_PARAMS);
4779 	bacpy(&hdev->static_addr, &cp->bdaddr);
4781 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4785 	err = new_settings(hdev, sk);
4788 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: store the LE scan interval and
 * window.  Both must lie within 0x0004..0x4000 and the window must not
 * exceed the interval.  If passive background scanning is currently
 * running (and no discovery is active), restart it so the new
 * parameters take effect.
 */
4792 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4793 			   void *data, u16 len)
4795 	struct mgmt_cp_set_scan_params *cp = data;
4796 	__u16 interval, window;
4799 	BT_DBG("%s", hdev->name);
4801 	if (!lmp_le_capable(hdev))
4802 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4803 				       MGMT_STATUS_NOT_SUPPORTED);
4805 	interval = __le16_to_cpu(cp->interval);
4807 	if (interval < 0x0004 || interval > 0x4000)
4808 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4809 				       MGMT_STATUS_INVALID_PARAMS);
4811 	window = __le16_to_cpu(cp->window);
4813 	if (window < 0x0004 || window > 0x4000)
4814 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4815 				       MGMT_STATUS_INVALID_PARAMS);
4817 	if (window > interval)
4818 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4819 				       MGMT_STATUS_INVALID_PARAMS);
4823 	hdev->le_scan_interval = interval;
4824 	hdev->le_scan_window = window;
4826 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4829 	/* If background scan is running, restart it so new parameters are
4832 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4833 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4834 		struct hci_request req;
4836 		hci_req_init(&req, hdev);
4838 		hci_req_add_le_scan_disable(&req);
4839 		hci_req_add_le_passive_scan(&req);
4841 		hci_req_run(&req, NULL);
4844 	hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_FAST_CONNECTABLE: on failure
 * send a command-status error; on success sync HCI_FAST_CONNECTABLE
 * with the requested value and notify about the settings change.
 */
4849 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4852 	struct mgmt_pending_cmd *cmd;
4854 	BT_DBG("status 0x%02x", status);
4858 	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4863 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4864 			mgmt_status(status));
4866 	struct mgmt_mode *cp = cmd->param;
4869 	hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4871 	hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4873 	send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4874 	new_settings(hdev, cmd->sk);
4877 	mgmt_pending_remove(cmd);
4880 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle the fast-connectable
 * page-scan parameters.  Requires BR/EDR enabled and controller
 * version >= 1.2.  When powered off only the flag is flipped; when
 * powered a Write Page Scan Activity request is issued and answered in
 * fast_connectable_complete().
 */
4883 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4884 				void *data, u16 len)
4886 	struct mgmt_mode *cp = data;
4887 	struct mgmt_pending_cmd *cmd;
4888 	struct hci_request req;
4891 	BT_DBG("%s", hdev->name);
4893 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4894 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
4895 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4896 				       MGMT_STATUS_NOT_SUPPORTED);
4898 	if (cp->val != 0x00 && cp->val != 0x01)
4899 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4900 				       MGMT_STATUS_INVALID_PARAMS);
4904 	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4905 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
	/* No change requested: just reply with the current settings */
4910 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4911 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4916 	if (!hdev_is_powered(hdev)) {
4917 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4918 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4920 		new_settings(hdev, sk);
4924 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4931 	hci_req_init(&req, hdev);
4933 	write_fast_connectable(&req, cp->val);
4935 	err = hci_req_run(&req, fast_connectable_complete);
4937 	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4938 			      MGMT_STATUS_FAILED);
4939 	mgmt_pending_remove(cmd);
4943 	hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_BREDR: on failure roll back
 * the optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * on success reply with the updated settings.
 */
4948 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4950 	struct mgmt_pending_cmd *cmd;
4952 	BT_DBG("status 0x%02x", status);
4956 	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4961 	u8 mgmt_err = mgmt_status(status);
4963 	/* We need to restore the flag if related HCI commands
4966 	hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4968 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4970 	send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4971 	new_settings(hdev, cmd->sk);
4974 	mgmt_pending_remove(cmd);
4977 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable the BR/EDR transport on a
 * dual-mode controller.  Requires both BR/EDR and LE capability and LE
 * enabled.  Disabling while powered is rejected, as is re-enabling when
 * a static address or secure connections would make the resulting
 * configuration invalid.  The HCI_BREDR_ENABLED flag is set before the
 * request runs so advertising data is generated with the right flags;
 * set_bredr_complete() rolls it back on failure.
 */
4980 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4982 	struct mgmt_mode *cp = data;
4983 	struct mgmt_pending_cmd *cmd;
4984 	struct hci_request req;
4987 	BT_DBG("request for %s", hdev->name);
4989 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4990 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4991 				       MGMT_STATUS_NOT_SUPPORTED);
4993 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4994 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4995 				       MGMT_STATUS_REJECTED);
4997 	if (cp->val != 0x00 && cp->val != 0x01)
4998 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4999 				       MGMT_STATUS_INVALID_PARAMS);
5003 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5004 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5008 	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings too */
5010 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5011 		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5012 		hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5013 		hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5014 		hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5017 	hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5019 	err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5023 	err = new_settings(hdev, sk);
5027 	/* Reject disabling when powered on */
5029 	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5030 			      MGMT_STATUS_REJECTED);
5033 	/* When configuring a dual-mode controller to operate
5034 	 * with LE only and using a static address, then switching
5035 	 * BR/EDR back on is not allowed.
5037 	 * Dual-mode controllers shall operate with the public
5038 	 * address as its identity address for BR/EDR and LE. So
5039 	 * reject the attempt to create an invalid configuration.
5041 	 * The same restrictions applies when secure connections
5042 	 * has been enabled. For BR/EDR this is a controller feature
5043 	 * while for LE it is a host stack feature. This means that
5044 	 * switching BR/EDR back on when secure connections has been
5045 	 * enabled is not a supported transaction.
5047 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5048 	    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5049 	     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5050 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5051 				      MGMT_STATUS_REJECTED);
5056 	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5057 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5062 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5068 	/* We need to flip the bit already here so that update_adv_data
5069 	 * generates the correct flags.
5071 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5073 	hci_req_init(&req, hdev);
5075 	write_fast_connectable(&req, false);
5076 	__hci_update_page_scan(&req);
5078 	/* Since only the advertising data flags will change, there
5079 	 * is no need to update the scan response data.
5081 	update_adv_data(&req);
5083 	err = hci_req_run(&req, set_bredr_complete);
5085 	mgmt_pending_remove(cmd);
5088 	hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_SECURE_CONN: on failure report
 * the error; otherwise set HCI_SC_ENABLED/HCI_SC_ONLY according to the
 * requested value (0x00 off, 0x01 SC enabled, 0x02 SC only) and notify
 * about the settings change.
 */
5092 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5094 	struct mgmt_pending_cmd *cmd;
5095 	struct mgmt_mode *cp;
5097 	BT_DBG("%s status %u", hdev->name, status);
5101 	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5106 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5107 			mgmt_status(status));
	/* val 0x00: disable both SC and SC-only mode */
5115 	hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5116 	hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	/* val 0x01: SC enabled, SC-only off */
5119 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5120 	hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	/* val 0x02: SC-only mode */
5123 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5124 	hci_dev_set_flag(hdev, HCI_SC_ONLY);
5128 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5129 	new_settings(hdev, cmd->sk);
5132 	mgmt_pending_remove(cmd);
5134 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: enable/disable Secure Connections
 * (0x00 off, 0x01 on, 0x02 SC-only).  When the controller cannot be
 * reached (powered off, not SC capable, or BR/EDR disabled) the flags
 * are toggled directly; otherwise a Write Secure Connections Host
 * Support command is issued and finished in sc_enable_complete().
 */
5137 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5138 			   void *data, u16 len)
5140 	struct mgmt_mode *cp = data;
5141 	struct mgmt_pending_cmd *cmd;
5142 	struct hci_request req;
5146 	BT_DBG("request for %s", hdev->name);
5148 	if (!lmp_sc_capable(hdev) &&
5149 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5150 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5151 				       MGMT_STATUS_NOT_SUPPORTED);
	/* On BR/EDR, SC requires SSP to be enabled first */
5153 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5154 	    lmp_sc_capable(hdev) &&
5155 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5156 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5157 				       MGMT_STATUS_REJECTED);
5159 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5160 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5161 				       MGMT_STATUS_INVALID_PARAMS);
5165 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5166 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5170 		changed = !hci_dev_test_and_set_flag(hdev,
5172 		if (cp->val == 0x02)
5173 			hci_dev_set_flag(hdev, HCI_SC_ONLY);
5175 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5177 		changed = hci_dev_test_and_clear_flag(hdev,
5179 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5182 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5187 		err = new_settings(hdev, sk);
5192 	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5193 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
	/* No change relative to current SC/SC-only state: just reply */
5200 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5201 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5202 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5206 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5212 	hci_req_init(&req, hdev);
5213 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5214 	err = hci_req_run(&req, sc_enable_complete);
5216 	mgmt_pending_remove(cmd);
5221 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 keep and also generate debug keys (SSP debug mode).  When
 * powered and SSP is enabled, toggling generation sends Write SSP Debug
 * Mode to the controller.
 */
5225 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5226 			  void *data, u16 len)
5228 	struct mgmt_mode *cp = data;
5229 	bool changed, use_changed;
5232 	BT_DBG("request for %s", hdev->name);
5234 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5235 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5236 				       MGMT_STATUS_INVALID_PARAMS);
5241 	changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5243 	changed = hci_dev_test_and_clear_flag(hdev,
5244 					      HCI_KEEP_DEBUG_KEYS);
5246 	if (cp->val == 0x02)
5247 		use_changed = !hci_dev_test_and_set_flag(hdev,
5248 							 HCI_USE_DEBUG_KEYS);
5250 	use_changed = hci_dev_test_and_clear_flag(hdev,
5251 						  HCI_USE_DEBUG_KEYS);
	/* Push the new debug-mode setting to a powered, SSP-enabled
	 * controller
	 */
5253 	if (hdev_is_powered(hdev) && use_changed &&
5254 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5255 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5256 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5257 			     sizeof(mode), &mode);
5260 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5265 	err = new_settings(hdev, sk);
5268 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy.  Only allowed
 * while powered off.  Enabling stores the supplied IRK and marks the
 * RPA as expired so a fresh resolvable private address gets generated;
 * disabling wipes the IRK.
 */
5272 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5275 	struct mgmt_cp_set_privacy *cp = cp_data;
5279 	BT_DBG("request for %s", hdev->name);
5281 	if (!lmp_le_capable(hdev))
5282 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5283 				       MGMT_STATUS_NOT_SUPPORTED);
5285 	if (cp->privacy != 0x00 && cp->privacy != 0x01)
5286 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5287 				       MGMT_STATUS_INVALID_PARAMS);
5289 	if (hdev_is_powered(hdev))
5290 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5291 				       MGMT_STATUS_REJECTED);
5295 	/* If user space supports this command it is also expected to
5296 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5298 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5301 	changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5302 	memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
	/* Force generation of a new RPA on next use */
5303 	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5305 	changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5306 	memset(hdev->irk, 0, sizeof(hdev->irk));
5307 	hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5310 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5315 	err = new_settings(hdev, sk);
5318 	hci_dev_unlock(hdev);
/* Validate the address in an IRK entry from Load IRKs: public LE
 * addresses are always fine; random LE addresses must be static
 * (two most significant bits set).
 */
5322 static bool irk_is_valid(struct mgmt_irk_info *irk)
5324 	switch (irk->addr.type) {
5325 	case BDADDR_LE_PUBLIC:
5328 	case BDADDR_LE_RANDOM:
5329 		/* Two most significant bits shall be set */
5330 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the adapter's stored Identity
 * Resolving Keys with the supplied list.  The count is bounded by
 * max_irk_count (derived from U16_MAX) before the expected_len check,
 * which prevents the u16 length computation from overflowing.
 */
5338 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5341 	struct mgmt_cp_load_irks *cp = cp_data;
5342 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5343 				   sizeof(struct mgmt_irk_info));
5344 	u16 irk_count, expected_len;
5347 	BT_DBG("request for %s", hdev->name);
5349 	if (!lmp_le_capable(hdev))
5350 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5351 				       MGMT_STATUS_NOT_SUPPORTED);
5353 	irk_count = __le16_to_cpu(cp->irk_count);
5354 	if (irk_count > max_irk_count) {
5355 		BT_ERR("load_irks: too big irk_count value %u", irk_count);
5356 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5357 				       MGMT_STATUS_INVALID_PARAMS);
	/* Payload length must exactly match the declared entry count */
5360 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5361 	if (expected_len != len) {
5362 		BT_ERR("load_irks: expected %u bytes, got %u bytes",
5364 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5365 				       MGMT_STATUS_INVALID_PARAMS);
5368 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
	/* Validate every entry before touching the stored key list */
5370 	for (i = 0; i < irk_count; i++) {
5371 		struct mgmt_irk_info *key = &cp->irks[i];
5373 		if (!irk_is_valid(key))
5374 			return mgmt_cmd_status(sk, hdev->id,
5376 					       MGMT_STATUS_INVALID_PARAMS);
5381 	hci_smp_irks_clear(hdev);
5383 	for (i = 0; i < irk_count; i++) {
5384 		struct mgmt_irk_info *irk = &cp->irks[i];
5387 		if (irk->addr.type == BDADDR_LE_PUBLIC)
5388 			addr_type = ADDR_LE_DEV_PUBLIC;
5390 			addr_type = ADDR_LE_DEV_RANDOM;
5392 		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5396 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5398 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5400 	hci_dev_unlock(hdev);
/* Validate a Long Term Key entry from Load Long Term Keys: the master
 * field must be 0/1 and, for random LE addresses, the address must be
 * static (two most significant bits set).
 */
5405 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5407 	if (key->master != 0x00 && key->master != 0x01)
5410 	switch (key->addr.type) {
5411 	case BDADDR_LE_PUBLIC:
5414 	case BDADDR_LE_RANDOM:
5415 		/* Two most significant bits shall be set */
5416 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the adapter's stored SMP
 * Long Term Keys with the supplied list.  Same count/length validation
 * pattern as load_irks(); every entry is validated before the existing
 * keys are cleared.  The mgmt key type is mapped to the SMP key type
 * and authentication level before storing.
 */
5424 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5425 			       void *cp_data, u16 len)
5427 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
5428 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5429 				   sizeof(struct mgmt_ltk_info));
5430 	u16 key_count, expected_len;
5433 	BT_DBG("request for %s", hdev->name);
5435 	if (!lmp_le_capable(hdev))
5436 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5437 				       MGMT_STATUS_NOT_SUPPORTED);
5439 	key_count = __le16_to_cpu(cp->key_count);
5440 	if (key_count > max_key_count) {
5441 		BT_ERR("load_ltks: too big key_count value %u", key_count);
5442 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5443 				       MGMT_STATUS_INVALID_PARAMS);
5446 	expected_len = sizeof(*cp) + key_count *
5447 					sizeof(struct mgmt_ltk_info);
5448 	if (expected_len != len) {
5449 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
5451 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5452 				       MGMT_STATUS_INVALID_PARAMS);
5455 	BT_DBG("%s key_count %u", hdev->name, key_count);
5457 	for (i = 0; i < key_count; i++) {
5458 		struct mgmt_ltk_info *key = &cp->keys[i];
5460 		if (!ltk_is_valid(key))
5461 			return mgmt_cmd_status(sk, hdev->id,
5462 					       MGMT_OP_LOAD_LONG_TERM_KEYS,
5463 					       MGMT_STATUS_INVALID_PARAMS);
5468 	hci_smp_ltks_clear(hdev);
5470 	for (i = 0; i < key_count; i++) {
5471 		struct mgmt_ltk_info *key = &cp->keys[i];
5472 		u8 type, addr_type, authenticated;
5474 		if (key->addr.type == BDADDR_LE_PUBLIC)
5475 			addr_type = ADDR_LE_DEV_PUBLIC;
5477 			addr_type = ADDR_LE_DEV_RANDOM;
		/* Map mgmt LTK type to SMP key type + auth level */
5479 		switch (key->type) {
5480 		case MGMT_LTK_UNAUTHENTICATED:
5481 			authenticated = 0x00;
5482 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5484 		case MGMT_LTK_AUTHENTICATED:
5485 			authenticated = 0x01;
5486 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5488 		case MGMT_LTK_P256_UNAUTH:
5489 			authenticated = 0x00;
5490 			type = SMP_LTK_P256;
5492 		case MGMT_LTK_P256_AUTH:
5493 			authenticated = 0x01;
5494 			type = SMP_LTK_P256;
5496 		case MGMT_LTK_P256_DEBUG:
5497 			authenticated = 0x00;
5498 			type = SMP_LTK_P256_DEBUG;
5503 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5504 			    authenticated, key->val, key->enc_size, key->ediv,
5508 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5511 	hci_dev_unlock(hdev);
/* Pending-command completion for MGMT_OP_GET_CONN_INFO: build the reply
 * from the hci_conn cached values (RSSI/TX power) on success, or
 * "invalid" sentinels on failure, and drop the connection reference
 * taken when the command was queued.
 */
5516 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5518 	struct hci_conn *conn = cmd->user_data;
5519 	struct mgmt_rp_get_conn_info rp;
	/* Echo back the address the caller asked about */
5522 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5524 	if (status == MGMT_STATUS_SUCCESS) {
5525 		rp.rssi = conn->rssi;
5526 		rp.tx_power = conn->tx_power;
5527 		rp.max_tx_power = conn->max_tx_power;
5529 	rp.rssi = HCI_RSSI_INVALID;
5530 	rp.tx_power = HCI_TX_POWER_INVALID;
5531 	rp.max_tx_power = HCI_TX_POWER_INVALID;
5534 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5535 				status, &rp, sizeof(rp));
	/* Release the hold taken in get_conn_info() */
5537 	hci_conn_drop(conn);
/* HCI request completion for the RSSI/TX-power refresh issued by
 * get_conn_info(): recover the connection handle from the last sent
 * command, look up the matching pending command and finish it.
 */
5543 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5546 	struct hci_cp_read_rssi *cp;
5547 	struct mgmt_pending_cmd *cmd;
5548 	struct hci_conn *conn;
5552 	BT_DBG("status 0x%02x", hci_status);
5556 	/* Commands sent in request are either Read RSSI or Read Transmit Power
5557 	 * Level so we check which one was last sent to retrieve connection
5558 	 * handle. Both commands have handle as first parameter so it's safe to
5559 	 * cast data on the same command struct.
5561 	 * First command sent is always Read RSSI and we fail only if it fails.
5562 	 * In other case we simply override error to indicate success as we
5563 	 * already remembered if TX power value is actually valid.
5565 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5567 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5568 	status = MGMT_STATUS_SUCCESS;
5570 	status = mgmt_status(hci_status);
5574 	BT_ERR("invalid sent_cmd in conn_info response");
5578 	handle = __le16_to_cpu(cp->handle);
5579 	conn = hci_conn_hash_lookup_handle(hdev, handle);
5581 	BT_ERR("unknown handle (%d) in conn_info response", handle);
5585 	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5589 	cmd->cmd_complete(cmd, status);
5590 	mgmt_pending_remove(cmd);
5593 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for an active
 * connection.  Cached values in hci_conn are returned directly while
 * fresh (age randomized between conn_info_min_age and conn_info_max_age
 * so clients cannot predict the refresh boundary); otherwise an HCI
 * request reading RSSI and, when needed, TX power is queued and
 * answered asynchronously via conn_info_refresh_complete().
 */
5596 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5599 	struct mgmt_cp_get_conn_info *cp = data;
5600 	struct mgmt_rp_get_conn_info rp;
5601 	struct hci_conn *conn;
5602 	unsigned long conn_info_age;
5605 	BT_DBG("%s", hdev->name);
5607 	memset(&rp, 0, sizeof(rp));
5608 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5609 	rp.addr.type = cp->addr.type;
5611 	if (!bdaddr_type_is_valid(cp->addr.type))
5612 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5613 					 MGMT_STATUS_INVALID_PARAMS,
5618 	if (!hdev_is_powered(hdev)) {
5619 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5620 					MGMT_STATUS_NOT_POWERED, &rp,
	/* Pick the connection table matching the address type */
5625 	if (cp->addr.type == BDADDR_BREDR)
5626 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5629 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5631 	if (!conn || conn->state != BT_CONNECTED) {
5632 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5633 					MGMT_STATUS_NOT_CONNECTED, &rp,
	/* Only one Get Conn Info per connection may be in flight */
5638 	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5639 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5640 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
5644 	/* To avoid client trying to guess when to poll again for information we
5645 	 * calculate conn info age as random value between min/max set in hdev.
5647 	conn_info_age = hdev->conn_info_min_age +
5648 			prandom_u32_max(hdev->conn_info_max_age -
5649 					hdev->conn_info_min_age);
5651 	/* Query controller to refresh cached values if they are too old or were
5654 	if (time_after(jiffies, conn->conn_info_timestamp +
5655 		       msecs_to_jiffies(conn_info_age)) ||
5656 	    !conn->conn_info_timestamp) {
5657 		struct hci_request req;
5658 		struct hci_cp_read_tx_power req_txp_cp;
5659 		struct hci_cp_read_rssi req_rssi_cp;
5660 		struct mgmt_pending_cmd *cmd;
5662 		hci_req_init(&req, hdev);
5663 		req_rssi_cp.handle = cpu_to_le16(conn->handle);
5664 		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5667 		/* For LE links TX power does not change thus we don't need to
5668 		 * query for it once value is known.
5670 		if (!bdaddr_type_is_le(cp->addr.type) ||
5671 		    conn->tx_power == HCI_TX_POWER_INVALID) {
5672 			req_txp_cp.handle = cpu_to_le16(conn->handle);
5673 			req_txp_cp.type = 0x00;
5674 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5675 				    sizeof(req_txp_cp), &req_txp_cp);
5678 		/* Max TX power needs to be read only once per connection */
5679 		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5680 			req_txp_cp.handle = cpu_to_le16(conn->handle);
5681 			req_txp_cp.type = 0x01;
5682 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5683 				    sizeof(req_txp_cp), &req_txp_cp);
5686 		err = hci_req_run(&req, conn_info_refresh_complete);
5690 		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
		/* Keep the connection alive until the request completes;
		 * released in conn_info_cmd_complete()
		 */
5697 		hci_conn_hold(conn);
5698 		cmd->user_data = hci_conn_get(conn);
5699 		cmd->cmd_complete = conn_info_cmd_complete;
5701 		conn->conn_info_timestamp = jiffies;
5703 	/* Cache is valid, just reply with values cached in hci_conn */
5704 	rp.rssi = conn->rssi;
5705 	rp.tx_power = conn->tx_power;
5706 	rp.max_tx_power = conn->max_tx_power;
5708 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5709 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5713 	hci_dev_unlock(hdev);
/* Pending-command completion for MGMT_OP_GET_CLOCK_INFO: build the
 * reply with the local clock and, when a connection was involved, the
 * piconet clock and accuracy; then drop the connection reference.
 */
5717 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5719 	struct hci_conn *conn = cmd->user_data;
5720 	struct mgmt_rp_get_clock_info rp;
5721 	struct hci_dev *hdev;
5724 	memset(&rp, 0, sizeof(rp));
5725 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5730 	hdev = hci_dev_get(cmd->index);
5732 	rp.local_clock = cpu_to_le32(hdev->clock);
5737 	rp.piconet_clock = cpu_to_le32(conn->clock);
5738 	rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5742 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
	/* Release the hold taken in get_clock_info() */
5746 	hci_conn_drop(conn);
/* HCI request completion for MGMT_OP_GET_CLOCK_INFO: recover the
 * connection (if the last Read Clock was for a piconet clock, i.e.
 * "which" is non-zero) and finish the matching pending command.
 */
5753 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5755 	struct hci_cp_read_clock *hci_cp;
5756 	struct mgmt_pending_cmd *cmd;
5757 	struct hci_conn *conn;
5759 	BT_DBG("%s status %u", hdev->name, status);
5763 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	/* which != 0 means the piconet clock of a specific connection */
5767 	if (hci_cp->which) {
5768 		u16 handle = __le16_to_cpu(hci_cp->handle);
5769 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5774 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5778 	cmd->cmd_complete(cmd, mgmt_status(status));
5779 	mgmt_pending_remove(cmd);
5782 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): queue HCI Read Clock
 * for the local clock and, when a connected peer address is given, a
 * second Read Clock for that connection's piconet clock.  Completed
 * asynchronously via get_clock_info_complete().
 */
5785 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5788 	struct mgmt_cp_get_clock_info *cp = data;
5789 	struct mgmt_rp_get_clock_info rp;
5790 	struct hci_cp_read_clock hci_cp;
5791 	struct mgmt_pending_cmd *cmd;
5792 	struct hci_request req;
5793 	struct hci_conn *conn;
5796 	BT_DBG("%s", hdev->name);
5798 	memset(&rp, 0, sizeof(rp));
5799 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5800 	rp.addr.type = cp->addr.type;
5802 	if (cp->addr.type != BDADDR_BREDR)
5803 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5804 					 MGMT_STATUS_INVALID_PARAMS,
5809 	if (!hdev_is_powered(hdev)) {
5810 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5811 					MGMT_STATUS_NOT_POWERED, &rp,
	/* A non-ANY address must refer to an established ACL link */
5816 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5817 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5819 		if (!conn || conn->state != BT_CONNECTED) {
5820 			err = mgmt_cmd_complete(sk, hdev->id,
5821 						MGMT_OP_GET_CLOCK_INFO,
5822 						MGMT_STATUS_NOT_CONNECTED,
5830 	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5836 	cmd->cmd_complete = clock_info_cmd_complete;
5838 	hci_req_init(&req, hdev);
	/* First Read Clock: local clock (zeroed cp => which = 0x00) */
5840 	memset(&hci_cp, 0, sizeof(hci_cp));
5841 	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	/* Hold the connection until clock_info_cmd_complete() runs */
5844 	hci_conn_hold(conn);
5845 	cmd->user_data = hci_conn_get(conn);
5847 	hci_cp.handle = cpu_to_le16(conn->handle);
5848 	hci_cp.which = 0x01; /* Piconet clock */
5849 	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5852 	err = hci_req_run(&req, get_clock_info_complete);
5854 	mgmt_pending_remove(cmd);
5857 	hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists and
 * is in the BT_CONNECTED state. (The return statements between the
 * checks are missing from this extraction.)
 */
5861 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5863 struct hci_conn *conn;
5865 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5869 if (conn->dst_type != type)
5872 if (conn->state != BT_CONNECTED)
5878 /* This function requires the caller holds hdev->lock */
/* Create (or look up) the connection parameters for addr/addr_type and
 * move them onto the action list matching the requested auto-connect
 * policy, triggering a background-scan update where needed.
 * BUGFIX: "&params"/"&param" had been mojibake-corrupted to the pilcrow
 * form "¶ms" (HTML entity "&para;" substitution); restored the
 * address-of expressions on the three list operations below.
 */
5879 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5880 u8 addr_type, u8 auto_connect)
5882 struct hci_dev *hdev = req->hdev;
5883 struct hci_conn_params *params;
5885 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do if the policy is already in effect. */
5889 if (params->auto_connect == auto_connect)
/* Detach from whichever action list (pend_le_conns/pend_le_reports)
 * the entry is currently on before re-filing it. */
5892 list_del_init(&params->action);
5894 switch (auto_connect) {
5895 case HCI_AUTO_CONN_DISABLED:
5896 case HCI_AUTO_CONN_LINK_LOSS:
5897 __hci_update_background_scan(req);
5899 case HCI_AUTO_CONN_REPORT:
5900 list_add(&params->action, &hdev->pend_le_reports);
5901 __hci_update_background_scan(req);
5903 case HCI_AUTO_CONN_DIRECT:
5904 case HCI_AUTO_CONN_ALWAYS:
/* Only queue for auto-connection if not already connected. */
5905 if (!is_connected(hdev, addr, addr_type)) {
5906 list_add(&params->action, &hdev->pend_le_conns);
5907 __hci_update_background_scan(req);
5912 params->auto_connect = auto_connect;
5914 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT Device Added event to all sockets except the one that
 * issued the command (sk is passed as the skip-socket).
 */
5920 static void device_added(struct sock *sk, struct hci_dev *hdev,
5921 bdaddr_t *bdaddr, u8 type, u8 action)
5923 struct mgmt_ev_device_added ev;
5925 bacpy(&ev.addr.bdaddr, bdaddr);
5926 ev.addr.type = type;
5929 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Add Device: finds the pending
 * MGMT command and completes it with the translated HCI status.
 */
5932 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5934 struct mgmt_pending_cmd *cmd;
5936 BT_DBG("status 0x%02x", status);
5940 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5944 cmd->cmd_complete(cmd, mgmt_status(status));
5945 mgmt_pending_remove(cmd);
5948 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_DEVICE handler: whitelists a BR/EDR device or configures
 * LE auto-connect parameters, depending on the address type.
 * Actions: 0x00 background scan (LE report), 0x01 allow incoming /
 * direct connect, 0x02 auto-connect always.
 */
5951 static int add_device(struct sock *sk, struct hci_dev *hdev,
5952 void *data, u16 len)
5954 struct mgmt_cp_add_device *cp = data;
5955 struct mgmt_pending_cmd *cmd;
5956 struct hci_request req;
5957 u8 auto_conn, addr_type;
5960 BT_DBG("%s", hdev->name);
/* Reject invalid address types and the ANY (all-zero) address. */
5962 if (!bdaddr_type_is_valid(cp->addr.type) ||
5963 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5964 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5965 MGMT_STATUS_INVALID_PARAMS,
5966 &cp->addr, sizeof(cp->addr));
5968 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5969 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5970 MGMT_STATUS_INVALID_PARAMS,
5971 &cp->addr, sizeof(cp->addr));
5973 hci_req_init(&req, hdev);
5977 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5983 cmd->cmd_complete = addr_cmd_complete;
5985 if (cp->addr.type == BDADDR_BREDR) {
5986 /* Only incoming connections action is supported for now */
5987 if (cp->action != 0x01) {
5988 err = cmd->cmd_complete(cmd,
5989 MGMT_STATUS_INVALID_PARAMS);
5990 mgmt_pending_remove(cmd);
5994 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5999 __hci_update_page_scan(&req);
/* LE path: translate the MGMT address type to the HCI one. */
6004 if (cp->addr.type == BDADDR_LE_PUBLIC)
6005 addr_type = ADDR_LE_DEV_PUBLIC;
6007 addr_type = ADDR_LE_DEV_RANDOM;
6009 if (cp->action == 0x02)
6010 auto_conn = HCI_AUTO_CONN_ALWAYS;
6011 else if (cp->action == 0x01)
6012 auto_conn = HCI_AUTO_CONN_DIRECT;
6014 auto_conn = HCI_AUTO_CONN_REPORT;
6016 /* If the connection parameters don't exist for this device,
6017 * they will be created and configured with defaults.
6019 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6021 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6022 mgmt_pending_remove(cmd);
6027 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6029 err = hci_req_run(&req, add_device_complete);
6031 /* ENODATA means no HCI commands were needed (e.g. if
6032 * the adapter is powered off).
6034 if (err == -ENODATA)
6035 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6036 mgmt_pending_remove(cmd);
6040 hci_dev_unlock(hdev);
/* Emit the MGMT Device Removed event to all sockets except sk. */
6044 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6045 bdaddr_t *bdaddr, u8 type)
6047 struct mgmt_ev_device_removed ev;
6049 bacpy(&ev.addr.bdaddr, bdaddr);
6050 ev.addr.type = type;
6052 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Remove Device: completes the
 * pending MGMT command with the translated HCI status.
 */
6055 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6057 struct mgmt_pending_cmd *cmd;
6059 BT_DBG("status 0x%02x", status);
6063 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6067 cmd->cmd_complete(cmd, mgmt_status(status));
6068 mgmt_pending_remove(cmd);
6071 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_DEVICE handler: removes one device (BR/EDR whitelist
 * entry or LE connection parameters), or — when the address is
 * BDADDR_ANY with type 0 — clears all whitelist entries and every
 * non-disabled set of LE connection parameters.
 * BUGFIX: "&params" had been mojibake-corrupted to "¶ms" (HTML
 * entity "&para;" substitution) on the two list_del() calls below;
 * restored the address-of expressions.
 */
6074 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6075 void *data, u16 len)
6077 struct mgmt_cp_remove_device *cp = data;
6078 struct mgmt_pending_cmd *cmd;
6079 struct hci_request req;
6082 BT_DBG("%s", hdev->name);
6084 hci_req_init(&req, hdev);
6088 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6094 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove a single device. */
6096 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6097 struct hci_conn_params *params;
6100 if (!bdaddr_type_is_valid(cp->addr.type)) {
6101 err = cmd->cmd_complete(cmd,
6102 MGMT_STATUS_INVALID_PARAMS);
6103 mgmt_pending_remove(cmd);
6107 if (cp->addr.type == BDADDR_BREDR) {
6108 err = hci_bdaddr_list_del(&hdev->whitelist,
6112 err = cmd->cmd_complete(cmd,
6113 MGMT_STATUS_INVALID_PARAMS);
6114 mgmt_pending_remove(cmd);
6118 __hci_update_page_scan(&req);
6120 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path: translate the MGMT address type to the HCI one. */
6125 if (cp->addr.type == BDADDR_LE_PUBLIC)
6126 addr_type = ADDR_LE_DEV_PUBLIC;
6128 addr_type = ADDR_LE_DEV_RANDOM;
6130 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6133 err = cmd->cmd_complete(cmd,
6134 MGMT_STATUS_INVALID_PARAMS);
6135 mgmt_pending_remove(cmd);
/* Disabled entries were never added via Add Device, so removing
 * them through this command is an invalid request. */
6139 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6140 err = cmd->cmd_complete(cmd,
6141 MGMT_STATUS_INVALID_PARAMS);
6142 mgmt_pending_remove(cmd);
6146 list_del(&params->action);
6147 list_del(&params->list);
6149 __hci_update_background_scan(&req);
6151 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: clear everything; only address type 0 is allowed. */
6153 struct hci_conn_params *p, *tmp;
6154 struct bdaddr_list *b, *btmp;
6156 if (cp->addr.type) {
6157 err = cmd->cmd_complete(cmd,
6158 MGMT_STATUS_INVALID_PARAMS);
6159 mgmt_pending_remove(cmd);
6163 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6164 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6169 __hci_update_page_scan(&req);
6171 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6172 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6174 device_removed(sk, hdev, &p->addr, p->addr_type);
6175 list_del(&p->action);
6180 BT_DBG("All LE connection parameters were removed");
6182 __hci_update_background_scan(&req);
6186 err = hci_req_run(&req, remove_device_complete);
6188 /* ENODATA means no HCI commands were needed (e.g. if
6189 * the adapter is powered off).
6191 if (err == -ENODATA)
6192 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6193 mgmt_pending_remove(cmd);
6197 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replaces the stored LE connection
 * parameters with the list supplied by userspace. Invalid entries are
 * skipped with an error log rather than failing the whole command.
 * BUGFIX: "&param" had been mojibake-corrupted to "¶m" (HTML
 * entity "&para;" substitution) in the BT_DBG() call and the
 * hci_conn_params_add() call below; restored the address-of
 * expressions.
 */
6201 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6204 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on param_count so expected_len cannot overflow u16. */
6205 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6206 sizeof(struct mgmt_conn_param));
6207 u16 param_count, expected_len;
6210 if (!lmp_le_capable(hdev))
6211 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6212 MGMT_STATUS_NOT_SUPPORTED);
6214 param_count = __le16_to_cpu(cp->param_count);
6215 if (param_count > max_param_count) {
6216 BT_ERR("load_conn_param: too big param_count value %u",
6218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6219 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared entry count exactly. */
6222 expected_len = sizeof(*cp) + param_count *
6223 sizeof(struct mgmt_conn_param);
6224 if (expected_len != len) {
6225 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6227 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6228 MGMT_STATUS_INVALID_PARAMS);
6231 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop all existing non-explicitly-added (disabled) parameters. */
6235 hci_conn_params_clear_disabled(hdev);
6237 for (i = 0; i < param_count; i++) {
6238 struct mgmt_conn_param *param = &cp->params[i];
6239 struct hci_conn_params *hci_param;
6240 u16 min, max, latency, timeout;
6243 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
6246 if (param->addr.type == BDADDR_LE_PUBLIC) {
6247 addr_type = ADDR_LE_DEV_PUBLIC;
6248 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6249 addr_type = ADDR_LE_DEV_RANDOM;
6251 BT_ERR("Ignoring invalid connection parameters");
6255 min = le16_to_cpu(param->min_interval);
6256 max = le16_to_cpu(param->max_interval);
6257 latency = le16_to_cpu(param->latency);
6258 timeout = le16_to_cpu(param->timeout);
6260 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6261 min, max, latency, timeout);
6263 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6264 BT_ERR("Ignoring invalid connection parameters");
6268 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6271 BT_ERR("Failed to add connection parameters");
6275 hci_param->conn_min_interval = min;
6276 hci_param->conn_max_interval = max;
6277 hci_param->conn_latency = latency;
6278 hci_param->supervision_timeout = timeout;
6281 hci_dev_unlock(hdev);
6283 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggles the externally-
 * configured flag on an unpowered controller that declares the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk, and (re)registers the index if the
 * configured state changed as a result.
 */
6287 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6288 void *data, u16 len)
6290 struct mgmt_cp_set_external_config *cp = data;
6294 BT_DBG("%s", hdev->name);
6296 if (hdev_is_powered(hdev))
6297 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6298 MGMT_STATUS_REJECTED);
6300 if (cp->config != 0x00 && cp->config != 0x01)
6301 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6302 MGMT_STATUS_INVALID_PARAMS);
6304 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6305 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6306 MGMT_STATUS_NOT_SUPPORTED);
/* Track whether the flag actually changed so events are only sent
 * when something happened. */
6311 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6313 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6315 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6322 err = new_options(hdev, sk);
/* If the unconfigured state no longer matches reality, re-announce
 * the index under its new (un)configured identity. */
6324 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6325 mgmt_index_removed(hdev);
6327 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6328 hci_dev_set_flag(hdev, HCI_CONFIG);
6329 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6331 queue_work(hdev->req_workqueue, &hdev->power_on);
6333 set_bit(HCI_RAW, &hdev->flags);
6334 mgmt_index_added(hdev);
6339 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: stores a public address for an
 * unpowered controller whose driver provides a set_bdaddr callback.
 * If this makes an unconfigured controller fully configured, the index
 * is re-announced as a regular one and powered on for setup.
 */
6343 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6344 void *data, u16 len)
6346 struct mgmt_cp_set_public_address *cp = data;
6350 BT_DBG("%s", hdev->name);
6352 if (hdev_is_powered(hdev))
6353 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6354 MGMT_STATUS_REJECTED);
6356 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6357 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6358 MGMT_STATUS_INVALID_PARAMS);
6360 if (!hdev->set_bdaddr)
6361 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6362 MGMT_STATUS_NOT_SUPPORTED);
6366 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6367 bacpy(&hdev->public_addr, &cp->bdaddr);
6369 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6376 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6377 err = new_options(hdev, sk);
6379 if (is_configured(hdev)) {
6380 mgmt_index_removed(hdev);
6382 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6384 hci_dev_set_flag(hdev, HCI_CONFIG);
6385 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6387 queue_work(hdev->req_workqueue, &hdev->power_on);
6391 hci_dev_unlock(hdev);
/* Append one EIR field (length, type, data) to the buffer and return
 * the new total length. Caller must guarantee sufficient space.
 */
6395 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* Field length byte covers the type octet plus the payload. */
6398 eir[eir_len++] = sizeof(type) + data_len;
6399 eir[eir_len++] = type;
6400 memcpy(&eir[eir_len], data, data_len);
6401 eir_len += data_len;
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: builds the extended OOB
 * data (EIR-formatted) for BR/EDR or LE, including — for LE with
 * Secure Connections — freshly generated SC OOB hash/randomizer
 * values, and also broadcasts it via MGMT_EV_LOCAL_OOB_DATA_UPDATED
 * to sockets that subscribed to OOB data events.
 */
6406 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6407 void *data, u16 data_len)
6409 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6410 struct mgmt_rp_read_local_oob_ext_data *rp;
6413 u8 status, flags, role, addr[7], hash[16], rand[16];
6416 BT_DBG("%s", hdev->name);
6418 if (!hdev_is_powered(hdev))
6419 return mgmt_cmd_complete(sk, hdev->id,
6420 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6421 MGMT_STATUS_NOT_POWERED,
6422 &cp->type, sizeof(cp->type));
/* cp->type is a bitmask of address types; only pure BR/EDR or the
 * LE public+random combination are accepted. */
6425 case BIT(BDADDR_BREDR):
6426 status = mgmt_bredr_support(hdev);
6428 return mgmt_cmd_complete(sk, hdev->id,
6429 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6434 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6435 status = mgmt_le_support(hdev);
6437 return mgmt_cmd_complete(sk, hdev->id,
6438 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
/* LE EIR budget: address (9) + role (3) + hash (18) + rand (18)
 * + flags (3) fields. */
6441 eir_len = 9 + 3 + 18 + 18 + 3;
6444 return mgmt_cmd_complete(sk, hdev->id,
6445 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6446 MGMT_STATUS_INVALID_PARAMS,
6447 &cp->type, sizeof(cp->type));
6452 rp_len = sizeof(*rp) + eir_len;
6453 rp = kmalloc(rp_len, GFP_ATOMIC);
6455 hci_dev_unlock(hdev);
6461 case BIT(BDADDR_BREDR):
6462 eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
6463 hdev->dev_class, 3);
6465 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
/* SC OOB values are only generated when SC is enabled; a failure
 * to generate them aborts the command. */
6466 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6467 smp_generate_oob(hdev, hash, rand) < 0) {
6468 hci_dev_unlock(hdev);
6469 err = mgmt_cmd_complete(sk, hdev->id,
6470 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6472 &cp->type, sizeof(cp->type));
/* Pick the advertised LE address: RPA when privacy is on, else
 * the static address when forced/required, else the public one. */
6476 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6477 memcpy(addr, &hdev->rpa, 6);
6479 } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6480 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6481 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6482 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6483 memcpy(addr, &hdev->static_addr, 6);
6486 memcpy(addr, &hdev->bdaddr, 6);
6490 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6491 addr, sizeof(addr));
6493 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6498 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6499 &role, sizeof(role));
6501 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6502 eir_len = eir_append_data(rp->eir, eir_len,
6504 hash, sizeof(hash));
6506 eir_len = eir_append_data(rp->eir, eir_len,
6508 rand, sizeof(rand));
6511 flags = get_adv_discov_flags(hdev);
6513 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6514 flags |= LE_AD_NO_BREDR;
6516 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6517 &flags, sizeof(flags));
6521 rp->type = cp->type;
6522 rp->eir_len = cpu_to_le16(eir_len);
6524 hci_dev_unlock(hdev);
/* Requesting OOB data implicitly subscribes this socket to future
 * OOB data update events. */
6526 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6528 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6529 MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
6533 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6534 rp, sizeof(*rp) + eir_len,
6535 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the advertising instance flags this controller supports.
 * TX power is only advertised when the controller reported a valid
 * advertising TX power level.
 */
6543 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6547 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6548 flags |= MGMT_ADV_FLAG_DISCOV;
6549 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6550 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6552 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6553 flags |= MGMT_ADV_FLAG_TX_POWER;
/* MGMT_OP_READ_ADV_FEATURES handler: reports supported advertising
 * flags, data size limits and the (currently at most one) configured
 * advertising instance.
 */
6558 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6559 void *data, u16 data_len)
6561 struct mgmt_rp_read_adv_features *rp;
6565 u32 supported_flags;
6567 BT_DBG("%s", hdev->name);
6569 if (!lmp_le_capable(hdev))
6570 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6571 MGMT_STATUS_REJECTED);
6575 rp_len = sizeof(*rp);
6577 /* Currently only one instance is supported, so just add 1 to the
6580 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6584 rp = kmalloc(rp_len, GFP_ATOMIC);
6586 hci_dev_unlock(hdev);
6590 supported_flags = get_supported_adv_flags(hdev);
6592 rp->supported_flags = cpu_to_le32(supported_flags);
6593 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6594 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6595 rp->max_instances = 1;
6597 /* Currently only one instance is supported, so simply return the
6598 * current instance number.
6601 rp->num_instances = 1;
6602 rp->instance[0] = 1;
6604 rp->num_instances = 0;
6607 hci_dev_unlock(hdev);
6609 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6610 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Validate user-supplied advertising/scan-response TLV data: every
 * field must fit within the total length, and fields that the kernel
 * manages itself (Flags, TX Power — depending on adv_flags) must not
 * appear in caller-provided advertising data.
 */
6617 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6618 u8 len, bool is_adv_data)
6620 u8 max_len = HCI_MAX_AD_LENGTH;
6622 bool flags_managed = false;
6623 bool tx_power_managed = false;
6624 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6625 MGMT_ADV_FLAG_MANAGED_FLAGS;
6627 if (is_adv_data && (adv_flags & flags_params)) {
6628 flags_managed = true;
6632 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6633 tx_power_managed = true;
6640 /* Make sure that the data is correctly formatted. */
6641 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6644 if (flags_managed && data[i + 1] == EIR_FLAGS)
6647 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6650 /* If the current field length would exceed the total data
6651 * length, then it's invalid.
6653 if (i + cur_len >= len)
/* HCI request completion callback for Add Advertising. On failure the
 * just-configured instance is torn down again and an Advertising
 * Removed event is emitted; the pending MGMT command (if any) is then
 * completed with the translated status.
 */
6660 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6663 struct mgmt_pending_cmd *cmd;
6664 struct mgmt_rp_add_advertising rp;
6666 BT_DBG("status %d", status);
6670 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
/* Roll back the instance on failure; cmd may be NULL here, hence
 * the conditional socket below. */
6673 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6674 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6675 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6684 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6685 mgmt_status(status));
6687 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6688 mgmt_status(status), &rp, sizeof(rp));
6690 mgmt_pending_remove(cmd);
6693 hci_dev_unlock(hdev);
/* Delayed-work handler that clears the advertising instance when its
 * configured timeout elapses.
 */
6696 static void adv_timeout_expired(struct work_struct *work)
6698 struct hci_dev *hdev = container_of(work, struct hci_dev,
6699 adv_instance.timeout_exp.work);
6701 hdev->adv_instance.timeout = 0;
6704 clear_adv_instance(hdev);
6705 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler: stores advertising/scan-response
 * data for instance 1 (the only supported instance), arms the optional
 * expiry timer, and — when powered and not using the legacy Set
 * Advertising mode — programs the controller via an HCI request.
 */
6708 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6709 void *data, u16 data_len)
6711 struct mgmt_cp_add_advertising *cp = data;
6712 struct mgmt_rp_add_advertising rp;
6714 u32 supported_flags;
6718 struct mgmt_pending_cmd *cmd;
6719 struct hci_request req;
6721 BT_DBG("%s", hdev->name);
6723 status = mgmt_le_support(hdev);
6725 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6728 flags = __le32_to_cpu(cp->flags);
6729 timeout = __le16_to_cpu(cp->timeout);
6731 /* The current implementation only supports adding one instance and only
6732 * a subset of the specified flags.
6734 supported_flags = get_supported_adv_flags(hdev);
6735 if (cp->instance != 0x01 || (flags & ~supported_flags))
6736 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6737 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running clock, i.e. a powered adapter. */
6741 if (timeout && !hdev_is_powered(hdev)) {
6742 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6743 MGMT_STATUS_REJECTED);
/* Serialize against other advertising/LE state changes. */
6747 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6748 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6749 pending_find(MGMT_OP_SET_LE, hdev)) {
6750 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6755 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6756 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6757 cp->scan_rsp_len, false)) {
6758 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6759 MGMT_STATUS_INVALID_PARAMS);
6763 INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
6765 hdev->adv_instance.flags = flags;
6766 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6767 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6769 if (cp->adv_data_len)
6770 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
/* Scan-response data follows the adv data in the same buffer. */
6772 if (cp->scan_rsp_len)
6773 memcpy(hdev->adv_instance.scan_rsp_data,
6774 cp->data + cp->adv_data_len, cp->scan_rsp_len);
/* Re-arm (or cancel) a previously running expiry timer. */
6776 if (hdev->adv_instance.timeout)
6777 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6779 hdev->adv_instance.timeout = timeout;
6782 queue_delayed_work(hdev->workqueue,
6783 &hdev->adv_instance.timeout_exp,
6784 msecs_to_jiffies(timeout * 1000));
6786 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6787 advertising_added(sk, hdev, 1);
6789 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6790 * we have no HCI communication to make. Simply return.
6792 if (!hdev_is_powered(hdev) ||
6793 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6795 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6796 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6800 /* We're good to go, update advertising data, parameters, and start
6803 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6810 hci_req_init(&req, hdev);
6812 update_adv_data(&req);
6813 update_scan_rsp_data(&req);
6814 enable_advertising(&req);
6816 err = hci_req_run(&req, add_advertising_complete);
6818 mgmt_pending_remove(cmd);
6821 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising. The instance
 * itself has already been removed at this point, so the MGMT command
 * is completed with SUCCESS regardless of the HCI status.
 */
6826 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6829 struct mgmt_pending_cmd *cmd;
6830 struct mgmt_rp_remove_advertising rp;
6832 BT_DBG("status %d", status);
6836 /* A failure status here only means that we failed to disable
6837 * advertising. Otherwise, the advertising instance has been removed,
6838 * so report success.
6840 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6846 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6848 mgmt_pending_remove(cmd);
6851 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: tears down the advertising
 * instance (instance 1, or 0 meaning "all") and, when the controller
 * is powered and not in legacy Set Advertising mode, disables
 * advertising via an HCI request.
 */
6854 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6855 void *data, u16 data_len)
6857 struct mgmt_cp_remove_advertising *cp = data;
6858 struct mgmt_rp_remove_advertising rp;
6860 struct mgmt_pending_cmd *cmd;
6861 struct hci_request req;
6863 BT_DBG("%s", hdev->name);
6865 /* The current implementation only allows modifying instance no 1. A
6866 * value of 0 indicates that all instances should be cleared.
6868 if (cp->instance > 1)
6869 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6870 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against concurrent advertising/LE state changes. */
6874 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6875 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6876 pending_find(MGMT_OP_SET_LE, hdev)) {
6877 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6882 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
6883 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6884 MGMT_STATUS_INVALID_PARAMS);
6888 if (hdev->adv_instance.timeout)
6889 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6891 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6893 advertising_removed(sk, hdev, 1);
6895 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6897 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6898 * we have no HCI communication to make. Simply return.
6900 if (!hdev_is_powered(hdev) ||
6901 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6903 err = mgmt_cmd_complete(sk, hdev->id,
6904 MGMT_OP_REMOVE_ADVERTISING,
6905 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6909 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6916 hci_req_init(&req, hdev);
6917 disable_advertising(&req);
6919 err = hci_req_run(&req, remove_advertising_complete);
6921 mgmt_pending_remove(cmd);
6924 hci_dev_unlock(hdev);
/* Dispatch table for MGMT commands, indexed by opcode: handler,
 * fixed/minimum parameter size, and optional HCI_MGMT_* flags
 * (VAR_LEN, UNTRUSTED, UNCONFIGURED, NO_HDEV). The order of entries
 * defines the opcode numbering and must not change.
 */
6929 static const struct hci_mgmt_handler mgmt_handlers[] = {
6930 { NULL }, /* 0x0000 (no command) */
6931 { read_version, MGMT_READ_VERSION_SIZE,
6933 HCI_MGMT_UNTRUSTED },
6934 { read_commands, MGMT_READ_COMMANDS_SIZE,
6936 HCI_MGMT_UNTRUSTED },
6937 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6939 HCI_MGMT_UNTRUSTED },
6940 { read_controller_info, MGMT_READ_INFO_SIZE,
6941 HCI_MGMT_UNTRUSTED },
6942 { set_powered, MGMT_SETTING_SIZE },
6943 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6944 { set_connectable, MGMT_SETTING_SIZE },
6945 { set_fast_connectable, MGMT_SETTING_SIZE },
6946 { set_bondable, MGMT_SETTING_SIZE },
6947 { set_link_security, MGMT_SETTING_SIZE },
6948 { set_ssp, MGMT_SETTING_SIZE },
6949 { set_hs, MGMT_SETTING_SIZE },
6950 { set_le, MGMT_SETTING_SIZE },
6951 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6952 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6953 { add_uuid, MGMT_ADD_UUID_SIZE },
6954 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6955 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6957 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6959 { disconnect, MGMT_DISCONNECT_SIZE },
6960 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6961 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6962 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6963 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6964 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6965 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6966 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6967 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6968 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6969 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6970 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6971 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6972 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6974 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6975 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6976 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6977 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6978 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6979 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6980 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6981 { set_advertising, MGMT_SETTING_SIZE },
6982 { set_bredr, MGMT_SETTING_SIZE },
6983 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6984 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6985 { set_secure_conn, MGMT_SETTING_SIZE },
6986 { set_debug_keys, MGMT_SETTING_SIZE },
6987 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6988 { load_irks, MGMT_LOAD_IRKS_SIZE,
6990 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6991 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6992 { add_device, MGMT_ADD_DEVICE_SIZE },
6993 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6994 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6996 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6998 HCI_MGMT_UNTRUSTED },
6999 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
7000 HCI_MGMT_UNCONFIGURED |
7001 HCI_MGMT_UNTRUSTED },
7002 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7003 HCI_MGMT_UNCONFIGURED },
7004 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7005 HCI_MGMT_UNCONFIGURED },
7006 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7008 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7009 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7011 HCI_MGMT_UNTRUSTED },
7012 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7013 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7015 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Announce a newly registered controller index: legacy
 * Index Added / Unconfigured Index Added to the per-type subscribers,
 * plus the Extended Index Added event. Raw-only devices are skipped.
 */
7018 void mgmt_index_added(struct hci_dev *hdev)
7020 struct mgmt_ev_ext_index ev;
7022 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7025 switch (hdev->dev_type) {
7027 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7028 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7029 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7032 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7033 HCI_MGMT_INDEX_EVENTS);
7046 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7047 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller index. All pending MGMT commands
 * for the device are first failed with INVALID_INDEX, then the legacy
 * and extended Index Removed events are emitted. Raw-only devices are
 * skipped.
 */
7050 void mgmt_index_removed(struct hci_dev *hdev)
7052 struct mgmt_ev_ext_index ev;
7053 u8 status = MGMT_STATUS_INVALID_INDEX;
7055 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7058 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command. */
7060 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7062 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7063 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7064 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7067 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7068 HCI_MGMT_INDEX_EVENTS);
7081 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7082 HCI_MGMT_EXT_INDEX_EVENTS);
7085 /* This function requires the caller holds hdev->lock */
/* Re-file every stored LE connection parameter entry onto the correct
 * pending action list according to its auto-connect policy, then
 * refresh the background scan. Used when powering (back) on.
 */
7086 static void restart_le_actions(struct hci_request *req)
7088 struct hci_dev *hdev = req->hdev;
7089 struct hci_conn_params *p;
7091 list_for_each_entry(p, &hdev->le_conn_params, list) {
7092 /* Needed for AUTO_OFF case where might not "really"
7093 * have been powered off.
7095 list_del_init(&p->action);
7097 switch (p->auto_connect) {
7098 case HCI_AUTO_CONN_DIRECT:
7099 case HCI_AUTO_CONN_ALWAYS:
7100 list_add(&p->action, &hdev->pend_le_conns);
7102 case HCI_AUTO_CONN_REPORT:
7103 list_add(&p->action, &hdev->pend_le_reports);
7110 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on setup sequence:
 * completes pending Set Powered commands and broadcasts the new
 * settings.
 */
7113 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7115 struct cmd_lookup match = { NULL, hdev };
7117 BT_DBG("status 0x%02x", status);
7120 /* Register the available SMP channels (BR/EDR and LE) only
7121 * when successfully powering on the controller. This late
7122 * registration is required so that LE SMP can clearly
7123 * decide if the public address or static address is used.
7130 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7132 new_settings(hdev, match.sk);
7134 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the MGMT settings after power-on: SSP/SC host support, LE
 * host support, advertising data, auth enable, fast connectable and
 * page scan. Returns the hci_req_run() result (0 when commands were
 * queued).
 */
7140 static int powered_update_hci(struct hci_dev *hdev)
7142 struct hci_request req;
7145 hci_req_init(&req, hdev);
/* Enable SSP on the controller if MGMT says it should be on but the
 * host feature bit is not yet set. */
7147 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7148 !lmp_host_ssp_capable(hdev)) {
7151 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7153 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7156 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7157 sizeof(support), &support);
7161 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7162 lmp_bredr_capable(hdev)) {
7163 struct hci_cp_write_le_host_supported cp;
7168 /* Check first if we already have the right
7169 * host state (host features set)
7171 if (cp.le != lmp_host_le_capable(hdev) ||
7172 cp.simul != lmp_host_le_br_capable(hdev))
7173 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7177 if (lmp_le_capable(hdev)) {
7178 /* Make sure the controller has a good default for
7179 * advertising data. This also applies to the case
7180 * where BR/EDR was toggled during the AUTO_OFF phase.
7182 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7183 update_adv_data(&req);
7184 update_scan_rsp_data(&req);
7187 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7188 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7189 enable_advertising(&req);
7191 restart_le_actions(&req);
/* Sync the link-security (auth enable) setting with the HCI_AUTH
 * controller flag. */
7194 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7195 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7196 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7197 sizeof(link_sec), &link_sec);
7199 if (lmp_bredr_capable(hdev)) {
7200 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7201 write_fast_connectable(&req, true);
7203 write_fast_connectable(&req, false);
7204 __hci_update_page_scan(&req);
7210 return hci_req_run(&req, powered_complete);
/* Notify the MGMT layer of a controller power state change. On power
 * on, runs powered_update_hci(); on power off, fails all pending
 * commands (with INVALID_INDEX during unregistration, NOT_POWERED
 * otherwise) and announces a zeroed class of device if needed.
 * Finishes by emitting New Settings.
 */
7213 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7215 struct cmd_lookup match = { NULL, hdev };
7216 u8 status, zero_cod[] = { 0, 0, 0 };
7219 if (!hci_dev_test_flag(hdev, HCI_MGMT))
7223 if (powered_update_hci(hdev) == 0)
7226 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
7231 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7233 /* If the power off is because of hdev unregistration let
7234 * use the appropriate INVALID_INDEX status. Otherwise use
7235 * NOT_POWERED. We cover both scenarios here since later in
7236 * mgmt_index_removed() any hci_conn callbacks will have already
7237 * been triggered, potentially causing misleading DISCONNECTED
7240 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7241 status = MGMT_STATUS_INVALID_INDEX;
7243 status = MGMT_STATUS_NOT_POWERED;
7245 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7247 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7248 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7249 zero_cod, sizeof(zero_cod), NULL);
7252 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command.
 *
 * @err is the kernel error that caused the failure; -ERFKILL is mapped
 * to the dedicated RFKILLED mgmt status, everything else to FAILED.
 */
7260 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7262 struct mgmt_pending_cmd *cmd;
7265 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7269 if (err == -ERFKILL)
7270 status = MGMT_STATUS_RFKILLED;
7272 status = MGMT_STATUS_FAILED;
7274 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7276 mgmt_pending_remove(cmd);
/* Handle expiry of the discoverable timeout: clear the discoverable
 * flags, disable inquiry scan (keeping page scan) for BR/EDR, refresh
 * advertising data if needed, and emit New Settings.
 */
7279 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7281 struct hci_request req;
7285 /* When discoverable timeout triggers, then just make sure
7286 * the limited discoverable flag is cleared. Even in the case
7287 * of a timeout triggered from general discoverable, it is
7288 * safe to unconditionally clear the flag.
7290 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7291 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7293 hci_req_init(&req, hdev);
7294 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* SCAN_PAGE alone keeps the device connectable but no longer
 * discoverable (inquiry scan off).
 */
7295 u8 scan = SCAN_PAGE;
7296 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7297 sizeof(scan), &scan);
7301 /* Advertising instances don't use the global discoverable setting, so
7302 * only update AD if advertising was enabled using Set Advertising.
7304 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7305 update_adv_data(&req);
7307 hci_req_run(&req, NULL);
7309 hdev->discov_timeout = 0;
7311 new_settings(hdev, NULL);
7313 hci_dev_unlock(hdev);
/* Emit a New Link Key event so userspace can decide whether to store
 * the BR/EDR link key (@persistent becomes the event's store hint).
 */
7316 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7319 struct mgmt_ev_new_link_key ev;
7321 memset(&ev, 0, sizeof(ev));
7323 ev.store_hint = persistent;
7324 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
/* Link keys only exist for BR/EDR transports. */
7325 ev.key.addr.type = BDADDR_BREDR;
7326 ev.key.type = key->type;
7327 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7328 ev.key.pin_len = key->pin_len;
7330 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag to the
 * corresponding mgmt LTK type constant.
 *
 * NOTE(review): the case labels for the first two groups are elided
 * from this listing; presumably legacy (P192) vs. P256 keys — confirm
 * against the full source.
 */
7333 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7335 switch (ltk->type) {
7338 if (ltk->authenticated)
7339 return MGMT_LTK_AUTHENTICATED;
7340 return MGMT_LTK_UNAUTHENTICATED;
7342 if (ltk->authenticated)
7343 return MGMT_LTK_P256_AUTH;
7344 return MGMT_LTK_P256_UNAUTH;
7345 case SMP_LTK_P256_DEBUG:
7346 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown types. */
7349 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. The store hint is forced to 0 for
 * devices using non-identity (resolvable/non-resolvable) random
 * addresses, since keys tied to a changing address are useless.
 */
7352 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7354 struct mgmt_ev_new_long_term_key ev;
7356 memset(&ev, 0, sizeof(ev));
7358 /* Devices using resolvable or non-resolvable random addresses
7359 * without providing an identity resolving key don't require
7360 * to store long term keys. Their addresses will change the
7363 * Only when a remote device provides an identity address
7364 * make sure the long term key is stored. If the remote
7365 * identity is known, the long term keys are internally
7366 * mapped to the identity address. So allow static random
7367 * and public addresses here.
/* Top two bits 11 mark a static random address; anything else random
 * is resolvable/non-resolvable and not identity material.
 */
7369 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7370 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7371 ev.store_hint = 0x00;
7373 ev.store_hint = persistent;
7375 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7376 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7377 ev.key.type = mgmt_ltk_type(key);
7378 ev.key.enc_size = key->enc_size;
7379 ev.key.ediv = key->ediv;
7380 ev.key.rand = key->rand;
7382 if (key->type == SMP_LTK)
7385 memcpy(ev.key.val, key->val, sizeof(key->val));
7387 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event. Storage is only suggested when the device is
 * actually using a resolvable private address (non-zero rpa).
 */
7390 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7392 struct mgmt_ev_new_irk ev;
7394 memset(&ev, 0, sizeof(ev));
7396 /* For identity resolving keys from devices that are already
7397 * using a public address or static random address, do not
7398 * ask for storing this key. The identity resolving key really
7399 * is only mandatory for devices using resolvable random
7402 * Storing all identity resolving keys has the downside that
7403 * they will be also loaded on next boot of the system. More
7404 * identity resolving keys, means more time during scanning is
7405 * needed to actually resolve these addresses.
7407 if (bacmp(&irk->rpa, BDADDR_ANY))
7408 ev.store_hint = 0x01;
7410 ev.store_hint = 0x00;
7412 bacpy(&ev.rpa, &irk->rpa);
7413 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7414 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7415 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7417 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event. Like LTKs, keys
 * from non-identity random addresses get store hint 0.
 */
7420 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7423 struct mgmt_ev_new_csrk ev;
7425 memset(&ev, 0, sizeof(ev));
7427 /* Devices using resolvable or non-resolvable random addresses
7428 * without providing an identity resolving key don't require
7429 * to store signature resolving keys. Their addresses will change
7430 * the next time around.
7432 * Only when a remote device provides an identity address
7433 * make sure the signature resolving key is stored. So allow
7434 * static random and public addresses here.
/* Same static-random-address test as in mgmt_new_ltk(). */
7436 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7437 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7438 ev.store_hint = 0x00;
7440 ev.store_hint = persistent;
7442 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7443 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7444 ev.key.type = csrk->type;
7445 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7447 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE peer, so userspace
 * can persist preferred connection parameters. Silently ignored for
 * non-identity addresses.
 */
7450 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7451 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7452 u16 max_interval, u16 latency, u16 timeout)
7454 struct mgmt_ev_new_conn_param ev;
7456 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7459 memset(&ev, 0, sizeof(ev));
7460 bacpy(&ev.addr.bdaddr, bdaddr);
7461 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7462 ev.store_hint = store_hint;
/* mgmt wire format is little-endian. */
7463 ev.min_interval = cpu_to_le16(min_interval);
7464 ev.max_interval = cpu_to_le16(max_interval);
7465 ev.latency = cpu_to_le16(latency);
7466 ev.timeout = cpu_to_le16(timeout);
7468 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a Device Connected event, packing either the LE advertising
 * data or (for BR/EDR) the remote name and class of device into the
 * variable-length EIR portion of the event.
 */
7471 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7472 u32 flags, u8 *name, u8 name_len)
7475 struct mgmt_ev_device_connected *ev = (void *) buf;
7478 bacpy(&ev->addr.bdaddr, &conn->dst);
7479 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7481 ev->flags = __cpu_to_le32(flags);
7483 /* We must ensure that the EIR Data fields are ordered and
7484 * unique. Keep it simple for now and avoid the problem by not
7485 * adding any BR/EDR data to the LE adv.
7487 if (conn->le_adv_data_len > 0) {
/* LE case: forward the advertising data as-is. */
7488 memcpy(&ev->eir[eir_len],
7489 conn->le_adv_data, conn->le_adv_data_len);
7490 eir_len = conn->le_adv_data_len;
/* BR/EDR case: synthesize EIR from name and class of device. */
7493 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append the class of device when it is non-zero. */
7496 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7497 eir_len = eir_append_data(ev->eir, eir_len,
7499 conn->dev_class, 3);
7502 ev->eir_len = cpu_to_le16(eir_len);
7504 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7505 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command; @data is a struct sock ** used to report back the
 * originating socket (assignment elided from this listing).
 */
7508 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7510 struct sock **sk = data;
7512 cmd->cmd_complete(cmd, 0);
7517 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete a pending Unpair Device
 * command and send the Device Unpaired event; @data is the hci_dev.
 */
7520 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7522 struct hci_dev *hdev = data;
7523 struct mgmt_cp_unpair_device *cp = cmd->param;
7525 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7527 cmd->cmd_complete(cmd, 0);
7528 mgmt_pending_remove(cmd);
/* Return true if a Set Powered (off) command is currently pending,
 * i.e. the device is being powered down via mgmt.
 */
7531 bool mgmt_powering_down(struct hci_dev *hdev)
7533 struct mgmt_pending_cmd *cmd;
7534 struct mgmt_mode *cp;
7536 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event and complete any pending
 * Disconnect/Unpair Device commands for this link. Also kicks the
 * deferred power-off when this was the last open connection.
 */
7547 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7548 u8 link_type, u8 addr_type, u8 reason,
7549 bool mgmt_connected)
7551 struct mgmt_ev_device_disconnected ev;
7552 struct sock *sk = NULL;
7554 /* The connection is still in hci_conn_hash so test for 1
7555 * instead of 0 to know if this is the last one.
7557 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7558 cancel_delayed_work(&hdev->power_off);
7559 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only report disconnections for connections mgmt knew about. */
7562 if (!mgmt_connected)
7565 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Picks up the socket of a pending Disconnect command into sk. */
7568 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7570 bacpy(&ev.addr.bdaddr, bdaddr);
7571 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7574 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7579 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending Disconnect command with a failure status.
 * Pending Unpair Device commands are completed first, and the pending
 * Disconnect is matched by both address and address type.
 */
7583 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7584 u8 link_type, u8 addr_type, u8 status)
7586 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7587 struct mgmt_cp_disconnect *cp;
7588 struct mgmt_pending_cmd *cmd;
7590 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7593 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Bail out if the pending command is for a different peer. */
7599 if (bacmp(bdaddr, &cp->addr.bdaddr))
7602 if (cp->addr.type != bdaddr_type)
7605 cmd->cmd_complete(cmd, mgmt_status(status));
7606 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event; also kicks the deferred power-off when
 * this was the last connection during a mgmt-initiated power-down.
 */
7609 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7610 u8 addr_type, u8 status)
7612 struct mgmt_ev_connect_failed ev;
7614 /* The connection is still in hci_conn_hash so test for 1
7615 * instead of 0 to know if this is the last one.
7617 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7618 cancel_delayed_work(&hdev->power_off);
7619 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7622 bacpy(&ev.addr.bdaddr, bdaddr);
7623 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7624 ev.status = mgmt_status(status);
7626 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a BR/EDR PIN code request from the controller to userspace. */
7629 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7631 struct mgmt_ev_pin_code_request ev;
7633 bacpy(&ev.addr.bdaddr, bdaddr);
7634 ev.addr.type = BDADDR_BREDR;
7637 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the HCI status. */
7640 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7643 struct mgmt_pending_cmd *cmd;
7645 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7649 cmd->cmd_complete(cmd, mgmt_status(status));
7650 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command. */
7653 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7656 struct mgmt_pending_cmd *cmd;
7658 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7662 cmd->cmd_complete(cmd, mgmt_status(status));
7663 mgmt_pending_remove(cmd);
/* Forward an SSP user confirmation request (numeric comparison /
 * just-works, selected by confirm_hint) to userspace.
 */
7666 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7667 u8 link_type, u8 addr_type, u32 value,
7670 struct mgmt_ev_user_confirm_request ev;
7672 BT_DBG("%s", hdev->name);
7674 bacpy(&ev.addr.bdaddr, bdaddr);
7675 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7676 ev.confirm_hint = confirm_hint;
7677 ev.value = cpu_to_le32(value);
7679 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Forward an SSP passkey entry request to userspace. */
7683 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7684 u8 link_type, u8 addr_type)
7686 struct mgmt_ev_user_passkey_request ev;
7688 BT_DBG("%s", hdev->name);
7690 bacpy(&ev.addr.bdaddr, bdaddr);
7691 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7693 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion for the four user pairing reply opcodes: find the
 * pending command for @opcode and finish it with the mapped status.
 */
7697 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7698 u8 link_type, u8 addr_type, u8 status,
7701 struct mgmt_pending_cmd *cmd;
7703 cmd = pending_find(opcode, hdev);
7707 cmd->cmd_complete(cmd, mgmt_status(status));
7708 mgmt_pending_remove(cmd);
/* Thin wrappers selecting the specific pairing reply opcode. */
7713 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7714 u8 link_type, u8 addr_type, u8 status)
7716 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7717 status, MGMT_OP_USER_CONFIRM_REPLY);
7720 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7721 u8 link_type, u8 addr_type, u8 status)
7723 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7725 MGMT_OP_USER_CONFIRM_NEG_REPLY);
7728 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7729 u8 link_type, u8 addr_type, u8 status)
7731 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7732 status, MGMT_OP_USER_PASSKEY_REPLY);
7735 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7736 u8 link_type, u8 addr_type, u8 status)
7738 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7740 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Notify userspace of the passkey the remote side must enter, and how
 * many digits have been entered so far (@entered).
 */
7743 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7744 u8 link_type, u8 addr_type, u32 passkey,
7747 struct mgmt_ev_passkey_notify ev;
7749 BT_DBG("%s", hdev->name);
7751 bacpy(&ev.addr.bdaddr, bdaddr);
7752 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7753 ev.passkey = __cpu_to_le32(passkey);
7754 ev.entered = entered;
7756 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event for @conn and, if a pairing
 * command is pending for it, complete and remove that command.
 */
7759 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7761 struct mgmt_ev_auth_failed ev;
7762 struct mgmt_pending_cmd *cmd;
7763 u8 status = mgmt_status(hci_status);
7765 bacpy(&ev.addr.bdaddr, &conn->dst);
7766 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7769 cmd = find_pairing(conn);
/* Send the event to everyone except the initiating socket. */
7771 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7772 cmd ? cmd->sk : NULL);
7775 cmd->cmd_complete(cmd, status);
7776 mgmt_pending_remove(cmd);
/* Completion handler for Write Auth Enable: on error fail the pending
 * Set Link Security commands, otherwise sync HCI_LINK_SECURITY with
 * the controller's HCI_AUTH state and announce New Settings.
 */
7780 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7782 struct cmd_lookup match = { NULL, hdev };
7786 u8 mgmt_err = mgmt_status(status);
7787 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7788 cmd_status_rsp, &mgmt_err);
7792 if (test_bit(HCI_AUTH, &hdev->flags))
/* changed is true only when the flag actually flipped. */
7793 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7795 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7797 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7801 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command with all-zero data, clearing both the
 * cached EIR in hdev and the controller's EIR. No-op if the controller
 * lacks extended inquiry response support.
 */
7807 static void clear_eir(struct hci_request *req)
7809 struct hci_dev *hdev = req->hdev;
7810 struct hci_cp_write_eir cp;
7812 if (!lmp_ext_inq_capable(hdev))
7815 memset(hdev->eir, 0, sizeof(hdev->eir));
7817 memset(&cp, 0, sizeof(cp));
7819 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for Write Simple Pairing Mode: update the
 * SSP/HS flags, answer pending Set SSP commands, announce New
 * Settings on change, and queue follow-up debug-mode/EIR updates.
 */
7822 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7824 struct cmd_lookup match = { NULL, hdev };
7825 struct hci_request req;
7826 bool changed = false;
7829 u8 mgmt_err = mgmt_status(status);
/* Error path: roll back a speculative SSP-enable and fail the
 * pending Set SSP commands.
 */
7831 if (enable && hci_dev_test_and_clear_flag(hdev,
7833 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7834 new_settings(hdev, NULL);
7837 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7843 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7845 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7847 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP; clear it when SSP goes away. */
7850 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7853 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7856 new_settings(hdev, match.sk);
7861 hci_req_init(&req, hdev);
7863 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7864 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7865 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7866 sizeof(enable), &enable);
7872 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: remember (and hold a reference to)
 * the first pending command's socket in the cmd_lookup match.
 */
7875 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7877 struct cmd_lookup *match = data;
7879 if (match->sk == NULL) {
7880 match->sk = cmd->sk;
7881 sock_hold(match->sk);
/* Completion handler for class-of-device updates: find the socket of
 * any pending Set Dev Class / Add UUID / Remove UUID command and
 * broadcast the Class Of Device Changed event.
 */
7885 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7888 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7890 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7891 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7892 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7895 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7896 dev_class, 3, NULL);
/* Completion handler for a local-name change: update the cached name
 * and broadcast Local Name Changed, unless the write happened as part
 * of the power-on sequence.
 */
7902 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7904 struct mgmt_cp_set_local_name ev;
7905 struct mgmt_pending_cmd *cmd;
7910 memset(&ev, 0, sizeof(ev));
7911 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7912 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7914 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7916 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7918 /* If this is a HCI command related to powering on the
7919 * HCI dev don't send any mgmt signals.
7921 if (pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the event for the socket that issued the change. */
7925 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7926 cmd ? cmd->sk : NULL);
/* Completion handler for Read Local OOB Data: answer the pending
 * command with the P-192 hash/rand and, when BR/EDR Secure
 * Connections is enabled, the P-256 values as well.
 */
7929 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7930 u8 *rand192, u8 *hash256, u8 *rand256,
7933 struct mgmt_pending_cmd *cmd;
7935 BT_DBG("%s status %u", hdev->name, status);
7937 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7942 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7943 mgmt_status(status));
7945 struct mgmt_rp_read_local_oob_data rp;
7946 size_t rp_size = sizeof(rp);
7948 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7949 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7951 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7952 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7953 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
/* Without SC the reply is truncated to the P-192 fields only. */
7955 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7958 mgmt_cmd_complete(cmd->sk, hdev->id,
7959 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7963 mgmt_pending_remove(cmd);
/* Return true if the 128-bit @uuid appears in the @uuids array of
 * @uuid_count entries.
 */
7966 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7970 for (i = 0; i < uuid_count; i++) {
7971 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising TLV structure in @eir and return true if
 * any 16/32/128-bit service UUID it contains matches the filter list.
 * 16- and 32-bit UUIDs are expanded against the Bluetooth base UUID
 * before comparison.
 */
7978 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7982 while (parsed < eir_len) {
7983 u8 field_len = eir[0];
/* Reject a field that claims to extend past the buffer. */
7990 if (eir_len - parsed < field_len + 1)
7994 case EIR_UUID16_ALL:
7995 case EIR_UUID16_SOME:
7996 for (i = 0; i + 3 <= field_len; i += 2) {
7997 memcpy(uuid, bluetooth_base_uuid, 16);
7998 uuid[13] = eir[i + 3];
7999 uuid[12] = eir[i + 2];
8000 if (has_uuid(uuid, uuid_count, uuids))
8004 case EIR_UUID32_ALL:
8005 case EIR_UUID32_SOME:
8006 for (i = 0; i + 5 <= field_len; i += 4) {
8007 memcpy(uuid, bluetooth_base_uuid, 16);
8008 uuid[15] = eir[i + 5];
8009 uuid[14] = eir[i + 4];
8010 uuid[13] = eir[i + 3];
8011 uuid[12] = eir[i + 2];
8012 if (has_uuid(uuid, uuid_count, uuids))
8016 case EIR_UUID128_ALL:
8017 case EIR_UUID128_SOME:
8018 for (i = 0; i + 17 <= field_len; i += 16) {
8019 memcpy(uuid, eir + i + 2, 16);
8020 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: 1 length byte + field_len payload. */
8026 parsed += field_len + 1;
8027 eir += field_len + 1;
/* Schedule a delayed restart of an active LE scan, but only while the
 * controller is scanning and the remaining scan duration still makes a
 * restart worthwhile.
 */
8033 static void restart_le_scan(struct hci_dev *hdev)
8035 /* If controller is not scanning we are done. */
8036 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* Skip the restart when the scan window would expire before the
 * restart delay elapses anyway.
 */
8039 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8040 hdev->discovery.scan_start +
8041 hdev->discovery.scan_duration))
8044 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
8045 DISCOV_LE_RESTART_DELAY);
/* Apply the Start Service Discovery result filter: RSSI threshold and
 * UUID list. Returns whether the found device should be reported.
 */
8048 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8049 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8051 /* If a RSSI threshold has been specified, and
8052 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8053 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8054 * is set, let it through for further processing, as we might need to
8057 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8058 * the results are also dropped.
8060 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8061 (rssi == HCI_RSSI_INVALID ||
8062 (rssi < hdev->discovery.rssi &&
8063 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8066 if (hdev->discovery.uuid_count != 0) {
8067 /* If a list of UUIDs is provided in filter, results with no
8068 * matching UUID should be dropped.
/* Match against both the advertising data and the scan response. */
8070 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8071 hdev->discovery.uuids) &&
8072 !eir_has_uuids(scan_rsp, scan_rsp_len,
8073 hdev->discovery.uuid_count,
8074 hdev->discovery.uuids))
8078 /* If duplicate filtering does not report RSSI changes, then restart
8079 * scanning to ensure updated result with updated RSSI values.
8081 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8082 restart_le_scan(hdev);
8084 /* Validate RSSI value against the RSSI threshold once more. */
8085 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8086 rssi < hdev->discovery.rssi)
/* Emit a Device Found event for an inquiry/scan result, after applying
 * discovery-state and service-discovery filters. The variable-length
 * event carries EIR/advertising data, optionally a synthesized class
 * of device, and any scan response appended.
 */
8093 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8094 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8095 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8098 struct mgmt_ev_device_found *ev = (void *)buf;
8101 /* Don't send events for a non-kernel initiated discovery. With
8102 * LE one exception is if we have pend_le_reports > 0 in which
8103 * case we're doing passive scanning and want these events.
8105 if (!hci_discovery_active(hdev)) {
8106 if (link_type == ACL_LINK)
8108 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8112 if (hdev->discovery.result_filtering) {
8113 /* We are using service discovery */
8114 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8119 /* Make sure that the buffer is big enough. The 5 extra bytes
8120 * are for the potential CoD field.
8122 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8125 memset(buf, 0, sizeof(buf));
8127 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8128 * RSSI value was reported as 0 when not available. This behavior
8129 * is kept when using device discovery. This is required for full
8130 * backwards compatibility with the API.
8132 * However when using service discovery, the value 127 will be
8133 * returned when the RSSI is not available.
8135 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8136 link_type == ACL_LINK)
8139 bacpy(&ev->addr.bdaddr, bdaddr);
8140 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8142 ev->flags = cpu_to_le32(flags);
8145 /* Copy EIR or advertising data into event */
8146 memcpy(ev->eir, eir, eir_len);
/* Append a CoD field only if the EIR doesn't already contain one. */
8148 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8149 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8152 if (scan_rsp_len > 0)
8153 /* Append scan response data to event */
8154 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8156 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8157 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8159 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a Device Found event whose EIR
 * contains just an EIR_NAME_COMPLETE field.
 */
8162 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8163 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8165 struct mgmt_ev_device_found *ev;
8166 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8169 ev = (struct mgmt_ev_device_found *) buf;
8171 memset(buf, 0, sizeof(buf));
8173 bacpy(&ev->addr.bdaddr, bdaddr);
8174 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8177 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8180 ev->eir_len = cpu_to_le16(eir_len);
8182 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast a Discovering event reflecting the current discovery
 * type and whether discovery started or stopped.
 */
8185 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8187 struct mgmt_ev_discovering ev;
8189 BT_DBG("%s discovering %u", hdev->name, discovering);
8191 memset(&ev, 0, sizeof(ev));
8192 ev.type = hdev->discovery.type;
8193 ev.discovering = discovering;
8195 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for the re-enable request: debug log only. */
8198 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8200 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising if either the global Advertising setting or an
 * advertising instance requires it (e.g. after it was implicitly
 * stopped by the controller).
 */
8203 void mgmt_reenable_advertising(struct hci_dev *hdev)
8205 struct hci_request req;
8207 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8208 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8211 hci_req_init(&req, hdev);
8212 enable_advertising(&req);
8213 hci_req_run(&req, adv_enable_complete);
8216 static struct hci_mgmt_chan chan = {
8217 .channel = HCI_CHANNEL_CONTROL,
8218 .handler_count = ARRAY_SIZE(mgmt_handlers),
8219 .handlers = mgmt_handlers,
8220 .hdev_init = mgmt_init_hdev,
8225 return hci_mgmt_chan_register(&chan);
8228 void mgmt_exit(void)
8230 hci_mgmt_chan_unregister(&chan);