/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
/* How long the service cache stays valid after last update. */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key used to detect blank/unset link keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_index_list *rp;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
360 read_unlock(&hci_dev_list_lock);
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
420 read_unlock(&hci_dev_list_lock);
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
479 read_unlock(&hci_dev_list_lock);
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
546 static __le32 get_missing_options(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
583 BT_DBG("sock %p %s", sk, hdev->name);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
605 static u32 get_supported_settings(struct hci_dev *hdev)
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
640 settings |= MGMT_SETTING_CONFIGURATION;
645 static u32 get_current_settings(struct hci_dev *hdev)
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
713 #define PNP_INFO_SVCLASS_ID 0x1200
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 16)
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
739 uuids_start[1] = EIR_UUID16_ALL;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
772 uuids_start[1] = EIR_UUID32_ALL;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
784 uuids_start[0] += sizeof(u32);
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
805 uuids_start[1] = EIR_UUID128_ALL;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
815 memcpy(ptr, uuid->uuid, 16);
817 uuids_start[0] += 16;
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
835 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
840 name_len = strlen(hdev->dev_name);
842 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
844 if (name_len > max_len) {
846 ptr[1] = EIR_NAME_SHORT;
848 ptr[1] = EIR_NAME_COMPLETE;
850 ptr[0] = name_len + 1;
852 memcpy(ptr + 2, hdev->dev_name, name_len);
854 ad_len += (name_len + 2);
855 ptr += (name_len + 2);
861 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
863 /* TODO: Set the appropriate entries based on advertising instance flags
864 * here once flags other than 0 are supported.
866 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
867 hdev->adv_instance.scan_rsp_len);
869 return hdev->adv_instance.scan_rsp_len;
872 static void update_scan_rsp_data_for_instance(struct hci_request *req,
875 struct hci_dev *hdev = req->hdev;
876 struct hci_cp_le_set_scan_rsp_data cp;
879 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 memset(&cp, 0, sizeof(cp));
885 len = create_instance_scan_rsp_data(hdev, cp.data);
887 len = create_default_scan_rsp_data(hdev, cp.data);
889 if (hdev->scan_rsp_data_len == len &&
890 !memcmp(cp.data, hdev->scan_rsp_data, len))
893 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
894 hdev->scan_rsp_data_len = len;
898 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
901 static void update_scan_rsp_data(struct hci_request *req)
903 struct hci_dev *hdev = req->hdev;
906 /* The "Set Advertising" setting supersedes the "Add Advertising"
907 * setting. Here we set the scan response data based on which
908 * setting was set. When neither apply, default to the global settings,
909 * represented by instance "0".
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
917 update_scan_rsp_data_for_instance(req, instance);
920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
922 struct mgmt_pending_cmd *cmd;
924 /* If there's a pending mgmt command the flags will not yet have
925 * their final values, so check for this first.
927 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
929 struct mgmt_mode *cp = cmd->param;
931 return LE_AD_GENERAL;
932 else if (cp->val == 0x02)
933 return LE_AD_LIMITED;
935 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 return LE_AD_LIMITED;
937 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 return LE_AD_GENERAL;
944 static u8 create_default_adv_data(struct hci_dev *hdev, u8 *ptr)
946 u8 ad_len = 0, flags = 0;
948 flags |= get_adv_discov_flags(hdev);
950 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
951 flags |= LE_AD_NO_BREDR;
954 BT_DBG("adv flags 0x%02x", flags);
964 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
966 ptr[1] = EIR_TX_POWER;
967 ptr[2] = (u8) hdev->adv_tx_power;
976 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 *ptr)
978 /* TODO: Set the appropriate entries based on advertising instance flags
979 * here once flags other than 0 are supported.
981 memcpy(ptr, hdev->adv_instance.adv_data,
982 hdev->adv_instance.adv_data_len);
984 return hdev->adv_instance.adv_data_len;
987 static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
989 struct hci_dev *hdev = req->hdev;
990 struct hci_cp_le_set_adv_data cp;
993 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
996 memset(&cp, 0, sizeof(cp));
999 len = create_instance_adv_data(hdev, cp.data);
1001 len = create_default_adv_data(hdev, cp.data);
1003 /* There's nothing to do if the data hasn't changed */
1004 if (hdev->adv_data_len == len &&
1005 memcmp(cp.data, hdev->adv_data, len) == 0)
1008 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1009 hdev->adv_data_len = len;
1013 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1016 static void update_adv_data(struct hci_request *req)
1018 struct hci_dev *hdev = req->hdev;
1021 /* The "Set Advertising" setting supersedes the "Add Advertising"
1022 * setting. Here we set the advertising data based on which
1023 * setting was set. When neither apply, default to the global settings,
1024 * represented by instance "0".
1026 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
1027 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
1032 update_adv_data_for_instance(req, instance);
1035 int mgmt_update_adv_data(struct hci_dev *hdev)
1037 struct hci_request req;
1039 hci_req_init(&req, hdev);
1040 update_adv_data(&req);
1042 return hci_req_run(&req, NULL);
1045 static void create_eir(struct hci_dev *hdev, u8 *data)
1050 name_len = strlen(hdev->dev_name);
1054 if (name_len > 48) {
1056 ptr[1] = EIR_NAME_SHORT;
1058 ptr[1] = EIR_NAME_COMPLETE;
1060 /* EIR Data length */
1061 ptr[0] = name_len + 1;
1063 memcpy(ptr + 2, hdev->dev_name, name_len);
1065 ptr += (name_len + 2);
1068 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1070 ptr[1] = EIR_TX_POWER;
1071 ptr[2] = (u8) hdev->inq_tx_power;
1076 if (hdev->devid_source > 0) {
1078 ptr[1] = EIR_DEVICE_ID;
1080 put_unaligned_le16(hdev->devid_source, ptr + 2);
1081 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1082 put_unaligned_le16(hdev->devid_product, ptr + 6);
1083 put_unaligned_le16(hdev->devid_version, ptr + 8);
1088 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1089 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1090 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1093 static void update_eir(struct hci_request *req)
1095 struct hci_dev *hdev = req->hdev;
1096 struct hci_cp_write_eir cp;
1098 if (!hdev_is_powered(hdev))
1101 if (!lmp_ext_inq_capable(hdev))
1104 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1107 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1110 memset(&cp, 0, sizeof(cp));
1112 create_eir(hdev, cp.data);
1114 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1117 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1119 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1122 static u8 get_service_classes(struct hci_dev *hdev)
1124 struct bt_uuid *uuid;
1127 list_for_each_entry(uuid, &hdev->uuids, list)
1128 val |= uuid->svc_hint;
1133 static void update_class(struct hci_request *req)
1135 struct hci_dev *hdev = req->hdev;
1138 BT_DBG("%s", hdev->name);
1140 if (!hdev_is_powered(hdev))
1143 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1146 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1149 cod[0] = hdev->minor_class;
1150 cod[1] = hdev->major_class;
1151 cod[2] = get_service_classes(hdev);
1153 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1156 if (memcmp(cod, hdev->dev_class, 3) == 0)
1159 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1162 static bool get_connectable(struct hci_dev *hdev)
1164 struct mgmt_pending_cmd *cmd;
1166 /* If there's a pending mgmt command the flag will not yet have
1167 * it's final value, so check for this first.
1169 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1171 struct mgmt_mode *cp = cmd->param;
1175 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1178 static void disable_advertising(struct hci_request *req)
1182 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1185 static void enable_advertising(struct hci_request *req)
1187 struct hci_dev *hdev = req->hdev;
1188 struct hci_cp_le_set_adv_param cp;
1189 u8 own_addr_type, enable = 0x01;
1192 if (hci_conn_num(hdev, LE_LINK) > 0)
1195 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1196 disable_advertising(req);
1198 /* Clear the HCI_LE_ADV bit temporarily so that the
1199 * hci_update_random_address knows that it's safe to go ahead
1200 * and write a new random address. The flag will be set back on
1201 * as soon as the SET_ADV_ENABLE HCI command completes.
1203 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1205 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1208 connectable = get_connectable(hdev);
1210 /* Set require_privacy to true only when non-connectable
1211 * advertising is used. In that case it is fine to use a
1212 * non-resolvable private address.
1214 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1217 memset(&cp, 0, sizeof(cp));
1218 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1219 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1220 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1221 cp.own_address_type = own_addr_type;
1222 cp.channel_map = hdev->le_adv_channel_map;
1224 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1226 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1229 static void service_cache_off(struct work_struct *work)
1231 struct hci_dev *hdev = container_of(work, struct hci_dev,
1232 service_cache.work);
1233 struct hci_request req;
1235 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1238 hci_req_init(&req, hdev);
1245 hci_dev_unlock(hdev);
1247 hci_req_run(&req, NULL);
1250 static void rpa_expired(struct work_struct *work)
1252 struct hci_dev *hdev = container_of(work, struct hci_dev,
1254 struct hci_request req;
1258 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1260 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1263 /* The generation of a new RPA and programming it into the
1264 * controller happens in the enable_advertising() function.
1266 hci_req_init(&req, hdev);
1267 enable_advertising(&req);
1268 hci_req_run(&req, NULL);
1271 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1273 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1276 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1277 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1279 /* Non-mgmt controlled devices get this bit set
1280 * implicitly so that pairing works for them, however
1281 * for mgmt we require user-space to explicitly enable
1284 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1287 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1288 void *data, u16 data_len)
1290 struct mgmt_rp_read_info rp;
1292 BT_DBG("sock %p %s", sk, hdev->name);
1296 memset(&rp, 0, sizeof(rp));
1298 bacpy(&rp.bdaddr, &hdev->bdaddr);
1300 rp.version = hdev->hci_ver;
1301 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1303 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1304 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1306 memcpy(rp.dev_class, hdev->dev_class, 3);
1308 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1309 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1311 hci_dev_unlock(hdev);
1313 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1317 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1319 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1321 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1325 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1327 BT_DBG("%s status 0x%02x", hdev->name, status);
1329 if (hci_conn_count(hdev) == 0) {
1330 cancel_delayed_work(&hdev->power_off);
1331 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to abort any ongoing discovery on @req:
 * inquiry cancel and/or LE scan disable while finding, remote-name-request
 * cancel while resolving, and passive LE scan disable otherwise.
 * Returns whether any stop command was queued (inferred from callers that
 * test the result before setting DISCOVERY_STOPPING).
 */
1335 static bool hci_stop_discovery(struct hci_request *req)
1337 struct hci_dev *hdev = req->hdev;
1338 struct hci_cp_remote_name_req_cancel cp;
1339 struct inquiry_entry *e;
1341 switch (hdev->discovery.state) {
1342 case DISCOVERY_FINDING:
1343 if (test_bit(HCI_INQUIRY, &hdev->flags))
1344 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1346 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
/* Stop the scheduled disable so it doesn't race with this request. */
1347 cancel_delayed_work(&hdev->le_scan_disable);
1348 hci_req_add_le_scan_disable(req);
1353 case DISCOVERY_RESOLVING:
1354 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1359 bacpy(&cp.bdaddr, &e->data.bdaddr);
1360 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1366 /* Passive scanning */
1367 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1368 hci_req_add_le_scan_disable(req);
/* Emit an MGMT_EV_ADVERTISING_ADDED event for @instance, skipping the
 * socket @sk that triggered the change.
 */
1378 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1381 struct mgmt_ev_advertising_added ev;
1383 ev.instance = instance;
1385 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit an MGMT_EV_ADVERTISING_REMOVED event for @instance, skipping the
 * socket @sk that triggered the change (NULL when kernel-initiated).
 */
1388 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1391 struct mgmt_ev_advertising_removed ev;
1393 ev.instance = instance;
1395 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Tear down the (single, id 1) advertising instance: cancel its timeout,
 * wipe the instance state, notify user-space of the removal and, if the
 * controller is powered and not in generic advertising mode, disable
 * advertising on the controller.
 */
1398 static void clear_adv_instance(struct hci_dev *hdev)
1400 struct hci_request req;
1402 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1405 if (hdev->adv_instance.timeout)
1406 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
1408 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
/* NULL sk: broadcast to all mgmt sockets. */
1409 advertising_removed(NULL, hdev, 1);
1410 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
1412 if (!hdev_is_powered(hdev) ||
1413 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1416 hci_req_init(&req, hdev);
1417 disable_advertising(&req);
1418 hci_req_run(&req, NULL);
/* Build and run one HCI request that quiesces the controller before power
 * off: disable page/inquiry scan, clear the advertising instance, disable
 * LE advertising, stop discovery, and disconnect/cancel/reject every
 * connection depending on its state. On success, moves discovery to
 * DISCOVERY_STOPPING if a stop was queued. Returns hci_req_run()'s result
 * (-ENODATA when nothing needed to be sent).
 */
1421 static int clean_up_hci_state(struct hci_dev *hdev)
1423 struct hci_request req;
1424 struct hci_conn *conn;
1425 bool discov_stopped;
1428 hci_req_init(&req, hdev);
1430 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1431 test_bit(HCI_PSCAN, &hdev->flags)) {
1433 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1436 if (hdev->adv_instance.timeout)
1437 clear_adv_instance(hdev);
1439 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1440 disable_advertising(&req);
1442 discov_stopped = hci_stop_discovery(&req);
/* Wind down every connection according to its current state. */
1444 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1445 struct hci_cp_disconnect dc;
1446 struct hci_cp_reject_conn_req rej;
1448 switch (conn->state) {
1451 dc.handle = cpu_to_le16(conn->handle);
1452 dc.reason = 0x15; /* Terminated due to Power Off */
1453 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1456 if (conn->type == LE_LINK)
1457 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1459 else if (conn->type == ACL_LINK)
1460 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1464 bacpy(&rej.bdaddr, &conn->dst);
1465 rej.reason = 0x15; /* Terminated due to Power Off */
1466 if (conn->type == ACL_LINK)
1467 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1469 else if (conn->type == SCO_LINK)
1470 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1476 err = hci_req_run(&req, clean_up_hci_complete);
1477 if (!err && discov_stopped)
1478 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* Handler for MGMT_OP_SET_POWERED. Validates the on/off parameter,
 * rejects a duplicate pending command, short-circuits when the requested
 * state matches the current one, and otherwise queues power-on work or
 * performs the HCI clean-up followed by delayed power-off.
 * NOTE(review): extract is missing interior lines (busy-status path,
 * auto-off handling details); body shown is partial.
 */
1483 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1486 struct mgmt_mode *cp = data;
1487 struct mgmt_pending_cmd *cmd;
1490 BT_DBG("request for %s", hdev->name);
1492 if (cp->val != 0x00 && cp->val != 0x01)
1493 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1494 MGMT_STATUS_INVALID_PARAMS);
1498 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1499 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Device was only powered for the auto-off grace period: take over. */
1504 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1505 cancel_delayed_work(&hdev->power_off);
1508 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1510 err = mgmt_powered(hdev, 1);
/* Requested state equals current state: reply without any HCI traffic. */
1515 if (!!cp->val == hdev_is_powered(hdev)) {
1516 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1520 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1527 queue_work(hdev->req_workqueue, &hdev->power_on);
1530 /* Disconnect connections, stop scans, etc */
1531 err = clean_up_hci_state(hdev);
1533 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1534 HCI_POWER_OFF_TIMEOUT);
1536 /* ENODATA means there were no HCI commands queued */
1537 if (err == -ENODATA) {
1538 cancel_delayed_work(&hdev->power_off);
1539 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1545 hci_dev_unlock(hdev);
/* Broadcast an MGMT_EV_NEW_SETTINGS event with the current settings
 * bitmask to all mgmt sockets except @skip.
 */
1549 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1551 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1553 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
/* Public wrapper: broadcast a settings update to every mgmt socket. */
1557 int mgmt_new_settings(struct hci_dev *hdev)
1559 return new_settings(hdev, NULL);
1564 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings, remember the first socket in the cmd_lookup match (with
 * an extra reference) so the caller can skip it when broadcasting, and free
 * the pending command.
 */
1568 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1570 struct cmd_lookup *match = data;
1572 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1574 list_del(&cmd->list);
1576 if (match->sk == NULL) {
1577 match->sk = cmd->sk;
/* Hold the socket; the caller is responsible for releasing it. */
1578 sock_hold(match->sk);
1581 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data and remove it.
 */
1584 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1588 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1589 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: if the command has a dedicated
 * cmd_complete handler, use it; otherwise fall back to a plain status
 * response via cmd_status_rsp().
 */
1592 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1594 if (cmd->cmd_complete) {
1597 cmd->cmd_complete(cmd, *status);
1598 mgmt_pending_remove(cmd);
1603 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo back the command's stored parameters
 * in full as the response payload.
 */
1606 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1608 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1609 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1612 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1614 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1615 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, otherwise
 * SUCCESS.
 */
1618 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1620 if (!lmp_bredr_capable(hdev))
1621 return MGMT_STATUS_NOT_SUPPORTED;
1622 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1623 return MGMT_STATUS_REJECTED;
1625 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an mgmt status: NOT_SUPPORTED when the controller
 * lacks LE, REJECTED when LE is disabled, otherwise SUCCESS.
 */
1628 static u8 mgmt_le_support(struct hci_dev *hdev)
1630 if (!lmp_le_capable(hdev))
1631 return MGMT_STATUS_NOT_SUPPORTED;
1632 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1633 return MGMT_STATUS_REJECTED;
1635 return MGMT_STATUS_SUCCESS;
/* HCI completion handler for Set Discoverable. On failure, report the
 * error and clear the limited-discoverable flag. On success, commit the
 * discoverable flag, arm the discoverable timeout if one was requested,
 * reply to the requester, broadcast new settings, and refresh class of
 * device / page scan state.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
1638 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1641 struct mgmt_pending_cmd *cmd;
1642 struct mgmt_mode *cp;
1643 struct hci_request req;
1646 BT_DBG("status 0x%02x", status);
1650 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1655 u8 mgmt_err = mgmt_status(status);
1656 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1657 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1663 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1665 if (hdev->discov_timeout > 0) {
1666 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1667 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1671 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1674 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1677 new_settings(hdev, cmd->sk);
1679 /* When the discoverable mode gets changed, make sure
1680 * that class of device has the limited discoverable
1681 * bit correctly set. Also update page scan based on whitelist
1684 hci_req_init(&req, hdev);
1685 __hci_update_page_scan(&req);
1687 hci_req_run(&req, NULL);
1690 mgmt_pending_remove(cmd);
1693 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DISCOVERABLE (val 0x00 off / 0x01 general /
 * 0x02 limited, plus a timeout). Validates parameters and preconditions
 * (BR/EDR or LE enabled, powered for timeouts, connectable, no conflicting
 * pending command), handles the powered-off and timeout-only-change fast
 * paths, then builds the HCI request: IAC LAPs and scan enable for BR/EDR,
 * or just advertising data for LE-only controllers. Completion is handled
 * by set_discoverable_complete().
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
1696 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1699 struct mgmt_cp_set_discoverable *cp = data;
1700 struct mgmt_pending_cmd *cmd;
1701 struct hci_request req;
1706 BT_DBG("request for %s", hdev->name);
1708 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1709 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1710 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1711 MGMT_STATUS_REJECTED);
1713 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1714 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1715 MGMT_STATUS_INVALID_PARAMS);
1717 timeout = __le16_to_cpu(cp->timeout);
1719 /* Disabling discoverable requires that no timeout is set,
1720 * and enabling limited discoverable requires a timeout.
1722 if ((cp->val == 0x00 && timeout > 0) ||
1723 (cp->val == 0x02 && timeout == 0))
1724 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1725 MGMT_STATUS_INVALID_PARAMS);
1729 if (!hdev_is_powered(hdev) && timeout > 0) {
1730 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1731 MGMT_STATUS_NOT_POWERED);
1735 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1736 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1737 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable mode requires the device to also be connectable. */
1742 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1743 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1744 MGMT_STATUS_REJECTED);
1748 if (!hdev_is_powered(hdev)) {
1749 bool changed = false;
1751 /* Setting limited discoverable when powered off is
1752 * not a valid operation since it requires a timeout
1753 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1755 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1756 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1760 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1765 err = new_settings(hdev, sk);
1770 /* If the current mode is the same, then just update the timeout
1771 * value with the new value. And if only the timeout gets updated,
1772 * then no need for any HCI transactions.
1774 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1775 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1776 HCI_LIMITED_DISCOVERABLE)) {
1777 cancel_delayed_work(&hdev->discov_off);
1778 hdev->discov_timeout = timeout;
1780 if (cp->val && hdev->discov_timeout > 0) {
1781 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1782 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1786 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1790 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1796 /* Cancel any potential discoverable timeout that might be
1797 * still active and store new timeout value. The arming of
1798 * the timeout happens in the complete handler.
1800 cancel_delayed_work(&hdev->discov_off);
1801 hdev->discov_timeout = timeout;
1803 /* Limited discoverable mode */
1804 if (cp->val == 0x02)
1805 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1807 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1809 hci_req_init(&req, hdev);
1811 /* The procedure for LE-only controllers is much simpler - just
1812 * update the advertising data.
1814 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1820 struct hci_cp_write_current_iac_lap hci_cp;
1822 if (cp->val == 0x02) {
1823 /* Limited discoverable mode */
1824 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9e8b00 and GIAC 0x9e8b33, little-endian byte order. */
1825 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1826 hci_cp.iac_lap[1] = 0x8b;
1827 hci_cp.iac_lap[2] = 0x9e;
1828 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1829 hci_cp.iac_lap[4] = 0x8b;
1830 hci_cp.iac_lap[5] = 0x9e;
1832 /* General discoverable mode */
1834 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1835 hci_cp.iac_lap[1] = 0x8b;
1836 hci_cp.iac_lap[2] = 0x9e;
1839 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1840 (hci_cp.num_iac * 3) + 1, &hci_cp);
1842 scan |= SCAN_INQUIRY;
1844 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1847 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1850 update_adv_data(&req);
1852 err = hci_req_run(&req, set_discoverable_complete);
1854 mgmt_pending_remove(cmd);
1857 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands that trade power for connection
 * latency: interlaced scanning with a 160 ms interval when @enable, the
 * standard type with the default 1.28 s interval otherwise. Skipped for
 * non-BR/EDR controllers and pre-1.2 controllers; commands are only queued
 * when the values actually change.
 */
1861 static void write_fast_connectable(struct hci_request *req, bool enable)
1863 struct hci_dev *hdev = req->hdev;
1864 struct hci_cp_write_page_scan_activity acp;
1867 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* Page scan type/activity commands require Bluetooth 1.2 or later. */
1870 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1874 type = PAGE_SCAN_TYPE_INTERLACED;
1876 /* 160 msec page scan interval */
1877 acp.interval = cpu_to_le16(0x0100);
1879 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1881 /* default 1.28 sec page scan */
1882 acp.interval = cpu_to_le16(0x0800);
1885 acp.window = cpu_to_le16(0x0012);
1887 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1888 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1889 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1892 if (hdev->page_scan_type != type)
1893 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI completion handler for Set Connectable. On failure, report the
 * error; on success, commit the connectable flag (clearing discoverable
 * state when turning connectable off), reply to the requester and, if
 * anything changed, broadcast new settings and refresh page scan,
 * advertising data and background scanning.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
1896 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1899 struct mgmt_pending_cmd *cmd;
1900 struct mgmt_mode *cp;
1901 bool conn_changed, discov_changed;
1903 BT_DBG("status 0x%02x", status);
1907 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1912 u8 mgmt_err = mgmt_status(status);
1913 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1919 conn_changed = !hci_dev_test_and_set_flag(hdev,
1921 discov_changed = false;
1923 conn_changed = hci_dev_test_and_clear_flag(hdev,
1925 discov_changed = hci_dev_test_and_clear_flag(hdev,
1929 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1931 if (conn_changed || discov_changed) {
1932 new_settings(hdev, cmd->sk);
1933 hci_update_page_scan(hdev);
1935 mgmt_update_adv_data(hdev);
1936 hci_update_background_scan(hdev);
1940 mgmt_pending_remove(cmd);
1943 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (no HCI traffic): toggle the
 * connectable flag (also clearing discoverable when disabling), reply to
 * @sk, and on change refresh page/background scanning and broadcast new
 * settings.
 */
1946 static int set_connectable_update_settings(struct hci_dev *hdev,
1947 struct sock *sk, u8 val)
1949 bool changed = false;
1952 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1956 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1958 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Not connectable implies not discoverable either. */
1959 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1962 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1967 hci_update_page_scan(hdev);
1968 hci_update_background_scan(hdev);
1969 return new_settings(hdev, sk);
/* Handler for MGMT_OP_SET_CONNECTABLE. Validates parameters and
 * preconditions, takes the flag-only path when powered off, rejects
 * conflicting pending commands, then builds an HCI request updating
 * advertising data (LE-only controllers) or the scan-enable setting
 * (BR/EDR), plus advertising parameters when needed. -ENODATA from
 * hci_req_run() means no HCI command was necessary and the settings-only
 * path is taken instead.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
1975 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1978 struct mgmt_mode *cp = data;
1979 struct mgmt_pending_cmd *cmd;
1980 struct hci_request req;
1984 BT_DBG("request for %s", hdev->name);
1986 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1987 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1989 MGMT_STATUS_REJECTED);
1991 if (cp->val != 0x00 && cp->val != 0x01)
1992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1993 MGMT_STATUS_INVALID_PARAMS);
1997 if (!hdev_is_powered(hdev)) {
1998 err = set_connectable_update_settings(hdev, sk, cp->val);
2002 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2003 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2004 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2009 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2015 hci_req_init(&req, hdev);
2017 /* If BR/EDR is not enabled and we disable advertising as a
2018 * by-product of disabling connectable, we need to update the
2019 * advertising flags.
2021 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2023 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2024 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2026 update_adv_data(&req);
2027 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2031 /* If we don't have any whitelist entries just
2032 * disable all scanning. If there are entries
2033 * and we had both page and inquiry scanning
2034 * enabled then fall back to only page scanning.
2035 * Otherwise no changes are needed.
2037 if (list_empty(&hdev->whitelist))
2038 scan = SCAN_DISABLED;
2039 else if (test_bit(HCI_ISCAN, &hdev->flags))
2042 goto no_scan_update;
2044 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2045 hdev->discov_timeout > 0)
2046 cancel_delayed_work(&hdev->discov_off);
2049 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2053 /* Update the advertising parameters if necessary */
2054 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2055 enable_advertising(&req);
2057 err = hci_req_run(&req, set_connectable_complete);
2059 mgmt_pending_remove(cmd);
2060 if (err == -ENODATA)
2061 err = set_connectable_update_settings(hdev, sk,
2067 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BONDABLE. A pure flag toggle with no HCI
 * traffic: validate the parameter, flip HCI_BONDABLE, reply, and broadcast
 * new settings when the value changed.
 */
2071 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2074 struct mgmt_mode *cp = data;
2078 BT_DBG("request for %s", hdev->name);
2080 if (cp->val != 0x00 && cp->val != 0x01)
2081 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2082 MGMT_STATUS_INVALID_PARAMS);
2087 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2089 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2091 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2096 err = new_settings(hdev, sk);
2099 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY. Requires BR/EDR support. When
 * powered off only the flag is toggled; when powered, short-circuits if
 * the HCI_AUTH bit already matches, otherwise queues a pending command and
 * sends HCI Write Authentication Enable.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2103 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2106 struct mgmt_mode *cp = data;
2107 struct mgmt_pending_cmd *cmd;
2111 BT_DBG("request for %s", hdev->name);
2113 status = mgmt_bredr_support(hdev);
2115 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2118 if (cp->val != 0x00 && cp->val != 0x01)
2119 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2120 MGMT_STATUS_INVALID_PARAMS);
2124 if (!hdev_is_powered(hdev)) {
2125 bool changed = false;
2127 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2128 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2132 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2137 err = new_settings(hdev, sk);
2142 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2143 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested state: no HCI command needed. */
2150 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2151 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2155 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2161 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2163 mgmt_pending_remove(cmd);
2168 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing). Requires BR/EDR
 * and SSP capability. Powered-off requests only toggle flags (disabling
 * SSP also clears HS); powered requests short-circuit when already in the
 * requested state, otherwise queue a pending command, turn off SSP debug
 * mode first when disabling SSP with debug keys in use, and send HCI
 * Write Simple Pairing Mode.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2172 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2174 struct mgmt_mode *cp = data;
2175 struct mgmt_pending_cmd *cmd;
2179 BT_DBG("request for %s", hdev->name);
2181 status = mgmt_bredr_support(hdev);
2183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2185 if (!lmp_ssp_capable(hdev))
2186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2187 MGMT_STATUS_NOT_SUPPORTED);
2189 if (cp->val != 0x00 && cp->val != 0x01)
2190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2191 MGMT_STATUS_INVALID_PARAMS);
2195 if (!hdev_is_powered(hdev)) {
2199 changed = !hci_dev_test_and_set_flag(hdev,
2202 changed = hci_dev_test_and_clear_flag(hdev,
2205 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so it is cleared alongside. */
2208 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2211 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2216 err = new_settings(hdev, sk);
2221 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2227 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2228 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2232 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2238 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2239 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2240 sizeof(cp->val), &cp->val);
2242 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2244 mgmt_pending_remove(cmd);
2249 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed). Requires BR/EDR, SSP support
 * and SSP enabled. Rejects while a SET_SSP command is pending. Purely a
 * flag toggle otherwise; note that disabling appears restricted while
 * powered (REJECTED path) per the visible code.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2253 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2255 struct mgmt_mode *cp = data;
2260 BT_DBG("request for %s", hdev->name);
2262 status = mgmt_bredr_support(hdev);
2264 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2266 if (!lmp_ssp_capable(hdev))
2267 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2268 MGMT_STATUS_NOT_SUPPORTED);
2270 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2271 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2272 MGMT_STATUS_REJECTED);
2274 if (cp->val != 0x00 && cp->val != 0x01)
2275 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2276 MGMT_STATUS_INVALID_PARAMS);
2280 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2281 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2287 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2289 if (hdev_is_powered(hdev)) {
2290 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2291 MGMT_STATUS_REJECTED);
2295 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2298 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2303 err = new_settings(hdev, sk);
2306 hci_dev_unlock(hdev);
/* HCI completion handler for Set LE. On failure, fail all pending SET_LE
 * commands with the mapped status. On success, answer them with the new
 * settings, broadcast the change and — when LE ended up enabled — refresh
 * advertising data, scan response data and background scanning in one
 * request.
 */
2310 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2312 struct cmd_lookup match = { NULL, hdev };
2317 u8 mgmt_err = mgmt_status(status);
2319 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2324 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2326 new_settings(hdev, match.sk);
2331 /* Make sure the controller has a good default for
2332 * advertising data. Restrict the update to when LE
2333 * has actually been enabled. During power on, the
2334 * update in powered_update_hci will take care of it.
2336 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2337 struct hci_request req;
2339 hci_req_init(&req, hdev);
2340 update_adv_data(&req);
2341 update_scan_rsp_data(&req);
2342 __hci_update_background_scan(&req);
2343 hci_req_run(&req, NULL);
2347 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE. Validates the parameter, refuses to switch
 * LE off on LE-only configurations (but gracefully acks enabling an
 * already-enabled LE), handles the powered-off / no-change flag path,
 * rejects conflicting pending commands, then sends HCI Write LE Host
 * Supported — disabling advertising first if it is active. Completion is
 * handled by le_enable_complete().
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2350 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2352 struct mgmt_mode *cp = data;
2353 struct hci_cp_write_le_host_supported hci_cp;
2354 struct mgmt_pending_cmd *cmd;
2355 struct hci_request req;
2359 BT_DBG("request for %s", hdev->name);
2361 if (!lmp_le_capable(hdev))
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2363 MGMT_STATUS_NOT_SUPPORTED);
2365 if (cp->val != 0x00 && cp->val != 0x01)
2366 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2367 MGMT_STATUS_INVALID_PARAMS);
2369 /* Bluetooth single mode LE only controllers or dual-mode
2370 * controllers configured as LE only devices, do not allow
2371 * switching LE off. These have either LE enabled explicitly
2372 * or BR/EDR has been previously switched off.
2374 * When trying to enable an already enabled LE, then gracefully
2375 * send a positive response. Trying to disable it however will
2376 * result into rejection.
2378 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2379 if (cp->val == 0x01)
2380 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2382 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2383 MGMT_STATUS_REJECTED);
2389 enabled = lmp_host_le_capable(hdev);
2391 if (!hdev_is_powered(hdev) || val == enabled) {
2392 bool changed = false;
2394 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2395 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also takes down LE advertising. */
2399 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2400 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2404 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2409 err = new_settings(hdev, sk);
2414 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2415 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2416 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2421 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2427 hci_req_init(&req, hdev);
2429 memset(&hci_cp, 0, sizeof(hci_cp));
2433 hci_cp.simul = 0x00;
2435 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2436 disable_advertising(&req);
2439 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2442 err = hci_req_run(&req, le_enable_complete);
2444 mgmt_pending_remove(cmd);
2447 hci_dev_unlock(hdev);
2451 /* This is a helper function to test for pending mgmt commands that can
2452 * cause CoD or EIR HCI commands. We can only allow one such pending
2453 * mgmt command at a time since otherwise we cannot easily track what
2454 * the current values are, will be, and based on that calculate if a new
2455 * HCI command needs to be sent and if yes with what value.
2457 static bool pending_eir_or_class(struct hci_dev *hdev)
2459 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for opcodes that touch class of device or EIR. */
2461 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2462 switch (cmd->opcode) {
2463 case MGMT_OP_ADD_UUID:
2464 case MGMT_OP_REMOVE_UUID:
2465 case MGMT_OP_SET_DEV_CLASS:
2466 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used to detect 16/32-bit short-form UUIDs.
 */
2474 static const u8 bluetooth_base_uuid[] = {
2475 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2476 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes don't match the Bluetooth
 * Base UUID it is a full 128-bit UUID; otherwise inspect the remaining
 * 32-bit value to distinguish 16- from 32-bit short forms.
 */
2479 static u8 get_uuid_size(const u8 *uuid)
2483 if (memcmp(uuid, bluetooth_base_uuid, 12))
2486 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for class-of-device related commands: complete
 * the pending @mgmt_op with the (mapped) status and the current 3-byte
 * class of device as the response payload.
 */
2493 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2495 struct mgmt_pending_cmd *cmd;
2499 cmd = pending_find(mgmt_op, hdev);
2503 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2504 mgmt_status(status), hdev->dev_class, 3);
2506 mgmt_pending_remove(cmd);
2509 hci_dev_unlock(hdev);
/* HCI completion handler for Add UUID: delegate to mgmt_class_complete(). */
2512 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2514 BT_DBG("status 0x%02x", status);
2516 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID. Rejects while another EIR/CoD-affecting
 * command is pending, allocates and appends the new UUID to hdev->uuids,
 * then runs an HCI request to refresh class of device / EIR. -ENODATA
 * (nothing to send) completes immediately with the current class;
 * otherwise a pending command is queued for add_uuid_complete().
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2519 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2521 struct mgmt_cp_add_uuid *cp = data;
2522 struct mgmt_pending_cmd *cmd;
2523 struct hci_request req;
2524 struct bt_uuid *uuid;
2527 BT_DBG("request for %s", hdev->name);
2531 if (pending_eir_or_class(hdev)) {
2532 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2537 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2543 memcpy(uuid->uuid, cp->uuid, 16);
2544 uuid->svc_hint = cp->svc_hint;
2545 uuid->size = get_uuid_size(cp->uuid);
2547 list_add_tail(&uuid->list, &hdev->uuids);
2549 hci_req_init(&req, hdev);
2554 err = hci_req_run(&req, add_uuid_complete);
2556 if (err != -ENODATA)
/* No HCI command was needed: reply right away with the current CoD. */
2559 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2560 hdev->dev_class, 3);
2564 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2573 hci_dev_unlock(hdev);
/* Arm the service cache: if the device is powered and the cache flag was
 * not already set, schedule the delayed service_cache work. Return value
 * tells the caller whether caching took effect (inferred from the caller
 * in remove_uuid).
 */
2577 static bool enable_service_cache(struct hci_dev *hdev)
2579 if (!hdev_is_powered(hdev))
2582 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2583 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI completion handler for Remove UUID: delegate to mgmt_class_complete(). */
2591 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2593 BT_DBG("status 0x%02x", status);
2595 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID. The all-zero UUID clears the entire
 * list (optionally deferring the update via the service cache); otherwise
 * all matching entries are unlinked, with INVALID_PARAMS if none matched.
 * An HCI request then refreshes CoD/EIR, completing immediately on
 * -ENODATA or via remove_uuid_complete() otherwise.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2598 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2601 struct mgmt_cp_remove_uuid *cp = data;
2602 struct mgmt_pending_cmd *cmd;
2603 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
2604 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2605 struct hci_request req;
2608 BT_DBG("request for %s", hdev->name);
2612 if (pending_eir_or_class(hdev)) {
2613 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2618 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2619 hci_uuids_clear(hdev);
2621 if (enable_service_cache(hdev)) {
2622 err = mgmt_cmd_complete(sk, hdev->id,
2623 MGMT_OP_REMOVE_UUID,
2624 0, hdev->dev_class, 3);
2633 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2634 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2637 list_del(&match->list);
2643 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2644 MGMT_STATUS_INVALID_PARAMS);
2649 hci_req_init(&req, hdev);
2654 err = hci_req_run(&req, remove_uuid_complete);
2656 if (err != -ENODATA)
2659 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2660 hdev->dev_class, 3);
2664 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2673 hci_dev_unlock(hdev);
/* HCI completion handler for Set Device Class: delegate to
 * mgmt_class_complete().
 */
2677 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2679 BT_DBG("status 0x%02x", status);
2681 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS. Requires BR/EDR; rejects while
 * another EIR/CoD command is pending; validates the reserved bits of the
 * major/minor class. Stores the new class, replying immediately when
 * powered off; otherwise flushes the service cache if armed and runs an
 * HCI request, completing via set_class_complete() or immediately on
 * -ENODATA.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2684 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2687 struct mgmt_cp_set_dev_class *cp = data;
2688 struct mgmt_pending_cmd *cmd;
2689 struct hci_request req;
2692 BT_DBG("request for %s", hdev->name);
2694 if (!lmp_bredr_capable(hdev))
2695 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2696 MGMT_STATUS_NOT_SUPPORTED);
2700 if (pending_eir_or_class(hdev)) {
2701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2706 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2707 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2708 MGMT_STATUS_INVALID_PARAMS);
2712 hdev->major_class = cp->major;
2713 hdev->minor_class = cp->minor;
2715 if (!hdev_is_powered(hdev)) {
2716 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2717 hdev->dev_class, 3);
2721 hci_req_init(&req, hdev);
2723 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
/* Drop the lock so the work item can finish synchronously. */
2724 hci_dev_unlock(hdev);
2725 cancel_delayed_work_sync(&hdev->service_cache);
2732 err = hci_req_run(&req, set_class_complete);
2734 if (err != -ENODATA)
2737 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2738 hdev->dev_class, 3);
2742 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2751 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS. Requires BR/EDR. Validates the key
 * count against overflow and the exact payload length, the debug_keys
 * flag, and each key's address type (BR/EDR only, key type <= 0x08).
 * Clears existing link keys, updates HCI_KEEP_DEBUG_KEYS, then stores each
 * key — skipping debug-combination keys, which always require re-pairing.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2755 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2758 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so expected_len below cannot overflow u16. */
2759 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2760 sizeof(struct mgmt_link_key_info));
2761 u16 key_count, expected_len;
2765 BT_DBG("request for %s", hdev->name);
2767 if (!lmp_bredr_capable(hdev))
2768 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2769 MGMT_STATUS_NOT_SUPPORTED);
2771 key_count = __le16_to_cpu(cp->key_count);
2772 if (key_count > max_key_count) {
2773 BT_ERR("load_link_keys: too big key_count value %u",
2775 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2776 MGMT_STATUS_INVALID_PARAMS);
2779 expected_len = sizeof(*cp) + key_count *
2780 sizeof(struct mgmt_link_key_info);
2781 if (expected_len != len) {
2782 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2785 MGMT_STATUS_INVALID_PARAMS);
2788 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2789 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2790 MGMT_STATUS_INVALID_PARAMS);
2792 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating any state. */
2795 for (i = 0; i < key_count; i++) {
2796 struct mgmt_link_key_info *key = &cp->keys[i];
2798 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2799 return mgmt_cmd_status(sk, hdev->id,
2800 MGMT_OP_LOAD_LINK_KEYS,
2801 MGMT_STATUS_INVALID_PARAMS);
2806 hci_link_keys_clear(hdev);
2809 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2811 changed = hci_dev_test_and_clear_flag(hdev,
2812 HCI_KEEP_DEBUG_KEYS);
2815 new_settings(hdev, NULL);
2817 for (i = 0; i < key_count; i++) {
2818 struct mgmt_link_key_info *key = &cp->keys[i];
2820 /* Always ignore debug keys and require a new pairing if
2821 * the user wants to use them.
2823 if (key->type == HCI_LK_DEBUG_COMBINATION)
2826 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2827 key->type, key->pin_len, NULL);
2830 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2832 hci_dev_unlock(hdev);
/* Broadcast an MGMT_EV_DEVICE_UNPAIRED event for the given address to all
 * mgmt sockets except @skip_sk.
 */
2837 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2838 u8 addr_type, struct sock *skip_sk)
2840 struct mgmt_ev_device_unpaired ev;
2842 bacpy(&ev.addr.bdaddr, bdaddr);
2843 ev.addr.type = addr_type;
2845 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE. Validates the address type and the
 * disconnect flag, requires the controller to be powered, then removes the
 * stored keys: link key for BR/EDR, or IRK + LTK for LE (also flagging the
 * connection for deferred parameter removal). If no keys existed, replies
 * NOT_PAIRED. If the device is connected and disconnection was requested,
 * queues a pending command and sends HCI Disconnect; otherwise completes
 * immediately and emits a Device Unpaired event.
 * NOTE(review): extract is missing interior lines; body shown is partial.
 */
2849 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2852 struct mgmt_cp_unpair_device *cp = data;
2853 struct mgmt_rp_unpair_device rp;
2854 struct hci_cp_disconnect dc;
2855 struct mgmt_pending_cmd *cmd;
2856 struct hci_conn *conn;
2859 memset(&rp, 0, sizeof(rp));
2860 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2861 rp.addr.type = cp->addr.type;
2863 if (!bdaddr_type_is_valid(cp->addr.type))
2864 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2865 MGMT_STATUS_INVALID_PARAMS,
2868 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2869 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2870 MGMT_STATUS_INVALID_PARAMS,
2875 if (!hdev_is_powered(hdev)) {
2876 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2877 MGMT_STATUS_NOT_POWERED, &rp,
2882 if (cp->addr.type == BDADDR_BREDR) {
2883 /* If disconnection is requested, then look up the
2884 * connection. If the remote device is connected, it
2885 * will be later used to terminate the link.
2887 * Setting it to NULL explicitly will cause no
2888 * termination of the link.
2891 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2896 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2900 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2903 /* Defer clearing up the connection parameters
2904 * until closing to give a chance of keeping
2905 * them if a repairing happens.
2907 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2909 /* If disconnection is not requested, then
2910 * clear the connection variable so that the
2911 * link is not terminated.
2913 if (!cp->disconnect)
2917 if (cp->addr.type == BDADDR_LE_PUBLIC)
2918 addr_type = ADDR_LE_DEV_PUBLIC;
2920 addr_type = ADDR_LE_DEV_RANDOM;
2922 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2924 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2928 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2929 MGMT_STATUS_NOT_PAIRED, &rp,
2934 /* If the connection variable is set, then termination of the
2935 * link is requested.
2938 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2940 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2944 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2951 cmd->cmd_complete = addr_cmd_complete;
2953 dc.handle = cpu_to_le16(conn->handle);
2954 dc.reason = 0x13; /* Remote User Terminated Connection */
2955 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2957 mgmt_pending_remove(cmd);
2960 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT: terminate an existing ACL or LE link
 * to the given address. Completion is signalled asynchronously once the
 * HCI disconnect finishes (via the pending command).
 */
2964 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2967 struct mgmt_cp_disconnect *cp = data;
2968 struct mgmt_rp_disconnect rp;
2969 struct mgmt_pending_cmd *cmd;
2970 struct hci_conn *conn;
/* Reply echoes back the target address. */
2975 memset(&rp, 0, sizeof(rp));
2976 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2977 rp.addr.type = cp->addr.type;
2979 if (!bdaddr_type_is_valid(cp->addr.type))
2980 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2981 MGMT_STATUS_INVALID_PARAMS,
2986 if (!test_bit(HCI_UP, &hdev->flags)) {
2987 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2988 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be in flight per controller. */
2993 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2995 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Pick the link type matching the mgmt address type. */
2999 if (cp->addr.type == BDADDR_BREDR)
3000 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3003 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
3005 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3006 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3007 MGMT_STATUS_NOT_CONNECTED, &rp,
3012 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3018 cmd->cmd_complete = generic_cmd_complete;
3020 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3022 mgmt_pending_remove(cmd);
3025 hci_dev_unlock(hdev);
/* Translate an HCI link type + HCI address type pair into the mgmt
 * BDADDR_* address type used on the management interface.
 * NOTE(review): some case labels were lost in extraction; code kept as-is.
 */
3029 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3031 switch (link_type) {
3033 switch (addr_type) {
3034 case ADDR_LE_DEV_PUBLIC:
3035 return BDADDR_LE_PUBLIC;
3038 /* Fallback to LE Random address type */
3039 return BDADDR_LE_RANDOM;
3043 /* Fallback to BR/EDR type */
3044 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: return the list of mgmt-visible
 * connections. Walks the connection hash twice — once to size the reply
 * buffer, once to fill it — and filters out SCO/eSCO links.
 */
3048 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3051 struct mgmt_rp_get_connections *rp;
3061 if (!hdev_is_powered(hdev)) {
3062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3063 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections that mgmt has been told about. */
3068 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3069 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3073 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3074 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
3081 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3082 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3084 bacpy(&rp->addr[i].bdaddr, &c->dst);
3085 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3086 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3091 rp->conn_count = cpu_to_le16(i);
3093 /* Recalculate length in case of filtered SCO connections, etc */
3094 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3096 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3102 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the corresponding
 * HCI negative reply for the given address. The pending entry is removed
 * on send failure.
 */
3106 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3107 struct mgmt_cp_pin_code_neg_reply *cp)
3109 struct mgmt_pending_cmd *cmd;
3112 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3117 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3118 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3120 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code
 * to the controller for an ongoing BR/EDR pairing. If high security is
 * required, the PIN must be a full 16 bytes; otherwise a negative reply
 * is sent instead.
 */
3125 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3128 struct hci_conn *conn;
3129 struct mgmt_cp_pin_code_reply *cp = data;
3130 struct hci_cp_pin_code_reply reply;
3131 struct mgmt_pending_cmd *cmd;
3138 if (!hdev_is_powered(hdev)) {
3139 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3140 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only makes sense on an existing ACL link. */
3144 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3146 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3147 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a 16-byte PIN; reject shorter ones by
 * sending a negative reply to the controller.
 */
3151 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3152 struct mgmt_cp_pin_code_neg_reply ncp;
3154 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3156 BT_ERR("PIN code is not 16 bytes long");
3158 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3160 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3161 MGMT_STATUS_INVALID_PARAMS);
3166 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3172 cmd->cmd_complete = addr_cmd_complete;
3174 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3175 reply.pin_len = cp->pin_len;
3176 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3178 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3180 mgmt_pending_remove(cmd);
3183 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: store the IO capability used
 * for subsequent pairings. Values above SMP_IO_KEYBOARD_DISPLAY are
 * rejected as invalid.
 */
3187 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3190 struct mgmt_cp_set_io_capability *cp = data;
3194 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3195 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3196 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3200 hdev->io_capability = cp->io_capability;
3202 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3203 hdev->io_capability);
3205 hci_dev_unlock(hdev);
3207 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data matches
 * the given connection, or NULL-equivalent if none is in flight.
 */
3211 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3213 struct hci_dev *hdev = conn->hdev;
3214 struct mgmt_pending_cmd *cmd;
3216 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3217 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3220 if (cmd->user_data != conn)
/* Complete a pending PAIR_DEVICE command with the given status, detach
 * all pairing callbacks from the connection, and drop the command's
 * connection reference.
 */
3229 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3231 struct mgmt_rp_pair_device rp;
3232 struct hci_conn *conn = cmd->user_data;
3235 bacpy(&rp.addr.bdaddr, &conn->dst);
3236 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3238 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3239 status, &rp, sizeof(rp));
3241 /* So we don't get further callbacks for this connection */
3242 conn->connect_cfm_cb = NULL;
3243 conn->security_cfm_cb = NULL;
3244 conn->disconn_cfm_cb = NULL;
3246 hci_conn_drop(conn);
3248 /* The device is paired so there is no need to remove
3249 * its connection parameters anymore.
3251 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by SMP when pairing over LE finishes; completes any pending
 * PAIR_DEVICE command with success or failure accordingly.
 */
3258 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3260 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3261 struct mgmt_pending_cmd *cmd;
3263 cmd = find_pairing(conn);
3265 cmd->cmd_complete(cmd, status);
3266 mgmt_pending_remove(cmd);
/* Connection callback for BR/EDR pairing: completes the pending
 * PAIR_DEVICE command with the (translated) HCI status.
 */
3270 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3272 struct mgmt_pending_cmd *cmd;
3274 BT_DBG("status %u", status);
3276 cmd = find_pairing(conn);
3278 BT_DBG("Unable to find a pending command");
3282 cmd->cmd_complete(cmd, mgmt_status(status));
3283 mgmt_pending_remove(cmd);
/* Connection callback for LE pairing: completes the pending PAIR_DEVICE
 * command with the (translated) HCI status.
 * NOTE(review): an early-return condition between lines 3290 and 3295
 * appears lost in extraction; code kept byte-identical.
 */
3286 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3288 struct mgmt_pending_cmd *cmd;
3290 BT_DBG("status %u", status);
3295 cmd = find_pairing(conn);
3297 BT_DBG("Unable to find a pending command");
3301 cmd->cmd_complete(cmd, mgmt_status(status));
3302 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PAIR_DEVICE: initiate dedicated bonding with a
 * remote device. Establishes an ACL (BR/EDR) or LE connection, attaches
 * pairing callbacks, and tracks progress via a pending command that is
 * completed from those callbacks.
 */
3305 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3308 struct mgmt_cp_pair_device *cp = data;
3309 struct mgmt_rp_pair_device rp;
3310 struct mgmt_pending_cmd *cmd;
3311 u8 sec_level, auth_type;
3312 struct hci_conn *conn;
/* Reply echoes back the target address. */
3317 memset(&rp, 0, sizeof(rp));
3318 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3319 rp.addr.type = cp->addr.type;
3321 if (!bdaddr_type_is_valid(cp->addr.type))
3322 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3323 MGMT_STATUS_INVALID_PARAMS,
3326 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3327 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3328 MGMT_STATUS_INVALID_PARAMS,
3333 if (!hdev_is_powered(hdev)) {
3334 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3335 MGMT_STATUS_NOT_POWERED, &rp,
/* Refuse to re-pair with an already-paired device. */
3340 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3341 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3342 MGMT_STATUS_ALREADY_PAIRED, &rp,
3347 sec_level = BT_SECURITY_MEDIUM;
3348 auth_type = HCI_AT_DEDICATED_BONDING;
3350 if (cp->addr.type == BDADDR_BREDR) {
3351 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3356 /* Convert from L2CAP channel address type to HCI address type
3358 if (cp->addr.type == BDADDR_LE_PUBLIC)
3359 addr_type = ADDR_LE_DEV_PUBLIC;
3361 addr_type = ADDR_LE_DEV_RANDOM;
3363 /* When pairing a new device, it is expected to remember
3364 * this device for future connections. Adding the connection
3365 * parameter information ahead of time allows tracking
3366 * of the slave preferred values and will speed up any
3367 * further connection establishment.
3369 * If connection parameters already exist, then they
3370 * will be kept and this function does nothing.
3372 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3374 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3375 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map connection-establishment errno to a mgmt status code. */
3382 if (PTR_ERR(conn) == -EBUSY)
3383 status = MGMT_STATUS_BUSY;
3384 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3385 status = MGMT_STATUS_NOT_SUPPORTED;
3386 else if (PTR_ERR(conn) == -ECONNREFUSED)
3387 status = MGMT_STATUS_REJECTED;
3389 status = MGMT_STATUS_CONNECT_FAILED;
3391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3392 status, &rp, sizeof(rp));
/* A connect callback already installed means another pairing is
 * using this connection.
 */
3396 if (conn->connect_cfm_cb) {
3397 hci_conn_drop(conn);
3398 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3399 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3403 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3406 hci_conn_drop(conn);
3410 cmd->cmd_complete = pairing_complete;
3412 /* For LE, just connecting isn't a proof that the pairing finished */
3413 if (cp->addr.type == BDADDR_BREDR) {
3414 conn->connect_cfm_cb = pairing_complete_cb;
3415 conn->security_cfm_cb = pairing_complete_cb;
3416 conn->disconn_cfm_cb = pairing_complete_cb;
3418 conn->connect_cfm_cb = le_pairing_complete_cb;
3419 conn->security_cfm_cb = le_pairing_complete_cb;
3420 conn->disconn_cfm_cb = le_pairing_complete_cb;
3423 conn->io_capability = cp->io_cap;
/* Pending command holds a connection reference until completion. */
3424 cmd->user_data = hci_conn_get(conn);
/* If already connected and security is in place, finish immediately. */
3426 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3427 hci_conn_security(conn, sec_level, auth_type, true)) {
3428 cmd->cmd_complete(cmd, 0);
3429 mgmt_pending_remove(cmd);
3435 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-flight
 * PAIR_DEVICE command, provided the supplied address matches the
 * connection being paired.
 */
3439 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3442 struct mgmt_addr_info *addr = data;
3443 struct mgmt_pending_cmd *cmd;
3444 struct hci_conn *conn;
3451 if (!hdev_is_powered(hdev)) {
3452 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3453 MGMT_STATUS_NOT_POWERED);
/* Nothing to cancel if no pairing is pending. */
3457 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3459 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3460 MGMT_STATUS_INVALID_PARAMS);
3464 conn = cmd->user_data;
/* The cancel request must target the same device being paired. */
3466 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3467 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3468 MGMT_STATUS_INVALID_PARAMS);
3472 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3473 mgmt_pending_remove(cmd);
3475 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3476 addr, sizeof(*addr));
3478 hci_dev_unlock(hdev);
/* Common implementation for all user pairing responses (PIN negative
 * reply, user confirm/passkey replies and their negatives). For LE
 * links the response is routed through SMP; for BR/EDR it is forwarded
 * as the given HCI command, tracked via a pending mgmt command.
 */
3482 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3483 struct mgmt_addr_info *addr, u16 mgmt_op,
3484 u16 hci_op, __le32 passkey)
3486 struct mgmt_pending_cmd *cmd;
3487 struct hci_conn *conn;
3492 if (!hdev_is_powered(hdev)) {
3493 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3494 MGMT_STATUS_NOT_POWERED, addr,
3499 if (addr->type == BDADDR_BREDR)
3500 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3502 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3505 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3506 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go to SMP, not to the controller. */
3511 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3512 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3514 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3515 MGMT_STATUS_SUCCESS, addr,
3518 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3519 MGMT_STATUS_FAILED, addr,
3525 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3531 cmd->cmd_complete = addr_cmd_complete;
3533 /* Continue with pairing via HCI */
/* Passkey reply carries the passkey; all other ops only the address. */
3534 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3535 struct hci_cp_user_passkey_reply cp;
3537 bacpy(&cp.bdaddr, &addr->bdaddr);
3538 cp.passkey = passkey;
3539 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3541 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3545 mgmt_pending_remove(cmd);
3548 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3552 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3553 void *data, u16 len)
3555 struct mgmt_cp_pin_code_neg_reply *cp = data;
3559 return user_pairing_resp(sk, hdev, &cp->addr,
3560 MGMT_OP_PIN_CODE_NEG_REPLY,
3561 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: validates the fixed command
 * length, then delegates to user_pairing_resp().
 */
3564 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3567 struct mgmt_cp_user_confirm_reply *cp = data;
3571 if (len != sizeof(*cp))
3572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3573 MGMT_STATUS_INVALID_PARAMS);
3575 return user_pairing_resp(sk, hdev, &cp->addr,
3576 MGMT_OP_USER_CONFIRM_REPLY,
3577 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper delegating
 * to user_pairing_resp() with the matching HCI opcode.
 */
3580 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3581 void *data, u16 len)
3583 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3587 return user_pairing_resp(sk, hdev, &cp->addr,
3588 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3589 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: delegates to
 * user_pairing_resp(), passing through the user-entered passkey.
 */
3592 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3595 struct mgmt_cp_user_passkey_reply *cp = data;
3599 return user_pairing_resp(sk, hdev, &cp->addr,
3600 MGMT_OP_USER_PASSKEY_REPLY,
3601 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper delegating
 * to user_pairing_resp() with the matching HCI opcode.
 */
3604 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3605 void *data, u16 len)
3607 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3611 return user_pairing_resp(sk, hdev, &cp->addr,
3612 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3613 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (using hdev->dev_name) to the
 * given request.
 */
3616 static void update_name(struct hci_request *req)
3618 struct hci_dev *hdev = req->hdev;
3619 struct hci_cp_write_local_name cp;
3621 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3623 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for SET_LOCAL_NAME: resolves the pending
 * mgmt command with either the translated HCI status or success.
 */
3626 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3628 struct mgmt_cp_set_local_name *cp;
3629 struct mgmt_pending_cmd *cmd;
3631 BT_DBG("status 0x%02x", status);
3635 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3642 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3643 mgmt_status(status));
3645 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3648 mgmt_pending_remove(cmd);
3651 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the controller's local and
 * short names. Short-circuits when nothing changed; when unpowered only
 * the cached values are updated and a LOCAL_NAME_CHANGED event emitted;
 * otherwise the change is written to the controller via an HCI request.
 */
3654 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3657 struct mgmt_cp_set_local_name *cp = data;
3658 struct mgmt_pending_cmd *cmd;
3659 struct hci_request req;
3666 /* If the old values are the same as the new ones just return a
3667 * direct command complete event.
3669 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3670 !memcmp(hdev->short_name, cp->short_name,
3671 sizeof(hdev->short_name))) {
3672 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3677 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Unpowered: just cache the name and notify listeners. */
3679 if (!hdev_is_powered(hdev)) {
3680 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3682 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3687 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3693 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3699 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3701 hci_req_init(&req, hdev);
3703 if (lmp_bredr_capable(hdev)) {
3708 /* The name is stored in the scan response data and so
3709 * no need to udpate the advertising data here.
3711 if (lmp_le_capable(hdev))
3712 update_scan_rsp_data(&req);
3714 err = hci_req_run(&req, set_name_complete);
3716 mgmt_pending_remove(cmd);
3719 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its
 * local OOB pairing data. Uses the extended (Secure Connections) HCI
 * command when BR/EDR SC is enabled, the legacy command otherwise.
 */
3723 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3724 void *data, u16 data_len)
3726 struct mgmt_pending_cmd *cmd;
3729 BT_DBG("%s", hdev->name);
3733 if (!hdev_is_powered(hdev)) {
3734 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3735 MGMT_STATUS_NOT_POWERED);
/* OOB data requires SSP support in the controller. */
3739 if (!lmp_ssp_capable(hdev)) {
3740 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3741 MGMT_STATUS_NOT_SUPPORTED);
3745 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3751 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3757 if (bredr_sc_enabled(hdev))
3758 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3761 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3764 mgmt_pending_remove(cmd);
3767 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: store a remote device's OOB
 * pairing data. Supports two command sizes — the legacy form with only
 * P-192 hash/randomizer (BR/EDR only) and the extended form that also
 * carries P-256 values; zero-valued key material disables that key set.
 */
3771 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3772 void *data, u16 len)
3774 struct mgmt_addr_info *addr = data;
3777 BT_DBG("%s ", hdev->name);
3779 if (!bdaddr_type_is_valid(addr->type))
3780 return mgmt_cmd_complete(sk, hdev->id,
3781 MGMT_OP_ADD_REMOTE_OOB_DATA,
3782 MGMT_STATUS_INVALID_PARAMS,
3783 addr, sizeof(*addr));
/* Legacy command: P-192 data only, valid for BR/EDR addresses. */
3787 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3788 struct mgmt_cp_add_remote_oob_data *cp = data;
3791 if (cp->addr.type != BDADDR_BREDR) {
3792 err = mgmt_cmd_complete(sk, hdev->id,
3793 MGMT_OP_ADD_REMOTE_OOB_DATA,
3794 MGMT_STATUS_INVALID_PARAMS,
3795 &cp->addr, sizeof(cp->addr));
3799 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3800 cp->addr.type, cp->hash,
3801 cp->rand, NULL, NULL);
3803 status = MGMT_STATUS_FAILED;
3805 status = MGMT_STATUS_SUCCESS;
3807 err = mgmt_cmd_complete(sk, hdev->id,
3808 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3809 &cp->addr, sizeof(cp->addr));
3810 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3811 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3812 u8 *rand192, *hash192, *rand256, *hash256;
3815 if (bdaddr_type_is_le(cp->addr.type)) {
3816 /* Enforce zero-valued 192-bit parameters as
3817 * long as legacy SMP OOB isn't implemented.
3819 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3820 memcmp(cp->hash192, ZERO_KEY, 16)) {
3821 err = mgmt_cmd_complete(sk, hdev->id,
3822 MGMT_OP_ADD_REMOTE_OOB_DATA,
3823 MGMT_STATUS_INVALID_PARAMS,
3824 addr, sizeof(*addr));
3831 /* In case one of the P-192 values is set to zero,
3832 * then just disable OOB data for P-192.
3834 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3835 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3839 rand192 = cp->rand192;
3840 hash192 = cp->hash192;
3844 /* In case one of the P-256 values is set to zero, then just
3845 * disable OOB data for P-256.
3847 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3848 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3852 rand256 = cp->rand256;
3853 hash256 = cp->hash256;
3856 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3857 cp->addr.type, hash192, rand192,
3860 status = MGMT_STATUS_FAILED;
3862 status = MGMT_STATUS_SUCCESS;
3864 err = mgmt_cmd_complete(sk, hdev->id,
3865 MGMT_OP_ADD_REMOTE_OOB_DATA,
3866 status, &cp->addr, sizeof(cp->addr));
/* Any other length is malformed. */
3868 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3869 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3870 MGMT_STATUS_INVALID_PARAMS);
3874 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB
 * data for one BR/EDR address, or clear all entries when BDADDR_ANY is
 * given.
 */
3878 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3879 void *data, u16 len)
3881 struct mgmt_cp_remove_remote_oob_data *cp = data;
3885 BT_DBG("%s", hdev->name);
3887 if (cp->addr.type != BDADDR_BREDR)
3888 return mgmt_cmd_complete(sk, hdev->id,
3889 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3890 MGMT_STATUS_INVALID_PARAMS,
3891 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY is a wildcard: wipe the entire OOB data store. */
3895 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3896 hci_remote_oob_data_clear(hdev);
3897 status = MGMT_STATUS_SUCCESS;
3901 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3903 status = MGMT_STATUS_INVALID_PARAMS;
3905 status = MGMT_STATUS_SUCCESS;
3908 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3909 status, &cp->addr, sizeof(cp->addr));
3911 hci_dev_unlock(hdev);
/* Append a BR/EDR inquiry (GIAC) to the request. Returns false with
 * *status set when BR/EDR is unsupported or an inquiry is already
 * running.
 */
3915 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
3917 struct hci_dev *hdev = req->hdev;
3918 struct hci_cp_inquiry cp;
3919 /* General inquiry access code (GIAC) */
3920 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3922 *status = mgmt_bredr_support(hdev);
3926 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
3927 *status = MGMT_STATUS_BUSY;
/* Drop stale results before starting a fresh inquiry. */
3931 hci_inquiry_cache_flush(hdev);
3933 memset(&cp, 0, sizeof(cp));
3934 memcpy(&cp.lap, lap, sizeof(cp.lap));
3935 cp.length = DISCOV_BREDR_INQUIRY_LEN;
3937 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Append the HCI commands that start an active LE scan with the given
 * interval. Handles stopping conflicting advertising/background scans
 * and selecting the own-address type. Returns false with *status set on
 * failure.
 */
3942 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
3944 struct hci_dev *hdev = req->hdev;
3945 struct hci_cp_le_set_scan_param param_cp;
3946 struct hci_cp_le_set_scan_enable enable_cp;
3950 *status = mgmt_le_support(hdev);
3954 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
3955 /* Don't let discovery abort an outgoing connection attempt
3956 * that's using directed advertising.
3958 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3959 *status = MGMT_STATUS_REJECTED;
3963 disable_advertising(req);
3966 /* If controller is scanning, it means the background scanning is
3967 * running. Thus, we should temporarily stop it in order to set the
3968 * discovery scanning parameters.
3970 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3971 hci_req_add_le_scan_disable(req);
3973 /* All active scans will be done with either a resolvable private
3974 * address (when privacy feature has been enabled) or non-resolvable
3977 err = hci_update_random_address(req, true, &own_addr_type);
3979 *status = MGMT_STATUS_FAILED;
/* Scan parameters: active scan at the requested interval. */
3983 memset(&param_cp, 0, sizeof(param_cp));
3984 param_cp.type = LE_SCAN_ACTIVE;
3985 param_cp.interval = cpu_to_le16(interval);
3986 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3987 param_cp.own_address_type = own_addr_type;
3989 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3992 memset(&enable_cp, 0, sizeof(enable_cp));
3993 enable_cp.enable = LE_SCAN_ENABLE;
3994 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3996 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Dispatch on hdev->discovery.type and queue the matching discovery
 * commands: BR/EDR inquiry, LE scan, or both for interleaved discovery
 * (with a doubled LE interval when the controller supports simultaneous
 * discovery). Returns false with *status set on failure.
 */
4002 static bool trigger_discovery(struct hci_request *req, u8 *status)
4004 struct hci_dev *hdev = req->hdev;
4006 switch (hdev->discovery.type) {
4007 case DISCOV_TYPE_BREDR:
4008 if (!trigger_bredr_inquiry(req, status))
4012 case DISCOV_TYPE_INTERLEAVED:
4013 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4015 /* During simultaneous discovery, we double LE scan
4016 * interval. We must leave some time for the controller
4017 * to do BR/EDR inquiry.
4019 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4023 if (!trigger_bredr_inquiry(req, status))
/* Interleaved discovery requires BR/EDR to be enabled. */
4029 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4030 *status = MGMT_STATUS_NOT_SUPPORTED;
4035 case DISCOV_TYPE_LE:
4036 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4041 *status = MGMT_STATUS_INVALID_PARAMS;
/* Request-completion callback for both START_DISCOVERY and
 * START_SERVICE_DISCOVERY: resolves the pending command, moves the
 * discovery state machine forward, and schedules le_scan_disable with a
 * timeout appropriate for the discovery type.
 */
4048 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4051 struct mgmt_pending_cmd *cmd;
4052 unsigned long timeout;
4054 BT_DBG("status %d", status);
/* Either of the two start-discovery opcodes may be pending. */
4058 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4060 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4063 cmd->cmd_complete(cmd, mgmt_status(status));
4064 mgmt_pending_remove(cmd);
4068 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4072 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4074 /* If the scan involves LE scan, pick proper timeout to schedule
4075 * hdev->le_scan_disable that will stop it.
4077 switch (hdev->discovery.type) {
4078 case DISCOV_TYPE_LE:
4079 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4081 case DISCOV_TYPE_INTERLEAVED:
4082 /* When running simultaneous discovery, the LE scanning time
4083 * should occupy the whole discovery time sine BR/EDR inquiry
4084 * and LE scanning are scheduled by the controller.
4086 * For interleaving discovery in comparison, BR/EDR inquiry
4087 * and LE scanning are done sequentially with separate
4090 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4091 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4093 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4095 case DISCOV_TYPE_BREDR:
4099 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4105 /* When service discovery is used and the controller has
4106 * a strict duplicate filter, it is important to remember
4107 * the start and duration of the scan. This is required
4108 * for restarting scanning during the discovery phase.
4110 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4112 hdev->discovery.result_filtering) {
4113 hdev->discovery.scan_start = jiffies;
4114 hdev->discovery.scan_duration = timeout;
4117 queue_delayed_work(hdev->workqueue,
4118 &hdev->le_scan_disable, timeout);
4122 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_START_DISCOVERY: begin device discovery of the
 * requested type. Rejects the command when the controller is off,
 * discovery is already running, or a periodic inquiry is active.
 */
4125 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4126 void *data, u16 len)
4128 struct mgmt_cp_start_discovery *cp = data;
4129 struct mgmt_pending_cmd *cmd;
4130 struct hci_request req;
4134 BT_DBG("%s", hdev->name);
4138 if (!hdev_is_powered(hdev)) {
4139 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4140 MGMT_STATUS_NOT_POWERED,
4141 &cp->type, sizeof(cp->type));
4145 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4146 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4148 MGMT_STATUS_BUSY, &cp->type,
4153 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4159 cmd->cmd_complete = generic_cmd_complete;
4161 /* Clear the discovery filter first to free any previously
4162 * allocated memory for the UUID list.
4164 hci_discovery_filter_clear(hdev);
4166 hdev->discovery.type = cp->type;
4167 hdev->discovery.report_invalid_rssi = false;
4169 hci_req_init(&req, hdev);
4171 if (!trigger_discovery(&req, &status)) {
4172 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4173 status, &cp->type, sizeof(cp->type));
4174 mgmt_pending_remove(cmd);
4178 err = hci_req_run(&req, start_discovery_complete);
4180 mgmt_pending_remove(cmd);
/* Commands queued; mark discovery as starting until completion. */
4184 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4187 hci_dev_unlock(hdev);
/* Completion helper for START_SERVICE_DISCOVERY pending commands:
 * replies with the command's stored parameter data.
 */
4191 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4194 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but
 * with result filtering by RSSI and an optional list of 128-bit service
 * UUIDs. Validates the variable-length UUID list against the command
 * length before copying it.
 */
4198 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4199 void *data, u16 len)
4201 struct mgmt_cp_start_service_discovery *cp = data;
4202 struct mgmt_pending_cmd *cmd;
4203 struct hci_request req;
/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
4204 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4205 u16 uuid_count, expected_len;
4209 BT_DBG("%s", hdev->name);
4213 if (!hdev_is_powered(hdev)) {
4214 err = mgmt_cmd_complete(sk, hdev->id,
4215 MGMT_OP_START_SERVICE_DISCOVERY,
4216 MGMT_STATUS_NOT_POWERED,
4217 &cp->type, sizeof(cp->type));
4221 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4222 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4223 err = mgmt_cmd_complete(sk, hdev->id,
4224 MGMT_OP_START_SERVICE_DISCOVERY,
4225 MGMT_STATUS_BUSY, &cp->type,
4230 uuid_count = __le16_to_cpu(cp->uuid_count);
4231 if (uuid_count > max_uuid_count) {
4232 BT_ERR("service_discovery: too big uuid_count value %u",
4234 err = mgmt_cmd_complete(sk, hdev->id,
4235 MGMT_OP_START_SERVICE_DISCOVERY,
4236 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The declared UUID count must exactly match the payload length. */
4241 expected_len = sizeof(*cp) + uuid_count * 16;
4242 if (expected_len != len) {
4243 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4245 err = mgmt_cmd_complete(sk, hdev->id,
4246 MGMT_OP_START_SERVICE_DISCOVERY,
4247 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4252 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4259 cmd->cmd_complete = service_discovery_cmd_complete;
4261 /* Clear the discovery filter first to free any previously
4262 * allocated memory for the UUID list.
4264 hci_discovery_filter_clear(hdev);
4266 hdev->discovery.result_filtering = true;
4267 hdev->discovery.type = cp->type;
4268 hdev->discovery.rssi = cp->rssi;
4269 hdev->discovery.uuid_count = uuid_count;
4271 if (uuid_count > 0) {
4272 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4274 if (!hdev->discovery.uuids) {
4275 err = mgmt_cmd_complete(sk, hdev->id,
4276 MGMT_OP_START_SERVICE_DISCOVERY,
4278 &cp->type, sizeof(cp->type));
4279 mgmt_pending_remove(cmd);
4284 hci_req_init(&req, hdev);
4286 if (!trigger_discovery(&req, &status)) {
4287 err = mgmt_cmd_complete(sk, hdev->id,
4288 MGMT_OP_START_SERVICE_DISCOVERY,
4289 status, &cp->type, sizeof(cp->type));
4290 mgmt_pending_remove(cmd);
4294 err = hci_req_run(&req, start_discovery_complete);
4296 mgmt_pending_remove(cmd);
4300 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4303 hci_dev_unlock(hdev);
/* Request-completion callback for STOP_DISCOVERY: resolves the pending
 * command and, on success, marks discovery as stopped.
 */
4307 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4309 struct mgmt_pending_cmd *cmd;
4311 BT_DBG("status %d", status);
4315 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4317 cmd->cmd_complete(cmd, mgmt_status(status));
4318 mgmt_pending_remove(cmd);
4322 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4324 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY: halt an active discovery session
 * of the given type. If stopping required no HCI traffic (-ENODATA),
 * the command is completed immediately.
 */
4327 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4330 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4331 struct mgmt_pending_cmd *cmd;
4332 struct hci_request req;
4335 BT_DBG("%s", hdev->name);
4339 if (!hci_discovery_active(hdev)) {
4340 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4341 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4342 sizeof(mgmt_cp->type));
/* The requested type must match the discovery that is running. */
4346 if (hdev->discovery.type != mgmt_cp->type) {
4347 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4348 MGMT_STATUS_INVALID_PARAMS,
4349 &mgmt_cp->type, sizeof(mgmt_cp->type));
4353 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4359 cmd->cmd_complete = generic_cmd_complete;
4361 hci_req_init(&req, hdev);
4363 hci_stop_discovery(&req);
4365 err = hci_req_run(&req, stop_discovery_complete);
4367 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4371 mgmt_pending_remove(cmd);
4373 /* If no HCI commands were sent we're done */
4374 if (err == -ENODATA) {
4375 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4376 &mgmt_cp->type, sizeof(mgmt_cp->type));
4377 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4381 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: during discovery, record whether a
 * device's name is already known. If not, mark it NAME_NEEDED so name
 * resolution is scheduled for it.
 */
4385 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4388 struct mgmt_cp_confirm_name *cp = data;
4389 struct inquiry_entry *e;
4392 BT_DBG("%s", hdev->name);
4396 if (!hci_discovery_active(hdev)) {
4397 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4398 MGMT_STATUS_FAILED, &cp->addr,
/* Only entries from the current inquiry with unknown names qualify. */
4403 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4405 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4406 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4411 if (cp->name_known) {
4412 e->name_state = NAME_KNOWN;
4415 e->name_state = NAME_NEEDED;
4416 hci_inquiry_cache_update_resolve(hdev, e);
4419 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4420 &cp->addr, sizeof(cp->addr));
4423 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the given address to the adapter's
 * blacklist and emit MGMT_EV_DEVICE_BLOCKED on success.
 */
4427 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4430 	struct mgmt_cp_block_device *cp = data;
4434 	BT_DBG("%s", hdev->name);
4436 	if (!bdaddr_type_is_valid(cp->addr.type))
4437 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4438 					 MGMT_STATUS_INVALID_PARAMS,
4439 					 &cp->addr, sizeof(cp->addr));
	/* hci_bdaddr_list_add() fails e.g. when the entry already exists */
4443 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4446 		status = MGMT_STATUS_FAILED;
4450 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4452 	status = MGMT_STATUS_SUCCESS;
4455 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4456 				&cp->addr, sizeof(cp->addr));
4458 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * adapter's blacklist and emit MGMT_EV_DEVICE_UNBLOCKED on success.
 * Mirrors block_device() above.
 */
4463 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4466 	struct mgmt_cp_unblock_device *cp = data;
4470 	BT_DBG("%s", hdev->name);
4472 	if (!bdaddr_type_is_valid(cp->addr.type))
4473 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4474 					 MGMT_STATUS_INVALID_PARAMS,
4475 					 &cp->addr, sizeof(cp->addr));
	/* Deletion failure means the address was never blocked */
4479 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4482 		status = MGMT_STATUS_INVALID_PARAMS;
4486 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4488 	status = MGMT_STATUS_SUCCESS;
4491 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4492 				&cp->addr, sizeof(cp->addr));
4494 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID (DI) record fields
 * (source, vendor, product, version) on the adapter. Source values
 * above 0x0002 are invalid per the DI profile.
 */
4499 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4502 	struct mgmt_cp_set_device_id *cp = data;
4503 	struct hci_request req;
4507 	BT_DBG("%s", hdev->name);
4509 	source = __le16_to_cpu(cp->source);
4511 	if (source > 0x0002)
4512 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4513 				       MGMT_STATUS_INVALID_PARAMS);
4517 	hdev->devid_source = source;
4518 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4519 	hdev->devid_product = __le16_to_cpu(cp->product);
4520 	hdev->devid_version = __le16_to_cpu(cp->version);
4522 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
	/* Fire-and-forget request; no completion callback needed */
4525 	hci_req_init(&req, hdev);
4527 	hci_req_run(&req, NULL);
4529 	hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising from
 * set_advertising_complete(); only logs the resulting HCI status.
 */
4534 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4537 	BT_DBG("status %d", status);
/* HCI request completion for set_advertising(): on failure, fail all
 * pending SET_ADVERTISING commands; on success, sync HCI_ADVERTISING
 * with the actual HCI_LE_ADV state, answer the pending commands and
 * emit New Settings. If the global setting was just turned off while
 * instance advertising is configured, re-enable the instance.
 */
4540 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4543 	struct cmd_lookup match = { NULL, hdev };
4544 	struct hci_request req;
4549 		u8 mgmt_err = mgmt_status(status);
4551 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4552 				     cmd_status_rsp, &mgmt_err);
	/* Mirror the controller's actual LE advertising state in the flag */
4556 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4557 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
4559 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4561 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4564 	new_settings(hdev, match.sk);
4569 	/* If "Set Advertising" was just disabled and instance advertising was
4570 	 * set up earlier, then enable the advertising instance.
4572 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4573 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
4576 	hci_req_init(&req, hdev);
4578 	update_adv_data(&req);
4579 	enable_advertising(&req);
4581 	if (hci_req_run(&req, enable_advertising_instance) < 0)
4582 		BT_ERR("Failed to re-configure advertising");
4585 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising. Value 0x00
 * disables, 0x01 enables, 0x02 enables "connectable" advertising.
 * When no HCI traffic is required (powered off, no state change, LE
 * link active, or active LE scan in progress) only the flags are
 * toggled and the reply is sent directly; otherwise an HCI request is
 * queued and finished in set_advertising_complete().
 */
4588 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4591 	struct mgmt_mode *cp = data;
4592 	struct mgmt_pending_cmd *cmd;
4593 	struct hci_request req;
4597 	BT_DBG("request for %s", hdev->name);
4599 	status = mgmt_le_support(hdev);
4601 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4604 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4605 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4606 				       MGMT_STATUS_INVALID_PARAMS);
4612 	/* The following conditions are ones which mean that we should
4613 	 * not do any HCI communication but directly send a mgmt
4614 	 * response to user space (after toggling the flag if
4617 	if (!hdev_is_powered(hdev) ||
4618 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4619 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4620 	    hci_conn_num(hdev, LE_LINK) > 0 ||
4621 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4622 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4626 		changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4627 		if (cp->val == 0x02)
4628 			hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4630 			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4632 		changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4633 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4636 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4641 			err = new_settings(hdev, sk);
	/* Only one advertising/LE toggle may be in flight at a time */
4646 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4647 	    pending_find(MGMT_OP_SET_LE, hdev)) {
4648 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4653 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4659 	hci_req_init(&req, hdev);
4661 	if (cp->val == 0x02)
4662 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4664 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4667 	/* Switch to instance "0" for the Set Advertising setting. */
4668 	update_adv_data_for_instance(&req, 0);
4669 	update_scan_rsp_data_for_instance(&req, 0);
4670 	enable_advertising(&req);
4672 	disable_advertising(&req);
4675 	err = hci_req_run(&req, set_advertising_complete);
4677 		mgmt_pending_remove(cmd);
4680 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address. Only allowed while powered off; a non-ANY address must not
 * be BDADDR_NONE and must have the two most significant bits set, as
 * required for static random addresses.
 */
4684 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4685 			      void *data, u16 len)
4687 	struct mgmt_cp_set_static_address *cp = data;
4690 	BT_DBG("%s", hdev->name);
4692 	if (!lmp_le_capable(hdev))
4693 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4694 				       MGMT_STATUS_NOT_SUPPORTED);
4696 	if (hdev_is_powered(hdev))
4697 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4698 				       MGMT_STATUS_REJECTED);
4700 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4701 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4702 			return mgmt_cmd_status(sk, hdev->id,
4703 					       MGMT_OP_SET_STATIC_ADDRESS,
4704 					       MGMT_STATUS_INVALID_PARAMS);
4706 		/* Two most significant bits shall be set */
4707 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4708 			return mgmt_cmd_status(sk, hdev->id,
4709 					       MGMT_OP_SET_STATIC_ADDRESS,
4710 					       MGMT_STATUS_INVALID_PARAMS);
4715 	bacpy(&hdev->static_addr, &cp->bdaddr);
4717 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4721 	err = new_settings(hdev, sk);
4724 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: set the LE scan interval and window.
 * Both must lie in the HCI-defined range 0x0004..0x4000 and the window
 * must not exceed the interval. If passive background scanning is
 * currently running it is restarted so the new values take effect.
 */
4728 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4729 			   void *data, u16 len)
4731 	struct mgmt_cp_set_scan_params *cp = data;
4732 	__u16 interval, window;
4735 	BT_DBG("%s", hdev->name);
4737 	if (!lmp_le_capable(hdev))
4738 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4739 				       MGMT_STATUS_NOT_SUPPORTED);
4741 	interval = __le16_to_cpu(cp->interval);
4743 	if (interval < 0x0004 || interval > 0x4000)
4744 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4745 				       MGMT_STATUS_INVALID_PARAMS);
4747 	window = __le16_to_cpu(cp->window);
4749 	if (window < 0x0004 || window > 0x4000)
4750 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4751 				       MGMT_STATUS_INVALID_PARAMS);
4753 	if (window > interval)
4754 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4755 				       MGMT_STATUS_INVALID_PARAMS);
4759 	hdev->le_scan_interval = interval;
4760 	hdev->le_scan_window = window;
4762 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4765 	/* If background scan is running, restart it so new parameters are
4768 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4769 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4770 		struct hci_request req;
4772 		hci_req_init(&req, hdev);
4774 		hci_req_add_le_scan_disable(&req);
4775 		hci_req_add_le_passive_scan(&req);
4777 		hci_req_run(&req, NULL);
4780 	hci_dev_unlock(hdev);
/* HCI request completion for set_fast_connectable(): on failure report
 * the translated status; on success commit HCI_FAST_CONNECTABLE to the
 * requested value, answer the pending command and emit New Settings.
 */
4785 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4788 	struct mgmt_pending_cmd *cmd;
4790 	BT_DBG("status 0x%02x", status);
4794 	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4799 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4800 				mgmt_status(status));
4802 		struct mgmt_mode *cp = cmd->param;
4805 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4807 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4809 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4810 		new_settings(hdev, cmd->sk);
4813 	mgmt_pending_remove(cmd);
4816 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle fast-connectable page
 * scan parameters. Requires BR/EDR enabled and controller >= 1.2.
 * Powered-off adapters just flip the flag; otherwise the page scan
 * change is written via an HCI request completed in
 * fast_connectable_complete().
 */
4819 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4820 				void *data, u16 len)
4822 	struct mgmt_mode *cp = data;
4823 	struct mgmt_pending_cmd *cmd;
4824 	struct hci_request req;
4827 	BT_DBG("%s", hdev->name);
4829 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4830 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
4831 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4832 				       MGMT_STATUS_NOT_SUPPORTED);
4834 	if (cp->val != 0x00 && cp->val != 0x01)
4835 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4836 				       MGMT_STATUS_INVALID_PARAMS);
4840 	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4841 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
	/* No-op if the requested state already matches the flag */
4846 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4847 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4852 	if (!hdev_is_powered(hdev)) {
4853 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4854 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4856 		new_settings(hdev, sk);
4860 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4867 	hci_req_init(&req, hdev);
4869 	write_fast_connectable(&req, cp->val);
4871 	err = hci_req_run(&req, fast_connectable_complete);
4873 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4874 				      MGMT_STATUS_FAILED);
4875 		mgmt_pending_remove(cmd);
4879 	hci_dev_unlock(hdev);
/* HCI request completion for set_bredr(): on failure roll back the
 * HCI_BREDR_ENABLED flag (set optimistically before the request was
 * sent) and report the error; on success answer the pending command
 * and emit New Settings.
 */
4884 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4886 	struct mgmt_pending_cmd *cmd;
4888 	BT_DBG("status 0x%02x", status);
4892 	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4897 		u8 mgmt_err = mgmt_status(status);
4899 		/* We need to restore the flag if related HCI commands
4902 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4904 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4906 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4907 		new_settings(hdev, cmd->sk);
4910 	mgmt_pending_remove(cmd);
4913 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: enable/disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered is
 * rejected, as is re-enabling when the adapter runs LE-only with a
 * static address or with secure connections enabled (see the inline
 * comment block). The flag is flipped before the HCI request so that
 * update_adv_data() generates the right advertising flags;
 * set_bredr_complete() rolls it back on failure.
 */
4916 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4918 	struct mgmt_mode *cp = data;
4919 	struct mgmt_pending_cmd *cmd;
4920 	struct hci_request req;
4923 	BT_DBG("request for %s", hdev->name);
4925 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4926 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4927 				       MGMT_STATUS_NOT_SUPPORTED);
4929 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4930 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4931 				       MGMT_STATUS_REJECTED);
4933 	if (cp->val != 0x00 && cp->val != 0x01)
4934 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4935 				       MGMT_STATUS_INVALID_PARAMS);
4939 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4940 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4944 	if (!hdev_is_powered(hdev)) {
	/* Disabling BR/EDR while powered off also clears the
	 * BR/EDR-only settings that no longer apply.
	 */
4946 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4947 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4948 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4949 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4950 			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4953 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4955 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4959 		err = new_settings(hdev, sk);
4963 	/* Reject disabling when powered on */
4965 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4966 				      MGMT_STATUS_REJECTED);
4969 		/* When configuring a dual-mode controller to operate
4970 		 * with LE only and using a static address, then switching
4971 		 * BR/EDR back on is not allowed.
4973 		 * Dual-mode controllers shall operate with the public
4974 		 * address as its identity address for BR/EDR and LE. So
4975 		 * reject the attempt to create an invalid configuration.
4977 		 * The same restrictions applies when secure connections
4978 		 * has been enabled. For BR/EDR this is a controller feature
4979 		 * while for LE it is a host stack feature. This means that
4980 		 * switching BR/EDR back on when secure connections has been
4981 		 * enabled is not a supported transaction.
4983 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4984 		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4985 		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4986 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4987 					      MGMT_STATUS_REJECTED);
4992 	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4993 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4998 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5004 	/* We need to flip the bit already here so that update_adv_data
5005 	 * generates the correct flags.
5007 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5009 	hci_req_init(&req, hdev);
5011 	write_fast_connectable(&req, false);
5012 	__hci_update_page_scan(&req);
5014 	/* Since only the advertising data flags will change, there
5015 	 * is no need to update the scan response data.
5017 	update_adv_data(&req);
5019 	err = hci_req_run(&req, set_bredr_complete);
5021 		mgmt_pending_remove(cmd);
5024 	hci_dev_unlock(hdev);
/* HCI request completion for set_secure_conn(): update HCI_SC_ENABLED
 * and HCI_SC_ONLY according to the requested mode (0x00 off, 0x01 SC
 * enabled, 0x02 SC-only), answer the pending command and emit New
 * Settings. On failure only the error status is reported.
 */
5028 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5030 	struct mgmt_pending_cmd *cmd;
5031 	struct mgmt_mode *cp;
5033 	BT_DBG("%s status %u", hdev->name, status);
5037 	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5042 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5043 				mgmt_status(status));
	/* Mode 0x00: secure connections fully disabled */
5051 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5052 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	/* Mode 0x01: SC enabled, but legacy pairing still allowed */
5055 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5056 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	/* Mode 0x02: SC-only mode */
5059 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5060 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
5064 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5065 	new_settings(hdev, cmd->sk);
5068 	mgmt_pending_remove(cmd);
5070 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SECURE_CONN: configure Secure Connections mode
 * (0x00 off, 0x01 on, 0x02 SC-only). When no controller write is
 * needed (powered off, not SC capable, or BR/EDR disabled) the flags
 * are flipped directly; otherwise HCI_OP_WRITE_SC_SUPPORT is sent and
 * the flags are committed in sc_enable_complete().
 */
5073 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5074 			   void *data, u16 len)
5076 	struct mgmt_mode *cp = data;
5077 	struct mgmt_pending_cmd *cmd;
5078 	struct hci_request req;
5082 	BT_DBG("request for %s", hdev->name);
5084 	if (!lmp_sc_capable(hdev) &&
5085 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5086 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5087 				       MGMT_STATUS_NOT_SUPPORTED);
	/* For BR/EDR, SC requires SSP to be enabled first */
5089 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5090 	    lmp_sc_capable(hdev) &&
5091 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5092 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5093 				       MGMT_STATUS_REJECTED);
5095 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5096 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5097 				       MGMT_STATUS_INVALID_PARAMS);
5101 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5102 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5106 			changed = !hci_dev_test_and_set_flag(hdev,
5108 			if (cp->val == 0x02)
5109 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
5111 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5113 			changed = hci_dev_test_and_clear_flag(hdev,
5115 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5118 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5123 			err = new_settings(hdev, sk);
5128 	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5129 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
	/* No-op if both the enabled and SC-only state already match */
5136 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5137 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5138 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5142 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5148 	hci_req_init(&req, hdev);
5149 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5150 	err = hci_req_run(&req, sc_enable_complete);
5152 		mgmt_pending_remove(cmd);
5157 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEBUG_KEYS: 0x00 discards debug keys, 0x01 keeps
 * them (HCI_KEEP_DEBUG_KEYS), 0x02 additionally makes the controller
 * use SSP debug mode (HCI_USE_DEBUG_KEYS). If the use-mode changed on
 * a powered adapter with SSP enabled, HCI_OP_WRITE_SSP_DEBUG_MODE is
 * sent to the controller.
 */
5161 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5162 			  void *data, u16 len)
5164 	struct mgmt_mode *cp = data;
5165 	bool changed, use_changed;
5168 	BT_DBG("request for %s", hdev->name);
5170 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5171 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5172 				       MGMT_STATUS_INVALID_PARAMS);
5177 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5179 		changed = hci_dev_test_and_clear_flag(hdev,
5180 						      HCI_KEEP_DEBUG_KEYS);
5182 	if (cp->val == 0x02)
5183 		use_changed = !hci_dev_test_and_set_flag(hdev,
5184 							 HCI_USE_DEBUG_KEYS);
5186 		use_changed = hci_dev_test_and_clear_flag(hdev,
5187 							  HCI_USE_DEBUG_KEYS);
5189 	if (hdev_is_powered(hdev) && use_changed &&
5190 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5191 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5192 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5193 			     sizeof(mode), &mode);
5196 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5201 		err = new_settings(hdev, sk);
5204 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PRIVACY: enable/disable LE privacy and install
 * the local IRK. Rejected while powered on. Enabling copies the IRK
 * and marks the RPA expired so a fresh one is generated; disabling
 * wipes the IRK. HCI_RPA_RESOLVING is set either way because a user
 * space that knows this command is expected to handle IRKs.
 */
5208 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5211 	struct mgmt_cp_set_privacy *cp = cp_data;
5215 	BT_DBG("request for %s", hdev->name);
5217 	if (!lmp_le_capable(hdev))
5218 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5219 				       MGMT_STATUS_NOT_SUPPORTED);
5221 	if (cp->privacy != 0x00 && cp->privacy != 0x01)
5222 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5223 				       MGMT_STATUS_INVALID_PARAMS);
5225 	if (hdev_is_powered(hdev))
5226 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5227 				       MGMT_STATUS_REJECTED);
5231 	/* If user space supports this command it is also expected to
5232 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5234 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5237 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5238 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
	/* Force generation of a new RPA on next use */
5239 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5241 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5242 		memset(hdev->irk, 0, sizeof(hdev->irk));
5243 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5246 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5251 		err = new_settings(hdev, sk);
5254 	hci_dev_unlock(hdev);
/* Validate the address of an IRK entry from MGMT_OP_LOAD_IRKS: public
 * LE addresses are always fine; random LE addresses must be static,
 * i.e. carry the two most significant bits set.
 */
5258 static bool irk_is_valid(struct mgmt_irk_info *irk)
5260 	switch (irk->addr.type) {
5261 	case BDADDR_LE_PUBLIC:
5264 	case BDADDR_LE_RANDOM:
5265 		/* Two most significant bits shall be set */
5266 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_IRKS: replace the adapter's set of Identity
 * Resolving Keys with the list supplied by user space. The payload is
 * validated first (count bound, exact length, each entry via
 * irk_is_valid()) before the existing keys are cleared, so a bad list
 * never partially destroys the current state.
 */
5274 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5277 	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound chosen so expected_len below cannot overflow u16 */
5278 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5279 				   sizeof(struct mgmt_irk_info));
5280 	u16 irk_count, expected_len;
5283 	BT_DBG("request for %s", hdev->name);
5285 	if (!lmp_le_capable(hdev))
5286 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5287 				       MGMT_STATUS_NOT_SUPPORTED);
5289 	irk_count = __le16_to_cpu(cp->irk_count);
5290 	if (irk_count > max_irk_count) {
5291 		BT_ERR("load_irks: too big irk_count value %u", irk_count);
5292 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5293 				       MGMT_STATUS_INVALID_PARAMS);
5296 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5297 	if (expected_len != len) {
5298 		BT_ERR("load_irks: expected %u bytes, got %u bytes",
5300 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5301 				       MGMT_STATUS_INVALID_PARAMS);
5304 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
	/* Validate every entry before touching the existing key store */
5306 	for (i = 0; i < irk_count; i++) {
5307 		struct mgmt_irk_info *key = &cp->irks[i];
5309 		if (!irk_is_valid(key))
5310 			return mgmt_cmd_status(sk, hdev->id,
5312 					       MGMT_STATUS_INVALID_PARAMS);
5317 	hci_smp_irks_clear(hdev);
5319 	for (i = 0; i < irk_count; i++) {
5320 		struct mgmt_irk_info *irk = &cp->irks[i];
5323 		if (irk->addr.type == BDADDR_LE_PUBLIC)
5324 			addr_type = ADDR_LE_DEV_PUBLIC;
5326 			addr_type = ADDR_LE_DEV_RANDOM;
5328 		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5332 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5334 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5336 	hci_dev_unlock(hdev);
/* Validate one LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS: the master
 * field must be a boolean, and random LE addresses must be static
 * (two most significant bits set). Mirrors irk_is_valid().
 */
5341 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5343 	if (key->master != 0x00 && key->master != 0x01)
5346 	switch (key->addr.type) {
5347 	case BDADDR_LE_PUBLIC:
5350 	case BDADDR_LE_RANDOM:
5351 		/* Two most significant bits shall be set */
5352 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the adapter's SMP
 * long-term keys with the list from user space. Like load_irks(), the
 * whole payload is validated (count bound, exact length, per-entry
 * ltk_is_valid()) before the existing keys are cleared. Each entry's
 * mgmt key type is mapped to the matching SMP LTK type and
 * authentication level.
 */
5360 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5361 			       void *cp_data, u16 len)
5363 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound chosen so expected_len below cannot overflow u16 */
5364 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5365 				   sizeof(struct mgmt_ltk_info));
5366 	u16 key_count, expected_len;
5369 	BT_DBG("request for %s", hdev->name);
5371 	if (!lmp_le_capable(hdev))
5372 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5373 				       MGMT_STATUS_NOT_SUPPORTED);
5375 	key_count = __le16_to_cpu(cp->key_count);
5376 	if (key_count > max_key_count) {
5377 		BT_ERR("load_ltks: too big key_count value %u", key_count);
5378 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5379 				       MGMT_STATUS_INVALID_PARAMS);
5382 	expected_len = sizeof(*cp) + key_count *
5383 					sizeof(struct mgmt_ltk_info);
5384 	if (expected_len != len) {
5385 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
5387 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5388 				       MGMT_STATUS_INVALID_PARAMS);
5391 	BT_DBG("%s key_count %u", hdev->name, key_count);
	/* Validate every entry before touching the existing key store */
5393 	for (i = 0; i < key_count; i++) {
5394 		struct mgmt_ltk_info *key = &cp->keys[i];
5396 		if (!ltk_is_valid(key))
5397 			return mgmt_cmd_status(sk, hdev->id,
5398 					       MGMT_OP_LOAD_LONG_TERM_KEYS,
5399 					       MGMT_STATUS_INVALID_PARAMS);
5404 	hci_smp_ltks_clear(hdev);
5406 	for (i = 0; i < key_count; i++) {
5407 		struct mgmt_ltk_info *key = &cp->keys[i];
5408 		u8 type, addr_type, authenticated;
5410 		if (key->addr.type == BDADDR_LE_PUBLIC)
5411 			addr_type = ADDR_LE_DEV_PUBLIC;
5413 			addr_type = ADDR_LE_DEV_RANDOM;
	/* Map mgmt key type -> SMP key type + authenticated flag;
	 * legacy (non-P256) keys also distinguish master vs. slave.
	 */
5415 		switch (key->type) {
5416 		case MGMT_LTK_UNAUTHENTICATED:
5417 			authenticated = 0x00;
5418 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5420 		case MGMT_LTK_AUTHENTICATED:
5421 			authenticated = 0x01;
5422 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5424 		case MGMT_LTK_P256_UNAUTH:
5425 			authenticated = 0x00;
5426 			type = SMP_LTK_P256;
5428 		case MGMT_LTK_P256_AUTH:
5429 			authenticated = 0x01;
5430 			type = SMP_LTK_P256;
5432 		case MGMT_LTK_P256_DEBUG:
5433 			authenticated = 0x00;
5434 			type = SMP_LTK_P256_DEBUG;
5439 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5440 			    authenticated, key->val, key->enc_size, key->ediv,
5444 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5447 	hci_dev_unlock(hdev);
/* cmd_complete hook for MGMT_OP_GET_CONN_INFO: build the reply from
 * the cached hci_conn values (or the INVALID sentinels on failure),
 * send it, and drop the connection reference taken when the command
 * was queued.
 */
5452 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5454 	struct hci_conn *conn = cmd->user_data;
5455 	struct mgmt_rp_get_conn_info rp;
	/* The original request address is stored as the command param */
5458 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5460 	if (status == MGMT_STATUS_SUCCESS) {
5461 		rp.rssi = conn->rssi;
5462 		rp.tx_power = conn->tx_power;
5463 		rp.max_tx_power = conn->max_tx_power;
5465 		rp.rssi = HCI_RSSI_INVALID;
5466 		rp.tx_power = HCI_TX_POWER_INVALID;
5467 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5470 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5471 				status, &rp, sizeof(rp));
5473 	hci_conn_drop(conn);
/* HCI request completion for the RSSI/TX-power refresh issued by
 * get_conn_info(). Recovers the connection handle from whichever of
 * the two commands was sent last (see inline comment) and completes
 * the matching pending GET_CONN_INFO command.
 */
5479 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5482 	struct hci_cp_read_rssi *cp;
5483 	struct mgmt_pending_cmd *cmd;
5484 	struct hci_conn *conn;
5488 	BT_DBG("status 0x%02x", hci_status);
5492 	/* Commands sent in request are either Read RSSI or Read Transmit Power
5493 	 * Level so we check which one was last sent to retrieve connection
5494 	 * handle. Both commands have handle as first parameter so it's safe to
5495 	 * cast data on the same command struct.
5497 	 * First command sent is always Read RSSI and we fail only if it fails.
5498 	 * In other case we simply override error to indicate success as we
5499 	 * already remembered if TX power value is actually valid.
5501 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5503 		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5504 		status = MGMT_STATUS_SUCCESS;
5506 		status = mgmt_status(hci_status);
	/* Neither command found in the sent data: should not happen */
5510 		BT_ERR("invalid sent_cmd in conn_info response");
5514 	handle = __le16_to_cpu(cp->handle);
5515 	conn = hci_conn_hash_lookup_handle(hdev, handle);
5517 		BT_ERR("unknown handle (%d) in conn_info response", handle);
5521 	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5525 	cmd->cmd_complete(cmd, status);
5526 	mgmt_pending_remove(cmd);
5529 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI, TX power and max TX power
 * for an active connection. Values are served from the hci_conn cache
 * when fresh; otherwise Read RSSI (and, where still unknown, Read
 * Transmit Power Level) are queued and the reply is produced in
 * conn_info_refresh_complete() -> conn_info_cmd_complete().
 */
5532 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5535 	struct mgmt_cp_get_conn_info *cp = data;
5536 	struct mgmt_rp_get_conn_info rp;
5537 	struct hci_conn *conn;
5538 	unsigned long conn_info_age;
5541 	BT_DBG("%s", hdev->name);
5543 	memset(&rp, 0, sizeof(rp));
5544 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5545 	rp.addr.type = cp->addr.type;
5547 	if (!bdaddr_type_is_valid(cp->addr.type))
5548 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5549 					 MGMT_STATUS_INVALID_PARAMS,
5554 	if (!hdev_is_powered(hdev)) {
5555 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5556 					MGMT_STATUS_NOT_POWERED, &rp,
	/* Look up the connection on the matching transport */
5561 	if (cp->addr.type == BDADDR_BREDR)
5562 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5565 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5567 	if (!conn || conn->state != BT_CONNECTED) {
5568 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5569 					MGMT_STATUS_NOT_CONNECTED, &rp,
	/* Only one refresh per connection may be in flight */
5574 	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5575 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5576 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
5580 	/* To avoid client trying to guess when to poll again for information we
5581 	 * calculate conn info age as random value between min/max set in hdev.
5583 	conn_info_age = hdev->conn_info_min_age +
5584 			prandom_u32_max(hdev->conn_info_max_age -
5585 					hdev->conn_info_min_age);
5587 	/* Query controller to refresh cached values if they are too old or were
5590 	if (time_after(jiffies, conn->conn_info_timestamp +
5591 		       msecs_to_jiffies(conn_info_age)) ||
5592 	    !conn->conn_info_timestamp) {
5593 		struct hci_request req;
5594 		struct hci_cp_read_tx_power req_txp_cp;
5595 		struct hci_cp_read_rssi req_rssi_cp;
5596 		struct mgmt_pending_cmd *cmd;
5598 		hci_req_init(&req, hdev);
5599 		req_rssi_cp.handle = cpu_to_le16(conn->handle);
5600 		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5603 		/* For LE links TX power does not change thus we don't need to
5604 		 * query for it once value is known.
5606 		if (!bdaddr_type_is_le(cp->addr.type) ||
5607 		    conn->tx_power == HCI_TX_POWER_INVALID) {
5608 			req_txp_cp.handle = cpu_to_le16(conn->handle);
5609 			req_txp_cp.type = 0x00;
5610 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5611 				    sizeof(req_txp_cp), &req_txp_cp);
5614 		/* Max TX power needs to be read only once per connection */
5615 		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5616 			req_txp_cp.handle = cpu_to_le16(conn->handle);
5617 			req_txp_cp.type = 0x01;
5618 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5619 				    sizeof(req_txp_cp), &req_txp_cp);
5622 		err = hci_req_run(&req, conn_info_refresh_complete);
5626 		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
	/* Keep the connection alive until the command completes */
5633 		hci_conn_hold(conn);
5634 		cmd->user_data = hci_conn_get(conn);
5635 		cmd->cmd_complete = conn_info_cmd_complete;
5637 		conn->conn_info_timestamp = jiffies;
5639 		/* Cache is valid, just reply with values cached in hci_conn */
5640 		rp.rssi = conn->rssi;
5641 		rp.tx_power = conn->tx_power;
5642 		rp.max_tx_power = conn->max_tx_power;
5644 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5645 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5649 	hci_dev_unlock(hdev);
/* cmd_complete hook for MGMT_OP_GET_CLOCK_INFO: fill in the local
 * clock and, when a connection was involved, the piconet clock and
 * accuracy, then send the reply and drop the held connection.
 */
5653 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5655 	struct hci_conn *conn = cmd->user_data;
5656 	struct mgmt_rp_get_clock_info rp;
5657 	struct hci_dev *hdev;
5660 	memset(&rp, 0, sizeof(rp));
5661 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5666 	hdev = hci_dev_get(cmd->index);
5668 		rp.local_clock = cpu_to_le32(hdev->clock);
5673 		rp.piconet_clock = cpu_to_le32(conn->clock);
5674 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5678 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5682 	hci_conn_drop(conn);
/* HCI request completion for get_clock_info(): recover the hci_conn
 * (when the piconet clock was read, i.e. hci_cp->which is non-zero)
 * and complete the matching pending GET_CLOCK_INFO command.
 */
5689 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5691 	struct hci_cp_read_clock *hci_cp;
5692 	struct mgmt_pending_cmd *cmd;
5693 	struct hci_conn *conn;
5695 	BT_DBG("%s status %u", hdev->name, status);
5699 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	/* which != 0 means a piconet clock read for a connection handle */
5703 	if (hci_cp->which) {
5704 		u16 handle = __le16_to_cpu(hci_cp->handle);
5705 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5710 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5714 	cmd->cmd_complete(cmd, mgmt_status(status));
5715 	mgmt_pending_remove(cmd);
5718 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CLOCK_INFO: read the local clock and, when a
 * non-ANY BR/EDR address of a connected device is given, additionally
 * that connection's piconet clock. The reply is produced in
 * get_clock_info_complete() -> clock_info_cmd_complete().
 */
5721 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5724 	struct mgmt_cp_get_clock_info *cp = data;
5725 	struct mgmt_rp_get_clock_info rp;
5726 	struct hci_cp_read_clock hci_cp;
5727 	struct mgmt_pending_cmd *cmd;
5728 	struct hci_request req;
5729 	struct hci_conn *conn;
5732 	BT_DBG("%s", hdev->name);
5734 	memset(&rp, 0, sizeof(rp));
5735 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5736 	rp.addr.type = cp->addr.type;
	/* Clock information is a BR/EDR-only concept */
5738 	if (cp->addr.type != BDADDR_BREDR)
5739 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5740 					 MGMT_STATUS_INVALID_PARAMS,
5745 	if (!hdev_is_powered(hdev)) {
5746 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5747 					MGMT_STATUS_NOT_POWERED, &rp,
	/* Non-ANY address selects a specific connected device */
5752 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5753 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5755 		if (!conn || conn->state != BT_CONNECTED) {
5756 			err = mgmt_cmd_complete(sk, hdev->id,
5757 						MGMT_OP_GET_CLOCK_INFO,
5758 						MGMT_STATUS_NOT_CONNECTED,
5766 	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5772 	cmd->cmd_complete = clock_info_cmd_complete;
5774 	hci_req_init(&req, hdev);
	/* Zeroed hci_cp (which = 0x00) reads the local clock */
5776 	memset(&hci_cp, 0, sizeof(hci_cp));
5777 	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	/* Hold the connection while the request is in flight */
5780 		hci_conn_hold(conn);
5781 		cmd->user_data = hci_conn_get(conn);
5783 		hci_cp.handle = cpu_to_le16(conn->handle);
5784 		hci_cp.which = 0x01; /* Piconet clock */
5785 		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5788 	err = hci_req_run(&req, get_clock_info_complete);
5790 		mgmt_pending_remove(cmd);
5793 	hci_dev_unlock(hdev);
/* Check whether an LE connection to the given address exists, matches
 * the given destination address type, and is in BT_CONNECTED state.
 */
5797 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5799 	struct hci_conn *conn;
5801 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5805 	if (conn->dst_type != type)
5808 	if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Set (creating if needed) the auto-connect policy for an LE device's
 * connection parameters. Moves the params entry onto the matching
 * pend_le_reports / pend_le_conns action list and refreshes the
 * background scan so the policy takes effect immediately.
 */
5815 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5816 			       u8 addr_type, u8 auto_connect)
5818 	struct hci_dev *hdev = req->hdev;
5819 	struct hci_conn_params *params;
5821 	params = hci_conn_params_add(hdev, addr, addr_type);
	/* Nothing to do if the policy is unchanged */
5825 	if (params->auto_connect == auto_connect)
	/* Detach from any previous action list before re-filing */
5828 	list_del_init(&params->action);
5830 	switch (auto_connect) {
5831 	case HCI_AUTO_CONN_DISABLED:
5832 	case HCI_AUTO_CONN_LINK_LOSS:
5833 		__hci_update_background_scan(req);
5835 	case HCI_AUTO_CONN_REPORT:
5836 		list_add(&params->action, &hdev->pend_le_reports);
5837 		__hci_update_background_scan(req);
5839 	case HCI_AUTO_CONN_DIRECT:
5840 	case HCI_AUTO_CONN_ALWAYS:
	/* Only queue for connection if not already connected */
5841 		if (!is_connected(hdev, addr, addr_type)) {
5842 			list_add(&params->action, &hdev->pend_le_conns);
5843 			__hci_update_background_scan(req);
5848 	params->auto_connect = auto_connect;
5850 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Broadcast MGMT_EV_DEVICE_ADDED for @bdaddr/@type with the requested
 * @action.  @sk is passed as the skip-socket — presumably the originating
 * socket is excluded from the broadcast; verify mgmt_event() semantics.
 */
5856 static void device_added(struct sock *sk, struct hci_dev *hdev,
5857 bdaddr_t *bdaddr, u8 type, u8 action)
5859 struct mgmt_ev_device_added ev;
5861 bacpy(&ev.addr.bdaddr, bdaddr);
5862 ev.addr.type = type;
5865 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for MGMT_OP_ADD_DEVICE: locate the
 * pending command, finish it with the HCI status translated to an MGMT
 * status, and drop it.  Runs under hdev->lock (unlock visible below).
 */
5868 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5870 struct mgmt_pending_cmd *cmd;
5872 BT_DBG("status 0x%02x", status);
5876 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5880 cmd->cmd_complete(cmd, mgmt_status(status));
5881 mgmt_pending_remove(cmd);
5884 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_DEVICE.
 * Validates the address and action (0x00 background scan / 0x01 allow
 * incoming or direct connect / 0x02 auto-connect), then either adds a
 * BR/EDR address to the whitelist or programs LE connection parameters
 * via hci_conn_params_set(), and runs the resulting HCI request.
 */
5887 static int add_device(struct sock *sk, struct hci_dev *hdev,
5888 void *data, u16 len)
5890 struct mgmt_cp_add_device *cp = data;
5891 struct mgmt_pending_cmd *cmd;
5892 struct hci_request req;
5893 u8 auto_conn, addr_type;
5896 BT_DBG("%s", hdev->name);
/* Reject invalid address types and the all-zero BDADDR_ANY address. */
5898 if (!bdaddr_type_is_valid(cp->addr.type) ||
5899 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5900 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5901 MGMT_STATUS_INVALID_PARAMS,
5902 &cp->addr, sizeof(cp->addr));
5904 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5905 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5906 MGMT_STATUS_INVALID_PARAMS,
5907 &cp->addr, sizeof(cp->addr));
5909 hci_req_init(&req, hdev);
5913 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5919 cmd->cmd_complete = addr_cmd_complete;
5921 if (cp->addr.type == BDADDR_BREDR) {
5922 /* Only incoming connections action is supported for now */
5923 if (cp->action != 0x01) {
5924 err = cmd->cmd_complete(cmd,
5925 MGMT_STATUS_INVALID_PARAMS);
5926 mgmt_pending_remove(cmd);
5930 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5935 __hci_update_page_scan(&req);
/* LE path: map the MGMT address type to the HCI address type. */
5940 if (cp->addr.type == BDADDR_LE_PUBLIC)
5941 addr_type = ADDR_LE_DEV_PUBLIC;
5943 addr_type = ADDR_LE_DEV_RANDOM;
5945 if (cp->action == 0x02)
5946 auto_conn = HCI_AUTO_CONN_ALWAYS;
5947 else if (cp->action == 0x01)
5948 auto_conn = HCI_AUTO_CONN_DIRECT;
5950 auto_conn = HCI_AUTO_CONN_REPORT;
5952 /* If the connection parameters don't exist for this device,
5953 * they will be created and configured with defaults.
5955 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5957 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5958 mgmt_pending_remove(cmd);
5963 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5965 err = hci_req_run(&req, add_device_complete);
5967 /* ENODATA means no HCI commands were needed (e.g. if
5968 * the adapter is powered off).
5970 if (err == -ENODATA)
5971 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5972 mgmt_pending_remove(cmd);
5976 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_REMOVED for @bdaddr/@type; @sk is passed as
 * the skip-socket, mirroring device_added().
 */
5980 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5981 bdaddr_t *bdaddr, u8 type)
5983 struct mgmt_ev_device_removed ev;
5985 bacpy(&ev.addr.bdaddr, bdaddr);
5986 ev.addr.type = type;
5988 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for MGMT_OP_REMOVE_DEVICE: complete
 * and discard the pending command with the translated status.
 */
5991 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5993 struct mgmt_pending_cmd *cmd;
5995 BT_DBG("status 0x%02x", status);
5999 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6003 cmd->cmd_complete(cmd, mgmt_status(status));
6004 mgmt_pending_remove(cmd);
6007 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_DEVICE.
 * A specific address removes that one device (BR/EDR whitelist entry or
 * LE connection parameters); BDADDR_ANY with address type 0 clears the
 * whole whitelist and every non-disabled LE parameter entry.
 * NOTE(review): "¶ms" below is mojibake for "&params" — repair the
 * file encoding.
 */
6010 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6011 void *data, u16 len)
6013 struct mgmt_cp_remove_device *cp = data;
6014 struct mgmt_pending_cmd *cmd;
6015 struct hci_request req;
6018 BT_DBG("%s", hdev->name);
6020 hci_req_init(&req, hdev);
6024 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6030 cmd->cmd_complete = addr_cmd_complete;
/* Non-zero address: remove a single device. */
6032 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6033 struct hci_conn_params *params;
6036 if (!bdaddr_type_is_valid(cp->addr.type)) {
6037 err = cmd->cmd_complete(cmd,
6038 MGMT_STATUS_INVALID_PARAMS);
6039 mgmt_pending_remove(cmd);
6043 if (cp->addr.type == BDADDR_BREDR) {
6044 err = hci_bdaddr_list_del(&hdev->whitelist,
6048 err = cmd->cmd_complete(cmd,
6049 MGMT_STATUS_INVALID_PARAMS);
6050 mgmt_pending_remove(cmd);
6054 __hci_update_page_scan(&req);
6056 device_removed(sk, hdev, &cp->addr.bdaddr,
6061 if (cp->addr.type == BDADDR_LE_PUBLIC)
6062 addr_type = ADDR_LE_DEV_PUBLIC;
6064 addr_type = ADDR_LE_DEV_RANDOM;
6066 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6069 err = cmd->cmd_complete(cmd,
6070 MGMT_STATUS_INVALID_PARAMS);
6071 mgmt_pending_remove(cmd);
/* Disabled entries were never "added" via MGMT, so refuse to remove. */
6075 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6076 err = cmd->cmd_complete(cmd,
6077 MGMT_STATUS_INVALID_PARAMS);
6078 mgmt_pending_remove(cmd);
6082 list_del(¶ms->action);
6083 list_del(¶ms->list);
6085 __hci_update_background_scan(&req);
6087 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe everything.  Address type must be 0 here. */
6089 struct hci_conn_params *p, *tmp;
6090 struct bdaddr_list *b, *btmp;
6092 if (cp->addr.type) {
6093 err = cmd->cmd_complete(cmd,
6094 MGMT_STATUS_INVALID_PARAMS);
6095 mgmt_pending_remove(cmd);
6099 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6100 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6105 __hci_update_page_scan(&req);
6107 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6108 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6110 device_removed(sk, hdev, &p->addr, p->addr_type);
6111 list_del(&p->action);
6116 BT_DBG("All LE connection parameters were removed");
6118 __hci_update_background_scan(&req);
6122 err = hci_req_run(&req, remove_device_complete);
6124 /* ENODATA means no HCI commands were needed (e.g. if
6125 * the adapter is powered off).
6127 if (err == -ENODATA)
6128 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6129 mgmt_pending_remove(cmd);
6133 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 * Replaces the stored LE connection parameters with the caller-supplied
 * list, skipping entries with invalid address types or out-of-range
 * interval/latency/timeout values.
 * NOTE(review): "¶m" below is mojibake for "&param" — repair the
 * file encoding.
 */
6137 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6140 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
6141 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6142 sizeof(struct mgmt_conn_param));
6143 u16 param_count, expected_len;
6146 if (!lmp_le_capable(hdev))
6147 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6148 MGMT_STATUS_NOT_SUPPORTED);
6150 param_count = __le16_to_cpu(cp->param_count);
6151 if (param_count > max_param_count) {
6152 BT_ERR("load_conn_param: too big param_count value %u",
6154 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6155 MGMT_STATUS_INVALID_PARAMS);
/* The wire length must match the declared element count exactly. */
6158 expected_len = sizeof(*cp) + param_count *
6159 sizeof(struct mgmt_conn_param);
6160 if (expected_len != len) {
6161 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6163 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6164 MGMT_STATUS_INVALID_PARAMS);
6167 BT_DBG("%s param_count %u", hdev->name, param_count);
6171 hci_conn_params_clear_disabled(hdev);
6173 for (i = 0; i < param_count; i++) {
6174 struct mgmt_conn_param *param = &cp->params[i];
6175 struct hci_conn_params *hci_param;
6176 u16 min, max, latency, timeout;
6179 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
6182 if (param->addr.type == BDADDR_LE_PUBLIC) {
6183 addr_type = ADDR_LE_DEV_PUBLIC;
6184 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6185 addr_type = ADDR_LE_DEV_RANDOM;
6187 BT_ERR("Ignoring invalid connection parameters");
6191 min = le16_to_cpu(param->min_interval);
6192 max = le16_to_cpu(param->max_interval);
6193 latency = le16_to_cpu(param->latency);
6194 timeout = le16_to_cpu(param->timeout);
6196 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6197 min, max, latency, timeout);
/* Bad values are skipped, not fatal — the load continues. */
6199 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6200 BT_ERR("Ignoring invalid connection parameters");
6204 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6207 BT_ERR("Failed to add connection parameters");
6211 hci_param->conn_min_interval = min;
6212 hci_param->conn_max_interval = max;
6213 hci_param->conn_latency = latency;
6214 hci_param->supervision_timeout = timeout;
6217 hci_dev_unlock(hdev);
6219 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 * Toggles HCI_EXT_CONFIGURED for controllers with the EXTERNAL_CONFIG
 * quirk; only valid while powered off.  When the configured state flips,
 * the controller index is re-announced (removed/added) so userspace sees
 * the configuration transition.
 */
6223 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6224 void *data, u16 len)
6226 struct mgmt_cp_set_external_config *cp = data;
6230 BT_DBG("%s", hdev->name);
6232 if (hdev_is_powered(hdev))
6233 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6234 MGMT_STATUS_REJECTED);
6236 if (cp->config != 0x00 && cp->config != 0x01)
6237 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6238 MGMT_STATUS_INVALID_PARAMS);
6240 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6241 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6242 MGMT_STATUS_NOT_SUPPORTED);
/* changed is true only when the flag actually flipped state. */
6247 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6249 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6251 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6258 err = new_options(hdev, sk);
6260 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6261 mgmt_index_removed(hdev);
6263 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6264 hci_dev_set_flag(hdev, HCI_CONFIG);
6265 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6267 queue_work(hdev->req_workqueue, &hdev->power_on);
6269 set_bit(HCI_RAW, &hdev->flags);
6270 mgmt_index_added(hdev);
6275 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 * Stores a new public address for controllers that provide a set_bdaddr
 * driver callback; only valid while powered off.  If the address change
 * makes an unconfigured controller configured, the index is re-announced
 * and the controller is powered up for the HCI_CONFIG stage.
 */
6279 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6280 void *data, u16 len)
6282 struct mgmt_cp_set_public_address *cp = data;
6286 BT_DBG("%s", hdev->name);
6288 if (hdev_is_powered(hdev))
6289 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6290 MGMT_STATUS_REJECTED);
6292 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6293 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6294 MGMT_STATUS_INVALID_PARAMS);
6296 if (!hdev->set_bdaddr)
6297 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6298 MGMT_STATUS_NOT_SUPPORTED);
6302 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6303 bacpy(&hdev->public_addr, &cp->bdaddr);
6305 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6312 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6313 err = new_options(hdev, sk);
6315 if (is_configured(hdev)) {
6316 mgmt_index_removed(hdev);
6318 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6320 hci_dev_set_flag(hdev, HCI_CONFIG);
6321 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6323 queue_work(hdev->req_workqueue, &hdev->power_on);
6327 hci_dev_unlock(hdev);
6331 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6334 eir[eir_len++] = sizeof(type) + data_len;
6335 eir[eir_len++] = type;
6336 memcpy(&eir[eir_len], data, data_len);
6337 eir_len += data_len;
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 * Builds an EIR blob of local out-of-band pairing data for either BR/EDR
 * or LE (both LE address-type bits together), replies with it, and also
 * broadcasts it as MGMT_EV_LOCAL_OOB_DATA_UPDATED to sockets that opted
 * into OOB data events.
 */
6342 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6343 void *data, u16 data_len)
6345 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6346 struct mgmt_rp_read_local_oob_ext_data *rp;
6349 u8 status, flags, role, addr[7], hash[16], rand[16];
6352 BT_DBG("%s", hdev->name);
6354 if (!hdev_is_powered(hdev))
6355 return mgmt_cmd_complete(sk, hdev->id,
6356 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6357 MGMT_STATUS_NOT_POWERED,
6358 &cp->type, sizeof(cp->type));
/* First pass over cp->type: validate support and size the EIR buffer. */
6361 case BIT(BDADDR_BREDR):
6362 status = mgmt_bredr_support(hdev);
6364 return mgmt_cmd_complete(sk, hdev->id,
6365 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6370 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6371 status = mgmt_le_support(hdev);
6373 return mgmt_cmd_complete(sk, hdev->id,
6374 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
/* Worst-case LE EIR: address(9) + role(3) + hash(18) + rand(18) + flags(3). */
6377 eir_len = 9 + 3 + 18 + 18 + 3;
6380 return mgmt_cmd_complete(sk, hdev->id,
6381 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6382 MGMT_STATUS_INVALID_PARAMS,
6383 &cp->type, sizeof(cp->type));
6388 rp_len = sizeof(*rp) + eir_len;
6389 rp = kmalloc(rp_len, GFP_ATOMIC);
6391 hci_dev_unlock(hdev);
/* Second pass: fill the EIR fields for the chosen transport. */
6397 case BIT(BDADDR_BREDR):
6398 eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
6399 hdev->dev_class, 3);
6401 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6402 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6403 smp_generate_oob(hdev, hash, rand) < 0) {
6404 hci_dev_unlock(hdev);
6405 err = mgmt_cmd_complete(sk, hdev->id,
6406 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6408 &cp->type, sizeof(cp->type));
/* Pick the advertised LE address: RPA, static, or public identity. */
6412 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6413 memcpy(addr, &hdev->rpa, 6);
6415 } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6416 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6417 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6418 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6419 memcpy(addr, &hdev->static_addr, 6);
6422 memcpy(addr, &hdev->bdaddr, 6);
6426 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6427 addr, sizeof(addr));
6429 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6434 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6435 &role, sizeof(role));
6437 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6438 eir_len = eir_append_data(rp->eir, eir_len,
6440 hash, sizeof(hash));
6442 eir_len = eir_append_data(rp->eir, eir_len,
6444 rand, sizeof(rand));
6447 flags = get_adv_discov_flags(hdev);
6449 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6450 flags |= LE_AD_NO_BREDR;
6452 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6453 &flags, sizeof(flags));
6457 rp->type = cp->type;
6458 rp->eir_len = cpu_to_le16(eir_len);
6460 hci_dev_unlock(hdev);
/* Requesting OOB data also subscribes this socket to OOB updates. */
6462 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6464 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6465 MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
6469 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6470 rp, sizeof(*rp) + eir_len,
6471 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Handler for MGMT_OP_READ_ADV_FEATURES.
 * Reports advertising capabilities.  Only a single advertising instance
 * is supported at this point: no flags, max AD/scan-rsp length, and at
 * most one instance (listed only if HCI_ADVERTISING_INSTANCE is set).
 */
6479 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6480 void *data, u16 data_len)
6482 struct mgmt_rp_read_adv_features *rp;
6487 BT_DBG("%s", hdev->name);
6491 rp_len = sizeof(*rp);
6493 /* Currently only one instance is supported, so just add 1 to the
6496 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6500 rp = kmalloc(rp_len, GFP_ATOMIC);
6502 hci_dev_unlock(hdev);
6506 rp->supported_flags = cpu_to_le32(0);
6507 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6508 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6509 rp->max_instances = 1;
6511 /* Currently only one instance is supported, so simply return the
6512 * current instance number.
6515 rp->num_instances = 1;
6516 rp->instance[0] = 1;
6518 rp->num_instances = 0;
6521 hci_dev_unlock(hdev);
6523 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6524 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Validate a length-prefixed (TLV) advertising-data blob: walk each
 * field's length octet and ensure no field overruns the total @len.
 * adv_flags is currently unused (see TODO below).
 */
6531 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6534 u8 max_len = HCI_MAX_AD_LENGTH;
6537 /* TODO: Correctly reduce len based on adv_flags. */
6542 /* Make sure that the data is correctly formatted. */
6543 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6546 /* If the current field length would exceed the total data
6547 * length, then it's invalid.
6549 if (i + cur_len >= len)
/* HCI request completion callback for MGMT_OP_ADD_ADVERTISING.
 * On failure the advertising instance is torn down (flag cleared,
 * instance struct zeroed, removal event broadcast) before the pending
 * command is answered.
 */
6556 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6559 struct mgmt_pending_cmd *cmd;
6560 struct mgmt_rp_add_advertising rp;
6562 BT_DBG("status %d", status);
6566 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
6569 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6570 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
/* cmd may be NULL here — the event is still broadcast without a skip sk. */
6571 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6580 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6581 mgmt_status(status));
6583 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6584 mgmt_status(status), &rp, sizeof(rp));
6586 mgmt_pending_remove(cmd);
6589 hci_dev_unlock(hdev);
/* Delayed-work handler for the advertising instance timeout: clears the
 * stored timeout and removes the instance under hdev->lock.
 */
6592 static void adv_timeout_expired(struct work_struct *work)
6594 struct hci_dev *hdev = container_of(work, struct hci_dev,
6595 adv_instance.timeout_exp.work);
6597 hdev->adv_instance.timeout = 0;
6600 clear_adv_instance(hdev);
6601 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_ADVERTISING.
 * Validates and stores a single advertising instance (only instance 1,
 * no flags supported yet), arms its optional expiry timer, and — when
 * the controller is powered and Set Advertising is not active — runs an
 * HCI request to push AD/scan-response data and enable advertising.
 */
6604 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6605 void *data, u16 data_len)
6607 struct mgmt_cp_add_advertising *cp = data;
6608 struct mgmt_rp_add_advertising rp;
6613 struct mgmt_pending_cmd *cmd;
6614 struct hci_request req;
6616 BT_DBG("%s", hdev->name);
6618 status = mgmt_le_support(hdev);
6620 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6623 flags = __le32_to_cpu(cp->flags);
6624 timeout = __le16_to_cpu(cp->timeout);
6626 /* The current implementation only supports adding one instance and
6627 * doesn't support flags.
6629 if (cp->instance != 0x01 || flags)
6630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6631 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running timer, which needs a powered controller. */
6635 if (timeout && !hdev_is_powered(hdev)) {
6636 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6637 MGMT_STATUS_REJECTED);
/* Serialize against other operations that touch advertising state. */
6641 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6642 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6643 pending_find(MGMT_OP_SET_LE, hdev)) {
6644 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6649 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len) ||
6650 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6651 cp->scan_rsp_len)) {
6652 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6653 MGMT_STATUS_INVALID_PARAMS);
6657 INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
6659 hdev->adv_instance.flags = flags;
6660 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6661 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6663 if (cp->adv_data_len)
6664 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
/* Scan-response bytes follow the AD bytes in cp->data. */
6666 if (cp->scan_rsp_len)
6667 memcpy(hdev->adv_instance.scan_rsp_data,
6668 cp->data + cp->adv_data_len, cp->scan_rsp_len);
6670 if (hdev->adv_instance.timeout)
6671 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6673 hdev->adv_instance.timeout = timeout;
6676 queue_delayed_work(hdev->workqueue,
6677 &hdev->adv_instance.timeout_exp,
6678 msecs_to_jiffies(timeout * 1000));
/* Announce the instance only on the first (not-yet-set) transition. */
6680 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6681 advertising_added(sk, hdev, 1);
6683 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6684 * we have no HCI communication to make. Simply return.
6686 if (!hdev_is_powered(hdev) ||
6687 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6689 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6690 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6694 /* We're good to go, update advertising data, parameters, and start
6697 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6704 hci_req_init(&req, hdev);
6706 update_adv_data(&req);
6707 update_scan_rsp_data(&req);
6708 enable_advertising(&req);
6710 err = hci_req_run(&req, add_advertising_complete);
6712 mgmt_pending_remove(cmd);
6715 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_REMOVE_ADVERTISING.
 * Always reports success to the caller — see the comment below: by this
 * point the instance is already gone; only disabling advertising on the
 * controller could have failed.
 */
6720 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6723 struct mgmt_pending_cmd *cmd;
6724 struct mgmt_rp_remove_advertising rp;
6726 BT_DBG("status %d", status);
6730 /* A failure status here only means that we failed to disable
6731 * advertising. Otherwise, the advertising instance has been removed,
6732 * so report success.
6734 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6740 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6742 mgmt_pending_remove(cmd);
6745 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 * Tears down the (single) advertising instance: cancels its timer,
 * zeroes the stored data, broadcasts the removal, and — when powered
 * and not under Set Advertising — issues an HCI request to disable
 * advertising on the controller.
 */
6748 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6749 void *data, u16 data_len)
6751 struct mgmt_cp_remove_advertising *cp = data;
6752 struct mgmt_rp_remove_advertising rp;
6754 struct mgmt_pending_cmd *cmd;
6755 struct hci_request req;
6757 BT_DBG("%s", hdev->name);
6759 /* The current implementation only allows modifying instance no 1. A
6760 * value of 0 indicates that all instances should be cleared.
6762 if (cp->instance > 1)
6763 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6764 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against concurrent advertising/LE state changes. */
6768 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6769 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6770 pending_find(MGMT_OP_SET_LE, hdev)) {
6771 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6776 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
6777 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6778 MGMT_STATUS_INVALID_PARAMS);
6782 if (hdev->adv_instance.timeout)
6783 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6785 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6787 advertising_removed(sk, hdev, 1);
6789 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6791 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6792 * we have no HCI communication to make. Simply return.
6794 if (!hdev_is_powered(hdev) ||
6795 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6797 err = mgmt_cmd_complete(sk, hdev->id,
6798 MGMT_OP_REMOVE_ADVERTISING,
6799 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6803 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6810 hci_req_init(&req, hdev);
6811 disable_advertising(&req);
6813 err = hci_req_run(&req, remove_advertising_complete);
6815 mgmt_pending_remove(cmd);
6818 hci_dev_unlock(hdev);
/* MGMT command dispatch table, indexed by opcode (entry 0 is the
 * unused 0x0000 slot).  Each entry gives the handler, the expected
 * (minimum) parameter size, and optional flags: HCI_MGMT_UNTRUSTED
 * allows untrusted sockets, HCI_MGMT_UNCONFIGURED allows the command
 * on not-yet-configured controllers.
 */
6823 static const struct hci_mgmt_handler mgmt_handlers[] = {
6824 { NULL }, /* 0x0000 (no command) */
6825 { read_version, MGMT_READ_VERSION_SIZE,
6827 HCI_MGMT_UNTRUSTED },
6828 { read_commands, MGMT_READ_COMMANDS_SIZE,
6830 HCI_MGMT_UNTRUSTED },
6831 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6833 HCI_MGMT_UNTRUSTED },
6834 { read_controller_info, MGMT_READ_INFO_SIZE,
6835 HCI_MGMT_UNTRUSTED },
6836 { set_powered, MGMT_SETTING_SIZE },
6837 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6838 { set_connectable, MGMT_SETTING_SIZE },
6839 { set_fast_connectable, MGMT_SETTING_SIZE },
6840 { set_bondable, MGMT_SETTING_SIZE },
6841 { set_link_security, MGMT_SETTING_SIZE },
6842 { set_ssp, MGMT_SETTING_SIZE },
6843 { set_hs, MGMT_SETTING_SIZE },
6844 { set_le, MGMT_SETTING_SIZE },
6845 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6846 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6847 { add_uuid, MGMT_ADD_UUID_SIZE },
6848 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6849 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6851 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6853 { disconnect, MGMT_DISCONNECT_SIZE },
6854 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6855 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6856 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6857 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6858 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6859 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6860 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6861 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6862 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6863 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6864 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6865 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6866 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6868 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6869 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6870 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6871 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6872 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6873 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6874 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6875 { set_advertising, MGMT_SETTING_SIZE },
6876 { set_bredr, MGMT_SETTING_SIZE },
6877 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6878 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6879 { set_secure_conn, MGMT_SETTING_SIZE },
6880 { set_debug_keys, MGMT_SETTING_SIZE },
6881 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6882 { load_irks, MGMT_LOAD_IRKS_SIZE,
6884 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6885 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6886 { add_device, MGMT_ADD_DEVICE_SIZE },
6887 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6888 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6890 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6892 HCI_MGMT_UNTRUSTED },
6893 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6894 HCI_MGMT_UNCONFIGURED |
6895 HCI_MGMT_UNTRUSTED },
6896 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6897 HCI_MGMT_UNCONFIGURED },
6898 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6899 HCI_MGMT_UNCONFIGURED },
6900 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6902 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6903 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6905 HCI_MGMT_UNTRUSTED },
6906 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
6907 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
6909 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Announce a newly registered controller index.  Raw devices are
 * skipped.  Unconfigured controllers emit UNCONF_INDEX_ADDED, configured
 * ones INDEX_ADDED; in all non-raw cases an EXT_INDEX_ADDED event with
 * type/bus details is also sent to extended-index subscribers.
 */
6912 void mgmt_index_added(struct hci_dev *hdev)
6914 struct mgmt_ev_ext_index ev;
6916 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6919 switch (hdev->dev_type) {
6921 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6922 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6923 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6926 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6927 HCI_MGMT_INDEX_EVENTS);
6940 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6941 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller index removal (mirror of mgmt_index_added).
 * All still-pending MGMT commands for this controller are first
 * completed with MGMT_STATUS_INVALID_INDEX.
 */
6944 void mgmt_index_removed(struct hci_dev *hdev)
6946 struct mgmt_ev_ext_index ev;
6947 u8 status = MGMT_STATUS_INVALID_INDEX;
6949 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6952 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command. */
6954 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6956 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6957 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6958 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6961 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6962 HCI_MGMT_INDEX_EVENTS);
6975 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6976 HCI_MGMT_EXT_INDEX_EVENTS);
6979 /* This function requires the caller holds hdev->lock */
/* Re-populate the pend_le_conns / pend_le_reports action lists from the
 * stored auto_connect mode of every LE connection-params entry, then
 * refresh the background scan.  Used when powering (back) on.
 */
6980 static void restart_le_actions(struct hci_request *req)
6982 struct hci_dev *hdev = req->hdev;
6983 struct hci_conn_params *p;
6985 list_for_each_entry(p, &hdev->le_conn_params, list) {
6986 /* Needed for AUTO_OFF case where might not "really"
6987 * have been powered off.
6989 list_del_init(&p->action);
6991 switch (p->auto_connect) {
6992 case HCI_AUTO_CONN_DIRECT:
6993 case HCI_AUTO_CONN_ALWAYS:
6994 list_add(&p->action, &hdev->pend_le_conns);
6996 case HCI_AUTO_CONN_REPORT:
6997 list_add(&p->action, &hdev->pend_le_reports);
7004 __hci_update_background_scan(req);
/* Completion callback for the power-on HCI request issued by
 * powered_update_hci(): answers all pending SET_POWERED commands and
 * broadcasts the new settings.
 */
7007 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7009 struct cmd_lookup match = { NULL, hdev };
7011 BT_DBG("status 0x%02x", status);
7014 /* Register the available SMP channels (BR/EDR and LE) only
7015 * when successfully powering on the controller. This late
7016 * registration is required so that LE SMP can clearly
7017 * decide if the public address or static address is used.
7024 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7026 new_settings(hdev, match.sk);
7028 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the MGMT flags after power-on: SSP/SC support, LE host
 * support, advertising data, auth enable, fast-connectable and page
 * scan.  Returns the hci_req_run() result (0 when commands were queued).
 */
7034 static int powered_update_hci(struct hci_dev *hdev)
7036 struct hci_request req;
7039 hci_req_init(&req, hdev);
7041 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7042 !lmp_host_ssp_capable(hdev)) {
7045 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7047 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7050 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7051 sizeof(support), &support);
7055 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7056 lmp_bredr_capable(hdev)) {
7057 struct hci_cp_write_le_host_supported cp;
7062 /* Check first if we already have the right
7063 * host state (host features set)
7065 if (cp.le != lmp_host_le_capable(hdev) ||
7066 cp.simul != lmp_host_le_br_capable(hdev))
7067 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7071 if (lmp_le_capable(hdev)) {
7072 /* Make sure the controller has a good default for
7073 * advertising data. This also applies to the case
7074 * where BR/EDR was toggled during the AUTO_OFF phase.
7076 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7077 update_adv_data(&req);
7078 update_scan_rsp_data(&req);
7081 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7082 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7083 enable_advertising(&req);
7085 restart_le_actions(&req);
/* Only touch auth enable when the MGMT flag and HCI state disagree. */
7088 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7089 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7090 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7091 sizeof(link_sec), &link_sec);
7093 if (lmp_bredr_capable(hdev)) {
7094 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7095 write_fast_connectable(&req, true);
7097 write_fast_connectable(&req, false);
7098 __hci_update_page_scan(&req);
7104 return hci_req_run(&req, powered_complete);
/* Notify the MGMT layer that the controller power state changed.
 * Power-on defers to powered_update_hci(); power-off answers the pending
 * SET_POWERED commands, fails every other pending command with
 * NOT_POWERED (or INVALID_INDEX on unregistration), resets the class of
 * device and emits New Settings.
 */
7107 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7109 struct cmd_lookup match = { NULL, hdev };
7110 u8 status, zero_cod[] = { 0, 0, 0 };
7113 if (!hci_dev_test_flag(hdev, HCI_MGMT))
7117 if (powered_update_hci(hdev) == 0)
7120 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
7125 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7127 /* If the power off is because of hdev unregistration let
7128 * use the appropriate INVALID_INDEX status. Otherwise use
7129 * NOT_POWERED. We cover both scenarios here since later in
7130 * mgmt_index_removed() any hci_conn callbacks will have already
7131 * been triggered, potentially causing misleading DISCONNECTED
7134 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7135 status = MGMT_STATUS_INVALID_INDEX;
7137 status = MGMT_STATUS_NOT_POWERED;
7139 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Only announce the class reset if it was non-zero before. */
7141 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7142 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7143 zero_cod, sizeof(zero_cod), NULL);
7146 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: RFKILLED when the power-on was
 * blocked by rfkill (-ERFKILL), generic FAILED otherwise.
 */
7154 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7156 struct mgmt_pending_cmd *cmd;
7159 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7163 if (err == -ERFKILL)
7164 status = MGMT_STATUS_RFKILLED;
7166 status = MGMT_STATUS_FAILED;
7168 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7170 mgmt_pending_remove(cmd);
/* Discoverable-timeout expiry: clear the (limited) discoverable flags,
 * drop inquiry scan (back to page scan only) on BR/EDR, refresh the LE
 * advertising data when Set Advertising is active, and emit New
 * Settings.
 */
7173 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7175 struct hci_request req;
7179 /* When discoverable timeout triggers, then just make sure
7180 * the limited discoverable flag is cleared. Even in the case
7181 * of a timeout triggered from general discoverable, it is
7182 * safe to unconditionally clear the flag.
7184 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7185 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7187 hci_req_init(&req, hdev);
7188 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7189 u8 scan = SCAN_PAGE;
7190 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7191 sizeof(scan), &scan);
7195 /* Advertising instances don't use the global discoverable setting, so
7196 * only update AD if advertising was enabled using Set Advertising.
7198 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7199 update_adv_data(&req);
7201 hci_req_run(&req, NULL);
7203 hdev->discov_timeout = 0;
7205 new_settings(hdev, NULL);
7207 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key.
 * @persistent becomes the store hint telling userspace whether to keep
 * the key across reboots.
 */
7210 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7213 struct mgmt_ev_new_link_key ev;
7215 memset(&ev, 0, sizeof(ev));
7217 ev.store_hint = persistent;
7218 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7219 ev.key.addr.type = BDADDR_BREDR;
7220 ev.key.type = key->type;
7221 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7222 ev.key.pin_len = key->pin_len;
7224 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated bit onto the
 * corresponding MGMT_LTK_* wire value; unknown types fall back to
 * MGMT_LTK_UNAUTHENTICATED.
 */
7227 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7229 switch (ltk->type) {
7232 if (ltk->authenticated)
7233 return MGMT_LTK_AUTHENTICATED;
7234 return MGMT_LTK_UNAUTHENTICATED;
7236 if (ltk->authenticated)
7237 return MGMT_LTK_P256_AUTH;
7238 return MGMT_LTK_P256_UNAUTH;
7239 case SMP_LTK_P256_DEBUG:
7240 return MGMT_LTK_P256_DEBUG;
7243 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY. Keys for non-identity random addresses
 * (top two address bits != 11b static-random) get store_hint 0 since the
 * peer address will change.
 */
7246 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7248 struct mgmt_ev_new_long_term_key ev;
7250 memset(&ev, 0, sizeof(ev));
7252 /* Devices using resolvable or non-resolvable random addresses
7253 * without providing an identity resolving key don't require
7254 * to store long term keys. Their addresses will change the
7257 * Only when a remote device provides an identity address
7258 * make sure the long term key is stored. If the remote
7259 * identity is known, the long term keys are internally
7260 * mapped to the identity address. So allow static random
7261 * and public addresses here.
7263 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7264 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7265 ev.store_hint = 0x00;
7267 ev.store_hint = persistent;
7269 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7270 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7271 ev.key.type = mgmt_ltk_type(key);
7272 ev.key.enc_size = key->enc_size;
7273 ev.key.ediv = key->ediv;
7274 ev.key.rand = key->rand;
7276 if (key->type == SMP_LTK)
7279 memcpy(ev.key.val, key->val, sizeof(key->val));
7281 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK. Only hint storage (0x01) when the IRK actually
 * came with an RPA, i.e. irk->rpa differs from BDADDR_ANY.
 */
7284 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7286 struct mgmt_ev_new_irk ev;
7288 memset(&ev, 0, sizeof(ev));
7290 /* For identity resolving keys from devices that are already
7291 * using a public address or static random address, do not
7292 * ask for storing this key. The identity resolving key really
7293 * is only mandatory for devices using resolvable random
7296 * Storing all identity resolving keys has the downside that
7297 * they will be also loaded on next boot of the system. More
7298 * identity resolving keys, means more time during scanning is
7299 * needed to actually resolve these addresses.
7301 if (bacmp(&irk->rpa, BDADDR_ANY))
7302 ev.store_hint = 0x01;
7304 ev.store_hint = 0x00;
7306 bacpy(&ev.rpa, &irk->rpa);
7307 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7308 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7309 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7311 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK. Same identity-address gating as mgmt_new_ltk():
 * non-identity random addresses get store_hint 0.
 */
7314 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7317 struct mgmt_ev_new_csrk ev;
7319 memset(&ev, 0, sizeof(ev));
7321 /* Devices using resolvable or non-resolvable random addresses
7322 * without providing an identity resolving key don't require
7323 * to store signature resolving keys. Their addresses will change
7324 * the next time around.
7326 * Only when a remote device provides an identity address
7327 * make sure the signature resolving key is stored. So allow
7328 * static random and public addresses here.
7330 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7331 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7332 ev.store_hint = 0x00;
7334 ev.store_hint = persistent;
7336 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7337 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7338 ev.key.type = csrk->type;
7339 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7341 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with the negotiated LE connection
 * parameters; dropped silently for non-identity addresses.
 */
7344 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7345 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7346 u16 max_interval, u16 latency, u16 timeout)
7348 struct mgmt_ev_new_conn_param ev;
7350 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7353 memset(&ev, 0, sizeof(ev));
7354 bacpy(&ev.addr.bdaddr, bdaddr);
7355 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7356 ev.store_hint = store_hint;
7357 ev.min_interval = cpu_to_le16(min_interval);
7358 ev.max_interval = cpu_to_le16(max_interval);
7359 ev.latency = cpu_to_le16(latency);
7360 ev.timeout = cpu_to_le16(timeout);
7362 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with EIR data: either the stored LE adv
 * data, or (for BR/EDR) the remote name plus class of device.
 * NOTE(review): the `buf` and `eir_len` declarations are not visible in
 * this listing (numbering jumps past them).
 */
7365 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7366 u32 flags, u8 *name, u8 name_len)
7369 struct mgmt_ev_device_connected *ev = (void *) buf;
7372 bacpy(&ev->addr.bdaddr, &conn->dst);
7373 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7375 ev->flags = __cpu_to_le32(flags);
7377 /* We must ensure that the EIR Data fields are ordered and
7378 * unique. Keep it simple for now and avoid the problem by not
7379 * adding any BR/EDR data to the LE adv.
7381 if (conn->le_adv_data_len > 0) {
7382 memcpy(&ev->eir[eir_len],
7383 conn->le_adv_data, conn->le_adv_data_len);
7384 eir_len = conn->le_adv_data_len;
7387 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
7390 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7391 eir_len = eir_append_data(ev->eir, eir_len,
7393 conn->dev_class, 3);
7396 ev->eir_len = cpu_to_le16(eir_len);
7398 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7399 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect command
 * with success and hand its socket back through *sk (the sock_hold/assign
 * lines are not visible in this listing).
 */
7402 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7404 struct sock **sk = data;
7406 cmd->cmd_complete(cmd, 0);
7411 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: notify Device Unpaired for the address
 * in the pending Unpair Device command, then complete and remove it.
 */
7414 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7416 struct hci_dev *hdev = data;
7417 struct mgmt_cp_unpair_device *cp = cmd->param;
7419 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7421 cmd->cmd_complete(cmd, 0);
7422 mgmt_pending_remove(cmd);
/* Return whether a Set Powered (off) command is currently pending.
 * NOTE(review): the body after pending_find() — the !cmd check, the
 * cp->val test and the return statements — is not visible in this
 * listing; only the lookup survived.
 */
7425 bool mgmt_powering_down(struct hci_dev *hdev)
7427 struct mgmt_pending_cmd *cmd;
7428 struct mgmt_mode *cp;
7430 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED. If we are powering down and this was
 * the last connection, expedite the queued power-off work. Pending
 * Disconnect commands are completed first so the event is attributed to
 * the requesting socket.
 */
7441 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7442 u8 link_type, u8 addr_type, u8 reason,
7443 bool mgmt_connected)
7445 struct mgmt_ev_device_disconnected ev;
7446 struct sock *sk = NULL;
7448 /* The connection is still in hci_conn_hash so test for 1
7449 * instead of 0 to know if this is the last one.
7451 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7452 cancel_delayed_work(&hdev->power_off);
7453 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7456 if (!mgmt_connected)
7459 if (link_type != ACL_LINK && link_type != LE_LINK)
7462 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7464 bacpy(&ev.addr.bdaddr, bdaddr);
7465 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7468 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7473 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending Disconnect command with the translated HCI status,
 * but only if it targets the same address/type; pending Unpair Device
 * commands are flushed first.
 */
7477 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7478 u8 link_type, u8 addr_type, u8 status)
7480 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7481 struct mgmt_cp_disconnect *cp;
7482 struct mgmt_pending_cmd *cmd;
7484 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7487 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7493 if (bacmp(bdaddr, &cp->addr.bdaddr))
7496 if (cp->addr.type != bdaddr_type)
7499 cmd->cmd_complete(cmd, mgmt_status(status));
7500 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED; like mgmt_device_disconnected(), expedite
 * the power-off work if this was the last connection during power-down.
 */
7503 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7504 u8 addr_type, u8 status)
7506 struct mgmt_ev_connect_failed ev;
7508 /* The connection is still in hci_conn_hash so test for 1
7509 * instead of 0 to know if this is the last one.
7511 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7512 cancel_delayed_work(&hdev->power_off);
7513 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7516 bacpy(&ev.addr.bdaddr, bdaddr);
7517 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7518 ev.status = mgmt_status(status);
7520 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST (BR/EDR only). The ev.secure assignment
 * line is not visible in this listing.
 */
7523 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7525 struct mgmt_ev_pin_code_request ev;
7527 bacpy(&ev.addr.bdaddr, bdaddr);
7528 ev.addr.type = BDADDR_BREDR;
7531 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply with the translated HCI status. */
7534 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7537 struct mgmt_pending_cmd *cmd;
7539 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7543 cmd->cmd_complete(cmd, mgmt_status(status));
7544 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Neg Reply with the translated HCI status. */
7547 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7550 struct mgmt_pending_cmd *cmd;
7552 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7556 cmd->cmd_complete(cmd, mgmt_status(status));
7557 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST carrying the numeric comparison value
 * and the confirm hint; returns the mgmt_event() result.
 */
7560 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7561 u8 link_type, u8 addr_type, u32 value,
7564 struct mgmt_ev_user_confirm_request ev;
7566 BT_DBG("%s", hdev->name);
7568 bacpy(&ev.addr.bdaddr, bdaddr);
7569 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7570 ev.confirm_hint = confirm_hint;
7571 ev.value = cpu_to_le32(value);
7573 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST for the given peer address. */
7577 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7578 u8 link_type, u8 addr_type)
7580 struct mgmt_ev_user_passkey_request ev;
7582 BT_DBG("%s", hdev->name);
7584 bacpy(&ev.addr.bdaddr, bdaddr);
7585 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7587 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared helper for the four user confirm/passkey reply completions:
 * find the pending command for `opcode` and complete it with the
 * translated HCI status.
 */
7591 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7592 u8 link_type, u8 addr_type, u8 status,
7595 struct mgmt_pending_cmd *cmd;
7597 cmd = pending_find(opcode, hdev);
7601 cmd->cmd_complete(cmd, mgmt_status(status));
7602 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply. */
7607 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7608 u8 link_type, u8 addr_type, u8 status)
7610 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7611 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Neg Reply. */
7614 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7615 u8 link_type, u8 addr_type, u8 status)
7617 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7619 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply. */
7622 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7623 u8 link_type, u8 addr_type, u8 status)
7625 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7626 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Neg Reply. */
7629 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7630 u8 link_type, u8 addr_type, u8 status)
7632 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7634 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey being
 * entered on the remote side; `entered` counts digits typed so far.
 */
7637 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7638 u8 link_type, u8 addr_type, u32 passkey,
7641 struct mgmt_ev_passkey_notify ev;
7643 BT_DBG("%s", hdev->name);
7645 bacpy(&ev.addr.bdaddr, bdaddr);
7646 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7647 ev.passkey = __cpu_to_le32(passkey);
7648 ev.entered = entered;
7650 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_AUTH_FAILED, attributed to the pairing command's socket
 * when one is pending; that pending command is then completed and
 * removed (the `if (cmd)` guard line is not visible in this listing).
 */
7653 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7655 struct mgmt_ev_auth_failed ev;
7656 struct mgmt_pending_cmd *cmd;
7657 u8 status = mgmt_status(hci_status);
7659 bacpy(&ev.addr.bdaddr, &conn->dst);
7660 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7663 cmd = find_pairing(conn);
7665 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7666 cmd ? cmd->sk : NULL);
7669 cmd->cmd_complete(cmd, status);
7670 mgmt_pending_remove(cmd);
/* Completion of Write Authentication Enable: on error, fail all pending
 * Set Link Security commands; on success, sync HCI_LINK_SECURITY with
 * the HCI_AUTH flag and emit New Settings if it changed.
 */
7674 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7676 struct cmd_lookup match = { NULL, hdev };
7680 u8 mgmt_err = mgmt_status(status);
7681 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7682 cmd_status_rsp, &mgmt_err);
7686 if (test_bit(HCI_AUTH, &hdev->flags))
7687 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7689 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7691 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7695 new_settings(hdev, match.sk);
/* Queue a Write EIR command with all-zero data (and clear the cached
 * copy in hdev->eir); no-op on controllers without extended inquiry.
 */
7701 static void clear_eir(struct hci_request *req)
7703 struct hci_dev *hdev = req->hdev;
7704 struct hci_cp_write_eir cp;
7706 if (!lmp_ext_inq_capable(hdev))
7709 memset(hdev->eir, 0, sizeof(hdev->eir));
7711 memset(&cp, 0, sizeof(cp));
7713 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of Write Simple Pairing Mode: on error, roll back the SSP
 * (and dependent HS) flags and fail pending Set SSP commands; on success,
 * sync HCI_SSP_ENABLED, notify, and queue the debug-mode write.
 * NOTE(review): several branch/brace lines and the update_eir/clear_eir
 * else-path are not visible in this listing.
 */
7716 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7718 struct cmd_lookup match = { NULL, hdev };
7719 struct hci_request req;
7720 bool changed = false;
7723 u8 mgmt_err = mgmt_status(status);
7725 if (enable && hci_dev_test_and_clear_flag(hdev,
7727 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7728 new_settings(hdev, NULL);
7731 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7737 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7739 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7741 changed = hci_dev_test_and_clear_flag(hdev,
7744 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7747 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7750 new_settings(hdev, match.sk);
7755 hci_req_init(&req, hdev);
7757 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7758 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7759 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7760 sizeof(enable), &enable);
7766 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup match, taking a reference on it.
 */
7769 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7771 struct cmd_lookup *match = data;
7773 if (match->sk == NULL) {
7774 match->sk = cmd->sk;
7775 sock_hold(match->sk);
/* Completion of a class-of-device update: locate the socket of whichever
 * pending command (Set Dev Class / Add UUID / Remove UUID) triggered it,
 * then broadcast Class Of Dev Changed on success.
 */
7779 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7782 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7784 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7785 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7786 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7789 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7790 dev_class, 3, NULL);
/* Completion of a local-name write: broadcast Local Name Changed unless
 * this was part of the power-on sequence (a Set Powered is pending).
 * When no mgmt command initiated it, cache the name in hdev->dev_name.
 */
7796 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7798 struct mgmt_cp_set_local_name ev;
7799 struct mgmt_pending_cmd *cmd;
7804 memset(&ev, 0, sizeof(ev));
7805 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7806 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7808 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7810 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7812 /* If this is a HCI command related to powering on the
7813 * HCI dev don't send any mgmt signals.
7815 if (pending_find(MGMT_OP_SET_POWERED, hdev))
7819 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7820 cmd ? cmd->sk : NULL);
/* Completion of Read Local OOB (Extended) Data: respond to the pending
 * mgmt command with hash192/rand192, appending hash256/rand256 only when
 * BR/EDR Secure Connections is enabled and the 256-bit values exist;
 * otherwise the response is truncated by shrinking rp_size.
 */
7823 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7824 u8 *rand192, u8 *hash256, u8 *rand256,
7827 struct mgmt_pending_cmd *cmd;
7829 BT_DBG("%s status %u", hdev->name, status);
7831 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7836 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7837 mgmt_status(status));
7839 struct mgmt_rp_read_local_oob_data rp;
7840 size_t rp_size = sizeof(rp);
7842 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7843 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7845 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7846 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7847 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7849 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7852 mgmt_cmd_complete(cmd->sk, hdev->id,
7853 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7857 mgmt_pending_remove(cmd);
/* Linear search: return true if the 128-bit uuid appears in the uuids
 * array (the return statements are not visible in this listing).
 */
7860 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7864 for (i = 0; i < uuid_count; i++) {
7865 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/adv data TLV stream and return true if any advertised
 * UUID (16-, 32- or 128-bit, widened to 128-bit via the Bluetooth base
 * UUID) matches the filter list. Bails out on a field that would overrun
 * the buffer. The 16/32-bit expansions write little-endian bytes into
 * the big-endian base-UUID template at offsets 12..15.
 */
7872 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7876 while (parsed < eir_len) {
7877 u8 field_len = eir[0];
7884 if (eir_len - parsed < field_len + 1)
7888 case EIR_UUID16_ALL:
7889 case EIR_UUID16_SOME:
7890 for (i = 0; i + 3 <= field_len; i += 2) {
7891 memcpy(uuid, bluetooth_base_uuid, 16);
7892 uuid[13] = eir[i + 3];
7893 uuid[12] = eir[i + 2];
7894 if (has_uuid(uuid, uuid_count, uuids))
7898 case EIR_UUID32_ALL:
7899 case EIR_UUID32_SOME:
7900 for (i = 0; i + 5 <= field_len; i += 4) {
7901 memcpy(uuid, bluetooth_base_uuid, 16);
7902 uuid[15] = eir[i + 5];
7903 uuid[14] = eir[i + 4];
7904 uuid[13] = eir[i + 3];
7905 uuid[12] = eir[i + 2];
7906 if (has_uuid(uuid, uuid_count, uuids))
7910 case EIR_UUID128_ALL:
7911 case EIR_UUID128_SOME:
7912 for (i = 0; i + 17 <= field_len; i += 16) {
7913 memcpy(uuid, eir + i + 2, 16);
7914 if (has_uuid(uuid, uuid_count, uuids))
7920 parsed += field_len + 1;
7921 eir += field_len + 1;
/* Schedule a delayed LE scan restart, but only while actively scanning
 * and only if the restart would still fall inside the current scan
 * window (otherwise the scan will end soon anyway).
 */
7927 static void restart_le_scan(struct hci_dev *hdev)
7929 /* If controller is not scanning we are done. */
7930 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7933 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7934 hdev->discovery.scan_start +
7935 hdev->discovery.scan_duration))
7938 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7939 DISCOV_LE_RESTART_DELAY);
/* Service-discovery filter: apply the configured RSSI threshold and UUID
 * list to a found-device result. With the strict-duplicate-filter quirk,
 * low-RSSI results are let through initially (to trigger a scan restart
 * for fresh RSSI) and re-checked afterwards. Returns whether the result
 * should be reported (some return lines are not visible in this listing).
 */
7942 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7943 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7945 /* If a RSSI threshold has been specified, and
7946 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7947 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7948 * is set, let it through for further processing, as we might need to
7951 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7952 * the results are also dropped.
7954 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7955 (rssi == HCI_RSSI_INVALID ||
7956 (rssi < hdev->discovery.rssi &&
7957 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7960 if (hdev->discovery.uuid_count != 0) {
7961 /* If a list of UUIDs is provided in filter, results with no
7962 * matching UUID should be dropped.
7964 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7965 hdev->discovery.uuids) &&
7966 !eir_has_uuids(scan_rsp, scan_rsp_len,
7967 hdev->discovery.uuid_count,
7968 hdev->discovery.uuids))
7972 /* If duplicate filtering does not report RSSI changes, then restart
7973 * scanning to ensure updated result with updated RSSI values.
7975 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7976 restart_le_scan(hdev);
7978 /* Validate RSSI value against the RSSI threshold once more. */
7979 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7980 rssi < hdev->discovery.rssi)
/* Emit MGMT_EV_DEVICE_FOUND for an inquiry/scan result: drop results for
 * non-kernel-initiated discovery (except LE passive scan reports), apply
 * the service-discovery filter, then pack EIR + optional CoD + scan
 * response into one event buffer.
 * NOTE(review): the `buf` declaration and the rssi/ev->rssi assignment
 * lines are not visible in this listing.
 */
7987 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7988 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7989 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7992 struct mgmt_ev_device_found *ev = (void *)buf;
7995 /* Don't send events for a non-kernel initiated discovery. With
7996 * LE one exception is if we have pend_le_reports > 0 in which
7997 * case we're doing passive scanning and want these events.
7999 if (!hci_discovery_active(hdev)) {
8000 if (link_type == ACL_LINK)
8002 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8006 if (hdev->discovery.result_filtering) {
8007 /* We are using service discovery */
8008 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8013 /* Make sure that the buffer is big enough. The 5 extra bytes
8014 * are for the potential CoD field.
8016 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8019 memset(buf, 0, sizeof(buf));
8021 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8022 * RSSI value was reported as 0 when not available. This behavior
8023 * is kept when using device discovery. This is required for full
8024 * backwards compatibility with the API.
8026 * However when using service discovery, the value 127 will be
8027 * returned when the RSSI is not available.
8029 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8030 link_type == ACL_LINK)
8033 bacpy(&ev->addr.bdaddr, bdaddr);
8034 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8036 ev->flags = cpu_to_le32(flags);
8039 /* Copy EIR or advertising data into event */
8040 memcpy(ev->eir, eir, eir_len);
8042 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8043 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8046 if (scan_rsp_len > 0)
8047 /* Append scan response data to event */
8048 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8050 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8051 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8053 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit MGMT_EV_DEVICE_FOUND carrying only the remote name (as an
 * EIR_NAME_COMPLETE field) after a remote-name-request completes.
 */
8056 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8057 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8059 struct mgmt_ev_device_found *ev;
8060 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8063 ev = (struct mgmt_ev_device_found *) buf;
8065 memset(buf, 0, sizeof(buf));
8067 bacpy(&ev->addr.bdaddr, bdaddr);
8068 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8071 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8074 ev->eir_len = cpu_to_le16(eir_len);
8076 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_DISCOVERING with the current discovery type and state. */
8079 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8081 struct mgmt_ev_discovering ev;
8083 BT_DBG("%s discovering %u", hdev->name, discovering);
8085 memset(&ev, 0, sizeof(ev));
8086 ev.type = hdev->discovery.type;
8087 ev.discovering = discovering;
8089 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* hci_req_run() callback for re-enabling advertising: debug-log only. */
8092 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8094 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising (e.g. after a disconnect), but only if either
 * Set Advertising or an advertising instance had it enabled.
 */
8097 void mgmt_reenable_advertising(struct hci_dev *hdev)
8099 struct hci_request req;
8101 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8102 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8105 hci_req_init(&req, hdev);
8106 enable_advertising(&req);
8107 hci_req_run(&req, adv_enable_complete);
/* Registration descriptor for the mgmt control channel: routes
 * HCI_CHANNEL_CONTROL messages to the mgmt_handlers table.
 */
8110 static struct hci_mgmt_chan chan = {
8111 .channel = HCI_CHANNEL_CONTROL,
8112 .handler_count = ARRAY_SIZE(mgmt_handlers),
8113 .handlers = mgmt_handlers,
8114 .hdev_init = mgmt_init_hdev,
8119 return hci_mgmt_chan_register(&chan);
/* Module teardown: unregister the mgmt control channel. */
8122 void mgmt_exit(void)
8124 hci_mgmt_chan_unregister(&chan);