/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */
#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "mgmt_util.h"
/* Version/revision reported to userspace by MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	9
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
/* How long cached remote-name/EIR data is considered fresh */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key used to detect blank/unset link keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_index_list *rp;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
360 read_unlock(&hci_dev_list_lock);
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
420 read_unlock(&hci_dev_list_lock);
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
479 read_unlock(&hci_dev_list_lock);
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
546 static __le32 get_missing_options(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
583 BT_DBG("sock %p %s", sk, hdev->name);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
605 static u32 get_supported_settings(struct hci_dev *hdev)
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
640 settings |= MGMT_SETTING_CONFIGURATION;
645 static u32 get_current_settings(struct hci_dev *hdev)
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
713 #define PNP_INFO_SVCLASS_ID 0x1200
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 16)
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
739 uuids_start[1] = EIR_UUID16_ALL;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
772 uuids_start[1] = EIR_UUID32_ALL;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
784 uuids_start[0] += sizeof(u32);
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
805 uuids_start[1] = EIR_UUID128_ALL;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
815 memcpy(ptr, uuid->uuid, 16);
817 uuids_start[0] += 16;
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
835 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
840 name_len = strlen(hdev->dev_name);
842 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
844 if (name_len > max_len) {
846 ptr[1] = EIR_NAME_SHORT;
848 ptr[1] = EIR_NAME_COMPLETE;
850 ptr[0] = name_len + 1;
852 memcpy(ptr + 2, hdev->dev_name, name_len);
854 ad_len += (name_len + 2);
855 ptr += (name_len + 2);
861 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
863 /* TODO: Set the appropriate entries based on advertising instance flags
864 * here once flags other than 0 are supported.
866 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
867 hdev->adv_instance.scan_rsp_len);
869 return hdev->adv_instance.scan_rsp_len;
872 static void update_scan_rsp_data_for_instance(struct hci_request *req,
875 struct hci_dev *hdev = req->hdev;
876 struct hci_cp_le_set_scan_rsp_data cp;
879 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
882 memset(&cp, 0, sizeof(cp));
885 len = create_instance_scan_rsp_data(hdev, cp.data);
887 len = create_default_scan_rsp_data(hdev, cp.data);
889 if (hdev->scan_rsp_data_len == len &&
890 !memcmp(cp.data, hdev->scan_rsp_data, len))
893 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
894 hdev->scan_rsp_data_len = len;
898 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
901 static void update_scan_rsp_data(struct hci_request *req)
903 struct hci_dev *hdev = req->hdev;
906 /* The "Set Advertising" setting supersedes the "Add Advertising"
907 * setting. Here we set the scan response data based on which
908 * setting was set. When neither apply, default to the global settings,
909 * represented by instance "0".
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
917 update_scan_rsp_data_for_instance(req, instance);
920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
922 struct mgmt_pending_cmd *cmd;
924 /* If there's a pending mgmt command the flags will not yet have
925 * their final values, so check for this first.
927 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
929 struct mgmt_mode *cp = cmd->param;
931 return LE_AD_GENERAL;
932 else if (cp->val == 0x02)
933 return LE_AD_LIMITED;
935 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 return LE_AD_LIMITED;
937 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 return LE_AD_GENERAL;
944 static u8 create_default_adv_data(struct hci_dev *hdev, u8 *ptr)
946 u8 ad_len = 0, flags = 0;
948 flags |= get_adv_discov_flags(hdev);
950 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
951 flags |= LE_AD_NO_BREDR;
954 BT_DBG("adv flags 0x%02x", flags);
964 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
966 ptr[1] = EIR_TX_POWER;
967 ptr[2] = (u8) hdev->adv_tx_power;
976 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 *ptr)
978 u8 ad_len = 0, flags = 0;
980 /* The Add Advertising command allows userspace to set both the general
981 * and limited discoverable flags.
983 if (hdev->adv_instance.flags & MGMT_ADV_FLAG_DISCOV)
984 flags |= LE_AD_GENERAL;
986 if (hdev->adv_instance.flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
987 flags |= LE_AD_LIMITED;
989 if (flags || (hdev->adv_instance.flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
990 /* If a discovery flag wasn't provided, simply use the global
994 flags |= get_adv_discov_flags(hdev);
996 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
997 flags |= LE_AD_NO_BREDR;
1007 memcpy(ptr, hdev->adv_instance.adv_data,
1008 hdev->adv_instance.adv_data_len);
1009 ad_len += hdev->adv_instance.adv_data_len;
1014 static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
1016 struct hci_dev *hdev = req->hdev;
1017 struct hci_cp_le_set_adv_data cp;
1020 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1023 memset(&cp, 0, sizeof(cp));
1026 len = create_instance_adv_data(hdev, cp.data);
1028 len = create_default_adv_data(hdev, cp.data);
1030 /* There's nothing to do if the data hasn't changed */
1031 if (hdev->adv_data_len == len &&
1032 memcmp(cp.data, hdev->adv_data, len) == 0)
1035 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1036 hdev->adv_data_len = len;
1040 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1043 static u8 get_current_adv_instance(struct hci_dev *hdev)
1045 /* The "Set Advertising" setting supersedes the "Add Advertising"
1046 * setting. Here we set the advertising data based on which
1047 * setting was set. When neither apply, default to the global settings,
1048 * represented by instance "0".
1050 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
1051 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
1057 static bool get_connectable(struct hci_dev *hdev)
1059 struct mgmt_pending_cmd *cmd;
1061 /* If there's a pending mgmt command the flag will not yet have
1062 * it's final value, so check for this first.
1064 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1066 struct mgmt_mode *cp = cmd->param;
1071 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1074 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1078 if (instance > 0x01)
1082 return hdev->adv_instance.flags;
1086 /* For instance 0, assemble the flags from global settings */
1087 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE) ||
1088 get_connectable(hdev))
1089 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1091 /* TODO: Add the rest of the flags */
1096 static void update_adv_data(struct hci_request *req)
1098 struct hci_dev *hdev = req->hdev;
1099 u8 instance = get_current_adv_instance(hdev);
1101 update_adv_data_for_instance(req, instance);
1104 int mgmt_update_adv_data(struct hci_dev *hdev)
1106 struct hci_request req;
1108 hci_req_init(&req, hdev);
1109 update_adv_data(&req);
1111 return hci_req_run(&req, NULL);
1114 static void create_eir(struct hci_dev *hdev, u8 *data)
1119 name_len = strlen(hdev->dev_name);
1123 if (name_len > 48) {
1125 ptr[1] = EIR_NAME_SHORT;
1127 ptr[1] = EIR_NAME_COMPLETE;
1129 /* EIR Data length */
1130 ptr[0] = name_len + 1;
1132 memcpy(ptr + 2, hdev->dev_name, name_len);
1134 ptr += (name_len + 2);
1137 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1139 ptr[1] = EIR_TX_POWER;
1140 ptr[2] = (u8) hdev->inq_tx_power;
1145 if (hdev->devid_source > 0) {
1147 ptr[1] = EIR_DEVICE_ID;
1149 put_unaligned_le16(hdev->devid_source, ptr + 2);
1150 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1151 put_unaligned_le16(hdev->devid_product, ptr + 6);
1152 put_unaligned_le16(hdev->devid_version, ptr + 8);
1157 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1158 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1159 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1162 static void update_eir(struct hci_request *req)
1164 struct hci_dev *hdev = req->hdev;
1165 struct hci_cp_write_eir cp;
1167 if (!hdev_is_powered(hdev))
1170 if (!lmp_ext_inq_capable(hdev))
1173 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1176 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1179 memset(&cp, 0, sizeof(cp));
1181 create_eir(hdev, cp.data);
1183 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1186 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1188 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1191 static u8 get_service_classes(struct hci_dev *hdev)
1193 struct bt_uuid *uuid;
1196 list_for_each_entry(uuid, &hdev->uuids, list)
1197 val |= uuid->svc_hint;
1202 static void update_class(struct hci_request *req)
1204 struct hci_dev *hdev = req->hdev;
1207 BT_DBG("%s", hdev->name);
1209 if (!hdev_is_powered(hdev))
1212 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1215 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1218 cod[0] = hdev->minor_class;
1219 cod[1] = hdev->major_class;
1220 cod[2] = get_service_classes(hdev);
1222 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1225 if (memcmp(cod, hdev->dev_class, 3) == 0)
1228 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1231 static void disable_advertising(struct hci_request *req)
1235 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1238 static void enable_advertising(struct hci_request *req)
1240 struct hci_dev *hdev = req->hdev;
1241 struct hci_cp_le_set_adv_param cp;
1242 u8 own_addr_type, enable = 0x01;
1247 if (hci_conn_num(hdev, LE_LINK) > 0)
1250 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1251 disable_advertising(req);
1253 /* Clear the HCI_LE_ADV bit temporarily so that the
1254 * hci_update_random_address knows that it's safe to go ahead
1255 * and write a new random address. The flag will be set back on
1256 * as soon as the SET_ADV_ENABLE HCI command completes.
1258 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1260 instance = get_current_adv_instance(hdev);
1261 flags = get_adv_instance_flags(hdev, instance);
1262 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE);
1264 /* Set require_privacy to true only when non-connectable
1265 * advertising is used. In that case it is fine to use a
1266 * non-resolvable private address.
1268 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1271 memset(&cp, 0, sizeof(cp));
1272 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1273 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1274 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1275 cp.own_address_type = own_addr_type;
1276 cp.channel_map = hdev->le_adv_channel_map;
1278 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1280 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1283 static void service_cache_off(struct work_struct *work)
1285 struct hci_dev *hdev = container_of(work, struct hci_dev,
1286 service_cache.work);
1287 struct hci_request req;
1289 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1292 hci_req_init(&req, hdev);
1299 hci_dev_unlock(hdev);
1301 hci_req_run(&req, NULL);
1304 static void rpa_expired(struct work_struct *work)
1306 struct hci_dev *hdev = container_of(work, struct hci_dev,
1308 struct hci_request req;
1312 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1314 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1317 /* The generation of a new RPA and programming it into the
1318 * controller happens in the enable_advertising() function.
1320 hci_req_init(&req, hdev);
1321 enable_advertising(&req);
1322 hci_req_run(&req, NULL);
1325 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1327 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1330 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1331 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1333 /* Non-mgmt controlled devices get this bit set
1334 * implicitly so that pairing works for them, however
1335 * for mgmt we require user-space to explicitly enable
1338 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: fill a mgmt_rp_read_info reply with the
 * controller's address, version, class, names and settings bitmasks, and
 * send it back with a successful command-complete.
 */
1341 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1342 void *data, u16 data_len)
1344 struct mgmt_rp_read_info rp;
1346 BT_DBG("sock %p %s", sk, hdev->name);
/* Zero the reply first so unset/reserved fields are deterministic. */
1350 memset(&rp, 0, sizeof(rp));
1352 bacpy(&rp.bdaddr, &hdev->bdaddr);
1354 rp.version = hdev->hci_ver;
1355 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
/* Settings are wire-format little-endian 32-bit bitmasks. */
1357 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1358 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1360 memcpy(rp.dev_class, hdev->dev_class, 3);
1362 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1363 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1365 hci_dev_unlock(hdev);
1367 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Send a command-complete for @opcode whose payload is the current
 * settings bitmask (little-endian) of @hdev.
 */
1371 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1373 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1375 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* HCI request completion callback for clean_up_hci_state(): once no
 * connections remain, cancel the delayed power-off and run it
 * immediately on the request workqueue.
 */
1379 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1381 BT_DBG("%s status 0x%02x", hdev->name, status);
1383 if (hci_conn_count(hdev) == 0) {
1384 cancel_delayed_work(&hdev->power_off);
1385 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Append the HCI commands needed to stop any ongoing discovery to @req.
 * Handles the active-discovery states (inquiry and/or LE scan during
 * DISCOVERY_FINDING, name resolution during DISCOVERY_RESOLVING) and,
 * in the default case, a passive LE scan. Returns whether commands were
 * queued (per the callers' use of the result; exact return statements
 * fall outside this partial view).
 */
1389 static bool hci_stop_discovery(struct hci_request *req)
1391 struct hci_dev *hdev = req->hdev;
1392 struct hci_cp_remote_name_req_cancel cp;
1393 struct inquiry_entry *e;
1395 switch (hdev->discovery.state) {
1396 case DISCOVERY_FINDING:
/* BR/EDR inquiry in progress — cancel it. */
1397 if (test_bit(HCI_INQUIRY, &hdev->flags))
1398 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* LE scan in progress — stop its auto-disable timer and disable it. */
1400 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1401 cancel_delayed_work(&hdev->le_scan_disable);
1402 hci_req_add_le_scan_disable(req);
1407 case DISCOVERY_RESOLVING:
1408 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1413 bacpy(&cp.bdaddr, &e->data.bdaddr);
1414 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1420 /* Passive scanning */
1421 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1422 hci_req_add_le_scan_disable(req);
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance, skipping @sk (the
 * originating socket) as is the mgmt_event() convention.
 */
1432 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1435 struct mgmt_ev_advertising_added ev;
1437 ev.instance = instance;
1439 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance, skipping @sk. */
1442 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1445 struct mgmt_ev_advertising_removed ev;
1447 ev.instance = instance;
1449 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Tear down the single stored advertising instance: cancel its timeout,
 * wipe the state, notify listeners (instance id 1), clear the flag and —
 * when the controller is powered and not in generic advertising mode —
 * disable advertising on the controller as well.
 */
1452 static void clear_adv_instance(struct hci_dev *hdev)
1454 struct hci_request req;
1456 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1459 if (hdev->adv_instance.timeout)
1460 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
1462 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
/* This kernel supports only one instance, always reported as id 1. */
1463 advertising_removed(NULL, hdev, 1);
1464 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
/* If powered off, or generic advertising is on, leave the controller's
 * advertising state alone.
 */
1466 if (!hdev_is_powered(hdev) ||
1467 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1470 hci_req_init(&req, hdev);
1471 disable_advertising(&req);
1472 hci_req_run(&req, NULL);
/* Build and run one HCI request that quiesces the controller before a
 * power-off: disable page/inquiry scan, clear the adv instance, disable
 * advertising, stop discovery, and disconnect/cancel/reject every
 * connection depending on its state. Completion is handled by
 * clean_up_hci_complete(). Returns the hci_req_run() result.
 * NOTE(review): partial extraction — several case labels and argument
 * lines of this switch are missing from this view.
 */
1475 static int clean_up_hci_state(struct hci_dev *hdev)
1477 struct hci_request req;
1478 struct hci_conn *conn;
1479 bool discov_stopped;
1482 hci_req_init(&req, hdev);
1484 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1485 test_bit(HCI_PSCAN, &hdev->flags)) {
1487 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1490 if (hdev->adv_instance.timeout)
1491 clear_adv_instance(hdev);
1493 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1494 disable_advertising(&req);
1496 discov_stopped = hci_stop_discovery(&req);
/* Walk every connection and queue the appropriate teardown command
 * for its current state.
 */
1498 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1499 struct hci_cp_disconnect dc;
1500 struct hci_cp_reject_conn_req rej;
1502 switch (conn->state) {
1505 dc.handle = cpu_to_le16(conn->handle);
1506 dc.reason = 0x15; /* Terminated due to Power Off */
1507 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing connection still being set up — cancel creation. */
1510 if (conn->type == LE_LINK)
1511 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1513 else if (conn->type == ACL_LINK)
1514 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request — reject it. */
1518 bacpy(&rej.bdaddr, &conn->dst);
1519 rej.reason = 0x15; /* Terminated due to Power Off */
1520 if (conn->type == ACL_LINK)
1521 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1523 else if (conn->type == SCO_LINK)
1524 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1530 err = hci_req_run(&req, clean_up_hci_complete);
/* If commands were queued for discovery, reflect the pending stop. */
1531 if (!err && discov_stopped)
1532 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validate the mode byte, reject if a
 * power command is already pending, short-circuit the AUTO_OFF case,
 * and otherwise queue power-on work or a clean shutdown followed by a
 * timed power-off.
 * NOTE(review): partial extraction — labels/braces between the visible
 * lines are missing from this view.
 */
1537 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1540 struct mgmt_mode *cp = data;
1541 struct mgmt_pending_cmd *cmd;
1544 BT_DBG("request for %s", hdev->name);
/* Only 0x00 (off) and 0x01 (on) are valid mode values. */
1546 if (cp->val != 0x00 && cp->val != 0x01)
1547 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1548 MGMT_STATUS_INVALID_PARAMS);
1552 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1553 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: cancel it and report the power state directly. */
1558 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1559 cancel_delayed_work(&hdev->power_off);
1562 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1564 err = mgmt_powered(hdev, 1);
/* Requested state already matches — just acknowledge. */
1569 if (!!cp->val == hdev_is_powered(hdev)) {
1570 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1574 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1581 queue_work(hdev->req_workqueue, &hdev->power_on);
1584 /* Disconnect connections, stop scans, etc */
1585 err = clean_up_hci_state(hdev);
/* Arm a fallback timer in case clean-up never completes. */
1587 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1588 HCI_POWER_OFF_TIMEOUT);
1590 /* ENODATA means there were no HCI commands queued */
1591 if (err == -ENODATA) {
1592 cancel_delayed_work(&hdev->power_off);
1593 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1599 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all mgmt sockets except @skip.
 */
1603 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1605 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1607 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
/* Public wrapper: broadcast the new-settings event to every socket. */
1611 int mgmt_new_settings(struct hci_dev *hdev)
1613 return new_settings(hdev, NULL);
1618 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings, detach it from the pending list, and remember the
 * first socket seen (with a reference) in the cmd_lookup match.
 */
1622 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1624 struct cmd_lookup *match = data;
1626 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1628 list_del(&cmd->list);
/* Keep a reference to the first responder's socket for the caller. */
1630 if (match->sk == NULL) {
1631 match->sk = cmd->sk;
1632 sock_hold(match->sk);
1635 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail each pending command with the
 * status pointed to by @data and remove it.
 */
1638 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1642 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1643 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: prefer the command's own
 * cmd_complete handler when present; otherwise fall back to a plain
 * status response.
 */
1646 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1648 if (cmd->cmd_complete) {
1651 cmd->cmd_complete(cmd, *status);
1652 mgmt_pending_remove(cmd);
1657 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's stored parameters
 * back as the reply payload.
 */
1660 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1662 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1663 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1666 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1668 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1669 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, else
 * SUCCESS.
 */
1672 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1674 if (!lmp_bredr_capable(hdev))
1675 return MGMT_STATUS_NOT_SUPPORTED;
1676 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1677 return MGMT_STATUS_REJECTED;
1679 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status, mirroring mgmt_bredr_support():
 * NOT_SUPPORTED / REJECTED / SUCCESS.
 */
1682 static u8 mgmt_le_support(struct hci_dev *hdev)
1684 if (!lmp_le_capable(hdev))
1685 return MGMT_STATUS_NOT_SUPPORTED;
1686 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1687 return MGMT_STATUS_REJECTED;
1689 return MGMT_STATUS_SUCCESS;
/* HCI completion for Set Discoverable: on error report status and drop
 * the limited flag; on success update HCI_DISCOVERABLE, (re)arm the
 * discoverable timeout, reply, broadcast new settings, and refresh
 * class/page-scan state via a follow-up request.
 * NOTE(review): partial extraction — some branch/label lines are
 * missing from this view.
 */
1692 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1695 struct mgmt_pending_cmd *cmd;
1696 struct mgmt_mode *cp;
1697 struct hci_request req;
1700 BT_DBG("status 0x%02x", status);
1704 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* HCI-level failure: propagate the translated error to userspace. */
1709 u8 mgmt_err = mgmt_status(status);
1710 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1711 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1717 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
/* A non-zero timeout means discoverability auto-expires. */
1719 if (hdev->discov_timeout > 0) {
1720 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1721 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1725 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1728 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1731 new_settings(hdev, cmd->sk);
1733 /* When the discoverable mode gets changed, make sure
1734 * that class of device has the limited discoverable
1735 * bit correctly set. Also update page scan based on whitelist
1738 hci_req_init(&req, hdev);
1739 __hci_update_page_scan(&req);
1741 hci_req_run(&req, NULL);
1744 mgmt_pending_remove(cmd);
1747 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable. Validates transport/param/timeout
 * combinations, handles the powered-off and no-op cases directly, and
 * otherwise queues the scan-enable / IAC-LAP / advertising-data HCI
 * commands, completing in set_discoverable_complete().
 * NOTE(review): partial extraction — some braces and error labels are
 * missing from this view.
 */
1750 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1753 struct mgmt_cp_set_discoverable *cp = data;
1754 struct mgmt_pending_cmd *cmd;
1755 struct hci_request req;
1760 BT_DBG("request for %s", hdev->name);
/* Requires at least one enabled transport. */
1762 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1763 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1764 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1765 MGMT_STATUS_REJECTED);
1767 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1768 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1769 MGMT_STATUS_INVALID_PARAMS);
1771 timeout = __le16_to_cpu(cp->timeout);
1773 /* Disabling discoverable requires that no timeout is set,
1774 * and enabling limited discoverable requires a timeout.
1776 if ((cp->val == 0x00 && timeout > 0) ||
1777 (cp->val == 0x02 && timeout == 0))
1778 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1779 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1783 if (!hdev_is_powered(hdev) && timeout > 0) {
1784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1785 MGMT_STATUS_NOT_POWERED);
1789 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1790 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1791 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be on first. */
1796 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1797 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1798 MGMT_STATUS_REJECTED);
1802 if (!hdev_is_powered(hdev)) {
1803 bool changed = false;
1805 /* Setting limited discoverable when powered off is
1806 * not a valid operation since it requires a timeout
1807 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1809 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1810 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1814 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1819 err = new_settings(hdev, sk);
1824 /* If the current mode is the same, then just update the timeout
1825 * value with the new value. And if only the timeout gets updated,
1826 * then no need for any HCI transactions.
1828 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1829 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1830 HCI_LIMITED_DISCOVERABLE)) {
1831 cancel_delayed_work(&hdev->discov_off);
1832 hdev->discov_timeout = timeout;
1834 if (cp->val && hdev->discov_timeout > 0) {
1835 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1836 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1840 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1844 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1850 /* Cancel any potential discoverable timeout that might be
1851 * still active and store new timeout value. The arming of
1852 * the timeout happens in the complete handler.
1854 cancel_delayed_work(&hdev->discov_off);
1855 hdev->discov_timeout = timeout;
1857 /* Limited discoverable mode */
1858 if (cp->val == 0x02)
1859 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1861 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1863 hci_req_init(&req, hdev);
1865 /* The procedure for LE-only controllers is much simpler - just
1866 * update the advertising data.
1868 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1874 struct hci_cp_write_current_iac_lap hci_cp;
1876 if (cp->val == 0x02) {
1877 /* Limited discoverable mode */
1878 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9e8b00 and GIAC 0x9e8b33, little-endian on the wire. */
1879 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1880 hci_cp.iac_lap[1] = 0x8b;
1881 hci_cp.iac_lap[2] = 0x9e;
1882 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1883 hci_cp.iac_lap[4] = 0x8b;
1884 hci_cp.iac_lap[5] = 0x9e;
1886 /* General discoverable mode */
1888 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1889 hci_cp.iac_lap[1] = 0x8b;
1890 hci_cp.iac_lap[2] = 0x9e;
/* Payload: num_iac byte plus 3 bytes per IAC LAP. */
1893 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1894 (hci_cp.num_iac * 3) + 1, &hci_cp);
1896 scan |= SCAN_INQUIRY;
1898 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1901 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1904 update_adv_data(&req);
1906 err = hci_req_run(&req, set_discoverable_complete);
1908 mgmt_pending_remove(cmd);
1911 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing "fast
 * connectable": interlaced scan with a 160 ms interval when enabled,
 * standard scan with the default 1.28 s interval otherwise. Commands
 * are only added when the value would actually change. No-ops on
 * controllers without BR/EDR enabled or older than Bluetooth 1.2.
 */
1915 static void write_fast_connectable(struct hci_request *req, bool enable)
1917 struct hci_dev *hdev = req->hdev;
1918 struct hci_cp_write_page_scan_activity acp;
1921 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* Page scan type/activity commands require Bluetooth 1.2+. */
1924 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1928 type = PAGE_SCAN_TYPE_INTERLACED;
1930 /* 160 msec page scan interval */
1931 acp.interval = cpu_to_le16(0x0100);
1933 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1935 /* default 1.28 sec page scan */
1936 acp.interval = cpu_to_le16(0x0800);
1939 acp.window = cpu_to_le16(0x0012);
/* Only send commands when the stored values differ. */
1941 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1942 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1943 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1946 if (hdev->page_scan_type != type)
1947 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI completion for Set Connectable: on success toggle the
 * CONNECTABLE (and possibly DISCOVERABLE) flags, reply to the pending
 * command, and on a real change broadcast settings and refresh
 * page-scan / advertising-data / background-scan state.
 * NOTE(review): partial extraction — the enable/disable branch
 * structure is only partially visible here.
 */
1950 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1953 struct mgmt_pending_cmd *cmd;
1954 struct mgmt_mode *cp;
1955 bool conn_changed, discov_changed;
1957 BT_DBG("status 0x%02x", status);
1961 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1966 u8 mgmt_err = mgmt_status(status);
1967 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1973 conn_changed = !hci_dev_test_and_set_flag(hdev,
1975 discov_changed = false;
/* Disabling connectable also clears discoverable. */
1977 conn_changed = hci_dev_test_and_clear_flag(hdev,
1979 discov_changed = hci_dev_test_and_clear_flag(hdev,
1983 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1985 if (conn_changed || discov_changed) {
1986 new_settings(hdev, cmd->sk);
1987 hci_update_page_scan(hdev);
1989 mgmt_update_adv_data(hdev);
1990 hci_update_background_scan(hdev);
1994 mgmt_pending_remove(cmd);
1997 hci_dev_unlock(hdev);
/* Flag-only Set Connectable path (used when powered off or when no HCI
 * commands were needed): update CONNECTABLE/DISCOVERABLE flags, reply,
 * and on change refresh scans and broadcast new settings.
 */
2000 static int set_connectable_update_settings(struct hci_dev *hdev,
2001 struct sock *sk, u8 val)
2003 bool changed = false;
2006 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2010 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off implies discoverable off as well. */
2012 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2013 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2016 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2021 hci_update_page_scan(hdev);
2022 hci_update_background_scan(hdev);
2023 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validate transport and mode byte,
 * take the settings-only path when powered off, reject when a
 * conflicting command is pending, then queue scan-enable and/or
 * advertising updates, completing in set_connectable_complete().
 * Falls back to the settings-only path when no HCI commands were
 * queued (-ENODATA).
 * NOTE(review): partial extraction — some braces/labels are missing
 * from this view.
 */
2029 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2032 struct mgmt_mode *cp = data;
2033 struct mgmt_pending_cmd *cmd;
2034 struct hci_request req;
2038 BT_DBG("request for %s", hdev->name);
2040 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2041 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2042 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2043 MGMT_STATUS_REJECTED);
2045 if (cp->val != 0x00 && cp->val != 0x01)
2046 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2047 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI traffic needed, just adjust the flags. */
2051 if (!hdev_is_powered(hdev)) {
2052 err = set_connectable_update_settings(hdev, sk, cp->val);
2056 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2057 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2058 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2063 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2069 hci_req_init(&req, hdev);
2071 /* If BR/EDR is not enabled and we disable advertising as a
2072 * by-product of disabling connectable, we need to update the
2073 * advertising flags.
2075 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2077 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2078 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2080 update_adv_data(&req);
2081 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2085 /* If we don't have any whitelist entries just
2086 * disable all scanning. If there are entries
2087 * and we had both page and inquiry scanning
2088 * enabled then fall back to only page scanning.
2089 * Otherwise no changes are needed.
2091 if (list_empty(&hdev->whitelist))
2092 scan = SCAN_DISABLED;
2093 else if (test_bit(HCI_ISCAN, &hdev->flags))
2096 goto no_scan_update;
/* Going non-connectable: stop any pending discoverable timeout. */
2098 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2099 hdev->discov_timeout > 0)
2100 cancel_delayed_work(&hdev->discov_off);
2103 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2107 /* Update the advertising parameters if necessary */
2108 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2109 enable_advertising(&req);
2111 err = hci_req_run(&req, set_connectable_complete);
2113 mgmt_pending_remove(cmd);
2114 if (err == -ENODATA)
2115 err = set_connectable_update_settings(hdev, sk,
2121 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: pure flag toggle — no HCI commands are
 * needed. Replies with current settings and broadcasts on change.
 */
2125 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2128 struct mgmt_mode *cp = data;
2132 BT_DBG("request for %s", hdev->name);
2134 if (cp->val != 0x00 && cp->val != 0x01)
2135 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2136 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear report whether the flag actually changed. */
2141 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2143 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2145 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2150 err = new_settings(hdev, sk);
2153 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR. When powered off,
 * only the HCI_LINK_SECURITY flag changes; otherwise, unless the HCI
 * auth state already matches, a Write Authentication Enable command is
 * sent and the reply is deferred via the pending command.
 */
2157 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2160 struct mgmt_mode *cp = data;
2161 struct mgmt_pending_cmd *cmd;
2165 BT_DBG("request for %s", hdev->name);
2167 status = mgmt_bredr_support(hdev);
2169 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2172 if (cp->val != 0x00 && cp->val != 0x01)
2173 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2174 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: settings-only path, no HCI command. */
2178 if (!hdev_is_powered(hdev)) {
2179 bool changed = false;
2181 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2182 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2186 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2191 err = new_settings(hdev, sk);
2196 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2197 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches the request — just ack. */
2204 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2205 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2209 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2215 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2217 mgmt_pending_remove(cmd);
2222 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP-capable hardware.
 * Powered off: flag-only changes (disabling SSP also clears HS).
 * Powered on: sends Write SSP Mode (after turning off SSP debug mode
 * when disabling while debug keys are in use) and defers the reply.
 */
2226 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2228 struct mgmt_mode *cp = data;
2229 struct mgmt_pending_cmd *cmd;
2233 BT_DBG("request for %s", hdev->name);
2235 status = mgmt_bredr_support(hdev);
2237 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2239 if (!lmp_ssp_capable(hdev))
2240 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2241 MGMT_STATUS_NOT_SUPPORTED);
2243 if (cp->val != 0x00 && cp->val != 0x01)
2244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2245 MGMT_STATUS_INVALID_PARAMS);
2249 if (!hdev_is_powered(hdev)) {
2253 changed = !hci_dev_test_and_set_flag(hdev,
2256 changed = hci_dev_test_and_clear_flag(hdev,
2259 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so disable it too. */
2262 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2265 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2270 err = new_settings(hdev, sk);
2275 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2276 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2281 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2282 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2286 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* When disabling SSP while debug keys are in use, also turn off
 * SSP debug mode (cp->val is 0x00 here).
 */
2292 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2293 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2294 sizeof(cp->val), &cp->val);
2296 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2298 mgmt_pending_remove(cmd);
2303 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: requires BR/EDR, SSP-capable
 * hardware and SSP enabled. Purely a host-side flag — no HCI command is
 * sent; disabling while powered is rejected.
 */
2307 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2309 struct mgmt_mode *cp = data;
2314 BT_DBG("request for %s", hdev->name);
2316 status = mgmt_bredr_support(hdev);
2318 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2320 if (!lmp_ssp_capable(hdev))
2321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2322 MGMT_STATUS_NOT_SUPPORTED);
/* HS is layered on SSP; refuse if SSP is off. */
2324 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2325 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2326 MGMT_STATUS_REJECTED);
2328 if (cp->val != 0x00 && cp->val != 0x01)
2329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2330 MGMT_STATUS_INVALID_PARAMS);
/* A pending SET_SSP could flip the prerequisite underneath us. */
2334 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2335 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2341 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS while powered is rejected. */
2343 if (hdev_is_powered(hdev)) {
2344 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2345 MGMT_STATUS_REJECTED);
2349 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2352 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2357 err = new_settings(hdev, sk);
2360 hci_dev_unlock(hdev);
/* HCI completion for Set LE: on error fail all pending SET_LE commands;
 * on success answer them with settings, broadcast, and — when LE ended
 * up enabled — refresh advertising/scan-response data and the
 * background scan in a follow-up request.
 */
2364 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2366 struct cmd_lookup match = { NULL, hdev };
2371 u8 mgmt_err = mgmt_status(status);
2373 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2378 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2380 new_settings(hdev, match.sk);
2385 /* Make sure the controller has a good default for
2386 * advertising data. Restrict the update to when LE
2387 * has actually been enabled. During power on, the
2388 * update in powered_update_hci will take care of it.
2390 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2391 struct hci_request req;
2393 hci_req_init(&req, hdev);
2394 update_adv_data(&req);
2395 update_scan_rsp_data(&req);
2396 __hci_update_background_scan(&req);
2397 hci_req_run(&req, NULL);
2401 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: validate capability and mode byte, forbid
 * disabling LE on LE-only configurations, handle the flag-only path
 * (powered off or host-LE state already matching), then send Write LE
 * Host Supported (disabling advertising first if active) and defer the
 * reply to le_enable_complete().
 * NOTE(review): partial extraction — some braces/labels are missing
 * from this view.
 */
2404 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2406 struct mgmt_mode *cp = data;
2407 struct hci_cp_write_le_host_supported hci_cp;
2408 struct mgmt_pending_cmd *cmd;
2409 struct hci_request req;
2413 BT_DBG("request for %s", hdev->name);
2415 if (!lmp_le_capable(hdev))
2416 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2417 MGMT_STATUS_NOT_SUPPORTED);
2419 if (cp->val != 0x00 && cp->val != 0x01)
2420 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2421 MGMT_STATUS_INVALID_PARAMS);
2423 /* Bluetooth single mode LE only controllers or dual-mode
2424 * controllers configured as LE only devices, do not allow
2425 * switching LE off. These have either LE enabled explicitly
2426 * or BR/EDR has been previously switched off.
2428 * When trying to enable an already enabled LE, then gracefully
2429 * send a positive response. Trying to disable it however will
2430 * result into rejection.
2432 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2433 if (cp->val == 0x01)
2434 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2436 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2437 MGMT_STATUS_REJECTED);
2443 enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or host LE state already matches. */
2445 if (!hdev_is_powered(hdev) || val == enabled) {
2446 bool changed = false;
2448 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2449 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also drops the advertising setting. */
2453 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2454 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2458 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2463 err = new_settings(hdev, sk);
2468 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2469 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2470 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2475 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2481 hci_req_init(&req, hdev);
2483 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE + BR/EDR is always disabled. */
2487 hci_cp.simul = 0x00;
2489 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2490 disable_advertising(&req);
2493 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2496 err = hci_req_run(&req, le_enable_complete);
2498 mgmt_pending_remove(cmd);
2501 hci_dev_unlock(hdev);
2505 /* This is a helper function to test for pending mgmt commands that can
2506 * cause CoD or EIR HCI commands. We can only allow one such pending
2507 * mgmt command at a time since otherwise we cannot easily track what
2508 * the current values are, will be, and based on that calculate if a new
2509 * HCI command needs to be sent and if yes with what value.
 *
 * Scans hdev->mgmt_pending for UUID/class/power commands; the return
 * statements fall outside this partial view.
 */
2511 static bool pending_eir_or_class(struct hci_dev *hdev)
2513 struct mgmt_pending_cmd *cmd;
2515 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2516 switch (cmd->opcode) {
2517 case MGMT_OP_ADD_UUID:
2518 case MGMT_OP_REMOVE_UUID:
2519 case MGMT_OP_SET_DEV_CLASS:
2520 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * shortened UUIDs.
 */
2528 static const u8 bluetooth_base_uuid[] = {
2529 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2530 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes differ from the
 * Bluetooth Base UUID it is a true 128-bit UUID; otherwise the top
 * 32 bits decide between 16- and 32-bit forms. The return statements
 * fall outside this partial view.
 */
2533 static u8 get_uuid_size(const u8 *uuid)
2537 if (memcmp(uuid, bluetooth_base_uuid, 12))
2540 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for UUID/class commands: complete the pending
 * @mgmt_op with the translated status and the 3-byte device class.
 */
2547 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2549 struct mgmt_pending_cmd *cmd;
2553 cmd = pending_find(mgmt_op, hdev);
2557 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2558 mgmt_status(status), hdev->dev_class, 3);
2560 mgmt_pending_remove(cmd);
2563 hci_dev_unlock(hdev);
/* HCI completion callback for Add UUID — delegates to the shared
 * class-command completion helper.
 */
2566 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2568 BT_DBG("status 0x%02x", status);
2570 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: reject if another EIR/class command is
 * pending, allocate and append the UUID to hdev->uuids, and run an HCI
 * request to refresh class/EIR. -ENODATA (nothing to send) is treated
 * as immediate success; otherwise the reply is deferred via a pending
 * command and add_uuid_complete().
 */
2573 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2575 struct mgmt_cp_add_uuid *cp = data;
2576 struct mgmt_pending_cmd *cmd;
2577 struct hci_request req;
2578 struct bt_uuid *uuid;
2581 BT_DBG("request for %s", hdev->name);
/* Only one EIR/class-affecting command may be in flight at a time. */
2585 if (pending_eir_or_class(hdev)) {
2586 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2591 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2597 memcpy(uuid->uuid, cp->uuid, 16);
2598 uuid->svc_hint = cp->svc_hint;
2599 uuid->size = get_uuid_size(cp->uuid);
2601 list_add_tail(&uuid->list, &hdev->uuids);
2603 hci_req_init(&req, hdev);
2608 err = hci_req_run(&req, add_uuid_complete);
2610 if (err != -ENODATA)
/* No HCI commands were needed — complete immediately. */
2613 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2614 hdev->dev_class, 3);
2618 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2627 hci_dev_unlock(hdev);
/* Arm the service-cache machinery on a powered controller: set
 * HCI_SERVICE_CACHE and, if it was newly set, schedule the
 * service_cache_off delayed work.
 */
2631 static bool enable_service_cache(struct hci_dev *hdev)
2633 if (!hdev_is_powered(hdev))
2636 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2637 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI completion callback for Remove UUID — delegates to the shared
 * class-command completion helper.
 */
2645 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2647 BT_DBG("status 0x%02x", status);
2649 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: the all-zero wildcard UUID clears the
 * whole list (possibly just re-arming the service cache); otherwise the
 * matching entries are unlinked. Not finding any match is an
 * INVALID_PARAMS error. Then refresh class/EIR via an HCI request,
 * treating -ENODATA as immediate success, same as add_uuid().
 */
2652 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2655 struct mgmt_cp_remove_uuid *cp = data;
2656 struct mgmt_pending_cmd *cmd;
2657 struct bt_uuid *match, *tmp;
2658 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2659 struct hci_request req;
2662 BT_DBG("request for %s", hdev->name);
2666 if (pending_eir_or_class(hdev)) {
2667 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: clear everything. */
2672 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2673 hci_uuids_clear(hdev);
2675 if (enable_service_cache(hdev)) {
2676 err = mgmt_cmd_complete(sk, hdev->id,
2677 MGMT_OP_REMOVE_UUID,
2678 0, hdev->dev_class, 3);
2687 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2688 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2691 list_del(&match->list);
/* No entry matched the requested UUID. */
2697 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2698 MGMT_STATUS_INVALID_PARAMS);
2703 hci_req_init(&req, hdev);
2708 err = hci_req_run(&req, remove_uuid_complete);
2710 if (err != -ENODATA)
2713 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2714 hdev->dev_class, 3);
2718 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2727 hci_dev_unlock(hdev);
/* HCI completion callback for Set Device Class — delegates to the
 * shared class-command completion helper.
 */
2731 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2733 BT_DBG("status 0x%02x", status);
2735 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: BR/EDR-only. Validates the reserved
 * bits of major/minor, stores them, and — when powered — flushes any
 * pending service cache before running the class/EIR update request.
 * -ENODATA again means "nothing to send", completed immediately.
 * NOTE(review): the lock is dropped around cancel_delayed_work_sync()
 * because the service-cache work itself takes the dev lock.
 */
2738 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2741 struct mgmt_cp_set_dev_class *cp = data;
2742 struct mgmt_pending_cmd *cmd;
2743 struct hci_request req;
2746 BT_DBG("request for %s", hdev->name);
2748 if (!lmp_bredr_capable(hdev))
2749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2750 MGMT_STATUS_NOT_SUPPORTED);
2754 if (pending_eir_or_class(hdev)) {
2755 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2760 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2761 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2762 MGMT_STATUS_INVALID_PARAMS);
2766 hdev->major_class = cp->major;
2767 hdev->minor_class = cp->minor;
/* Powered off: store only; controller gets the class at power-on. */
2769 if (!hdev_is_powered(hdev)) {
2770 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2771 hdev->dev_class, 3);
2775 hci_req_init(&req, hdev);
2777 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2778 hci_dev_unlock(hdev);
2779 cancel_delayed_work_sync(&hdev->service_cache);
2786 err = hci_req_run(&req, set_class_complete);
2788 if (err != -ENODATA)
2791 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2792 hdev->dev_class, 3);
2796 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2805 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validate key_count against both the
 * overflow-safe maximum and the actual payload length, validate each
 * key's address type and key type, then replace the stored link keys
 * wholesale (debug-combination keys are deliberately skipped) and set
 * the keep-debug-keys policy flag from cp->debug_keys.
 */
2809 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2812 struct mgmt_cp_load_link_keys *cp = data;
/* Largest count that cannot overflow the u16 expected_len below. */
2813 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2814 sizeof(struct mgmt_link_key_info));
2815 u16 key_count, expected_len;
2819 BT_DBG("request for %s", hdev->name);
2821 if (!lmp_bredr_capable(hdev))
2822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2823 MGMT_STATUS_NOT_SUPPORTED);
2825 key_count = __le16_to_cpu(cp->key_count);
2826 if (key_count > max_key_count) {
2827 BT_ERR("load_link_keys: too big key_count value %u",
2829 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2830 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size exactly. */
2833 expected_len = sizeof(*cp) + key_count *
2834 sizeof(struct mgmt_link_key_info);
2835 if (expected_len != len) {
2836 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2839 MGMT_STATUS_INVALID_PARAMS);
2842 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2843 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2844 MGMT_STATUS_INVALID_PARAMS);
2846 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the stored key list. */
2849 for (i = 0; i < key_count; i++) {
2850 struct mgmt_link_key_info *key = &cp->keys[i];
2852 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2853 return mgmt_cmd_status(sk, hdev->id,
2854 MGMT_OP_LOAD_LINK_KEYS,
2855 MGMT_STATUS_INVALID_PARAMS);
2860 hci_link_keys_clear(hdev);
2863 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2865 changed = hci_dev_test_and_clear_flag(hdev,
2866 HCI_KEEP_DEBUG_KEYS);
2869 new_settings(hdev, NULL);
2871 for (i = 0; i < key_count; i++) {
2872 struct mgmt_link_key_info *key = &cp->keys[i];
2874 /* Always ignore debug keys and require a new pairing if
2875 * the user wants to use them.
2877 if (key->type == HCI_LK_DEBUG_COMBINATION)
2880 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2881 key->type, key->pin_len, NULL);
2884 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2886 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address, skipping
 * @skip_sk (typically the socket that requested the unpair).
 */
2891 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2892 u8 addr_type, struct sock *skip_sk)
2894 struct mgmt_ev_device_unpaired ev;
2896 bacpy(&ev.addr.bdaddr, bdaddr);
2897 ev.addr.type = addr_type;
2899 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored pairing material for
 * one peer (link key for BR/EDR; IRK and LTK for LE) and, if the command
 * requested it and a connection exists, terminate that connection with
 * an HCI Disconnect before completing the command.
 */
2903 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2906 struct mgmt_cp_unpair_device *cp = data;
2907 struct mgmt_rp_unpair_device rp;
2908 struct hci_cp_disconnect dc;
2909 struct mgmt_pending_cmd *cmd;
2910 struct hci_conn *conn;
/* The reply always echoes the target address back to the caller. */
2913 memset(&rp, 0, sizeof(rp));
2914 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2915 rp.addr.type = cp->addr.type;
/* Parameter validation: address type and the boolean disconnect flag. */
2917 if (!bdaddr_type_is_valid(cp->addr.type))
2918 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2919 MGMT_STATUS_INVALID_PARAMS,
2922 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2923 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2924 MGMT_STATUS_INVALID_PARAMS,
2929 if (!hdev_is_powered(hdev)) {
2930 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2931 MGMT_STATUS_NOT_POWERED, &rp,
2936 if (cp->addr.type == BDADDR_BREDR) {
2937 /* If disconnection is requested, then look up the
2938 * connection. If the remote device is connected, it
2939 * will be later used to terminate the link.
2941 * Setting it to NULL explicitly will cause no
2942 * termination of the link.
2945 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2950 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE path: look up any existing LE link to the peer. */
2954 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2957 /* Defer clearing up the connection parameters
2958 * until closing to give a chance of keeping
2959 * them if a repairing happens.
2961 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2963 /* If disconnection is not requested, then
2964 * clear the connection variable so that the
2965 * link is not terminated.
2967 if (!cp->disconnect)
/* Translate mgmt LE address type to the HCI ADDR_LE_DEV_* value. */
2971 if (cp->addr.type == BDADDR_LE_PUBLIC)
2972 addr_type = ADDR_LE_DEV_PUBLIC;
2974 addr_type = ADDR_LE_DEV_RANDOM;
2976 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2978 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* Key removal failed — the device was never paired. */
2982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2983 MGMT_STATUS_NOT_PAIRED, &rp,
2988 /* If the connection variable is set, then termination of the
2989 * link is requested.
2992 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2994 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect requested: queue a pending command and send HCI Disconnect;
 * completion is reported from the disconnect event path. */
2998 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3005 cmd->cmd_complete = addr_cmd_complete;
3007 dc.handle = cpu_to_le16(conn->handle);
3008 dc.reason = 0x13; /* Remote User Terminated Connection */
3009 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
3011 mgmt_pending_remove(cmd);
3014 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given peer. The command stays pending until the HCI disconnect
 * completes; only one disconnect may be pending per controller.
 */
3018 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3021 struct mgmt_cp_disconnect *cp = data;
3022 struct mgmt_rp_disconnect rp;
3023 struct mgmt_pending_cmd *cmd;
3024 struct hci_conn *conn;
/* Echo the target address in the reply. */
3029 memset(&rp, 0, sizeof(rp));
3030 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3031 rp.addr.type = cp->addr.type;
3033 if (!bdaddr_type_is_valid(cp->addr.type))
3034 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3035 MGMT_STATUS_INVALID_PARAMS,
3040 if (!test_bit(HCI_UP, &hdev->flags)) {
3041 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3042 MGMT_STATUS_NOT_POWERED, &rp,
/* Reject if another disconnect is already in flight. */
3047 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3048 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3049 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Pick the connection hash by transport type. */
3053 if (cp->addr.type == BDADDR_BREDR)
3054 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3057 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
3059 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3060 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3061 MGMT_STATUS_NOT_CONNECTED, &rp,
3066 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3072 cmd->cmd_complete = generic_cmd_complete;
3074 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3076 mgmt_pending_remove(cmd);
3079 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type to the corresponding mgmt
 * BDADDR_* address type used on the management interface.
 */
3083 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3085 switch (link_type) {
3087 switch (addr_type) {
3088 case ADDR_LE_DEV_PUBLIC:
3089 return BDADDR_LE_PUBLIC;
3092 /* Fallback to LE Random address type */
3093 return BDADDR_LE_RANDOM;
3097 /* Fallback to BR/EDR type */
3098 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: return the addresses of all
 * mgmt-visible (HCI_CONN_MGMT_CONNECTED) connections. SCO/eSCO links
 * are filtered out of the reply.
 */
3102 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3105 struct mgmt_rp_get_connections *rp;
3115 if (!hdev_is_powered(hdev)) {
3116 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3117 MGMT_STATUS_NOT_POWERED);
/* First pass: count the connections to size the reply buffer. */
3122 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3123 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3127 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3128 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in one mgmt_addr_info per qualifying connection. */
3135 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3136 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3138 bacpy(&rp->addr[i].bdaddr, &c->dst);
3139 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3140 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3145 rp->conn_count = cpu_to_le16(i);
3147 /* Recalculate length in case of filtered SCO connections, etc */
3148 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3150 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3156 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the
 * corresponding HCI PIN Code Negative Reply for the given address.
 * The pending entry is removed again if the HCI send fails.
 */
3160 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3161 struct mgmt_cp_pin_code_neg_reply *cp)
3163 struct mgmt_pending_cmd *cmd;
3166 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3171 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3172 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3174 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing. If high security is
 * pending but the PIN is not 16 bytes, a negative reply is sent instead.
 */
3179 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3182 struct hci_conn *conn;
3183 struct mgmt_cp_pin_code_reply *cp = data;
3184 struct hci_cp_pin_code_reply reply;
3185 struct mgmt_pending_cmd *cmd;
3192 if (!hdev_is_powered(hdev)) {
3193 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3194 MGMT_STATUS_NOT_POWERED);
/* PIN pairing is BR/EDR only — look up the ACL link to the peer. */
3198 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3201 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise reject the
 * pairing with a negative reply and fail this command. */
3205 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3206 struct mgmt_cp_pin_code_neg_reply ncp;
3208 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3210 BT_ERR("PIN code is not 16 bytes long");
3212 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3214 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3215 MGMT_STATUS_INVALID_PARAMS);
3220 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3226 cmd->cmd_complete = addr_cmd_complete;
3228 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3229 reply.pin_len = cp->pin_len;
3230 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3232 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3234 mgmt_pending_remove(cmd);
3237 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairings. Values above SMP_IO_KEYBOARD_DISPLAY are rejected.
 */
3241 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3244 struct mgmt_cp_set_io_capability *cp = data;
3248 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3249 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3250 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3254 hdev->io_capability = cp->io_capability;
3256 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3257 hdev->io_capability);
3259 hci_dev_unlock(hdev);
3261 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is the
 * given connection; used to route pairing callbacks back to the command.
 */
3265 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3267 struct hci_dev *hdev = conn->hdev;
3268 struct mgmt_pending_cmd *cmd;
3270 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3271 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3274 if (cmd->user_data != conn)
/* Complete a pending Pair Device command: send the reply with the peer
 * address, detach all pairing callbacks from the connection, drop the
 * reference held by the command, and keep the stored conn parameters.
 */
3283 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3285 struct mgmt_rp_pair_device rp;
3286 struct hci_conn *conn = cmd->user_data;
3289 bacpy(&rp.addr.bdaddr, &conn->dst);
3290 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3292 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3293 status, &rp, sizeof(rp));
3295 /* So we don't get further callbacks for this connection */
3296 conn->connect_cfm_cb = NULL;
3297 conn->security_cfm_cb = NULL;
3298 conn->disconn_cfm_cb = NULL;
3300 hci_conn_drop(conn);
3302 /* The device is paired so there is no need to remove
3303 * its connection parameters anymore.
3305 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called from the SMP layer when pairing over LE finishes; completes
 * any pending Pair Device command with success or failure.
 */
3312 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3314 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3315 struct mgmt_pending_cmd *cmd;
3317 cmd = find_pairing(conn);
3319 cmd->cmd_complete(cmd, status);
3320 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback: translate the HCI
 * status and complete the pending Pair Device command, if one exists.
 */
3324 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3326 struct mgmt_pending_cmd *cmd;
3328 BT_DBG("status %u", status);
3330 cmd = find_pairing(conn);
3332 BT_DBG("Unable to find a pending command");
3336 cmd->cmd_complete(cmd, mgmt_status(status));
3337 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb. The elided lines presumably filter
 * out cases handled elsewhere (e.g. success handled by mgmt_smp_complete)
 * — TODO confirm against the full source.
 */
3340 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3342 struct mgmt_pending_cmd *cmd;
3344 BT_DBG("status %u", status);
3349 cmd = find_pairing(conn);
3351 BT_DBG("Unable to find a pending command");
3355 cmd->cmd_complete(cmd, mgmt_status(status));
3356 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Creates an ACL (BR/EDR) or LE connection, installs pairing callbacks
 * on it, and leaves a pending command that is completed from those
 * callbacks (or immediately if the link is already secure).
 */
3359 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3362 struct mgmt_cp_pair_device *cp = data;
3363 struct mgmt_rp_pair_device rp;
3364 struct mgmt_pending_cmd *cmd;
3365 u8 sec_level, auth_type;
3366 struct hci_conn *conn;
/* Reply always echoes the target address. */
3371 memset(&rp, 0, sizeof(rp));
3372 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3373 rp.addr.type = cp->addr.type;
3375 if (!bdaddr_type_is_valid(cp->addr.type))
3376 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3377 MGMT_STATUS_INVALID_PARAMS,
3380 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3381 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3382 MGMT_STATUS_INVALID_PARAMS,
3387 if (!hdev_is_powered(hdev)) {
3388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3389 MGMT_STATUS_NOT_POWERED, &rp,
/* Refuse to pair again with an already-paired peer. */
3394 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3395 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3396 MGMT_STATUS_ALREADY_PAIRED, &rp,
3401 sec_level = BT_SECURITY_MEDIUM;
3402 auth_type = HCI_AT_DEDICATED_BONDING;
3404 if (cp->addr.type == BDADDR_BREDR) {
3405 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3410 /* Convert from L2CAP channel address type to HCI address type
3412 if (cp->addr.type == BDADDR_LE_PUBLIC)
3413 addr_type = ADDR_LE_DEV_PUBLIC;
3415 addr_type = ADDR_LE_DEV_RANDOM;
3417 /* When pairing a new device, it is expected to remember
3418 * this device for future connections. Adding the connection
3419 * parameter information ahead of time allows tracking
3420 * of the slave preferred values and will speed up any
3421 * further connection establishment.
3423 * If connection parameters already exist, then they
3424 * will be kept and this function does nothing.
3426 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3428 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3429 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: map the errno to an mgmt status. */
3436 if (PTR_ERR(conn) == -EBUSY)
3437 status = MGMT_STATUS_BUSY;
3438 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3439 status = MGMT_STATUS_NOT_SUPPORTED;
3440 else if (PTR_ERR(conn) == -ECONNREFUSED)
3441 status = MGMT_STATUS_REJECTED;
3443 status = MGMT_STATUS_CONNECT_FAILED;
3445 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3446 status, &rp, sizeof(rp));
/* A callback already installed means another pairing is in progress. */
3450 if (conn->connect_cfm_cb) {
3451 hci_conn_drop(conn);
3452 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3453 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3457 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3460 hci_conn_drop(conn);
3464 cmd->cmd_complete = pairing_complete;
3466 /* For LE, just connecting isn't a proof that the pairing finished */
3467 if (cp->addr.type == BDADDR_BREDR) {
3468 conn->connect_cfm_cb = pairing_complete_cb;
3469 conn->security_cfm_cb = pairing_complete_cb;
3470 conn->disconn_cfm_cb = pairing_complete_cb;
3472 conn->connect_cfm_cb = le_pairing_complete_cb;
3473 conn->security_cfm_cb = le_pairing_complete_cb;
3474 conn->disconn_cfm_cb = le_pairing_complete_cb;
3477 conn->io_capability = cp->io_cap;
/* The pending command holds a reference on the connection. */
3478 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure enough, complete immediately. */
3480 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3481 hci_conn_security(conn, sec_level, auth_type, true)) {
3482 cmd->cmd_complete(cmd, 0);
3483 mgmt_pending_remove(cmd);
3489 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending Pair Device
 * command for the given address, completing it with status Cancelled.
 */
3493 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3496 struct mgmt_addr_info *addr = data;
3497 struct mgmt_pending_cmd *cmd;
3498 struct hci_conn *conn;
3505 if (!hdev_is_powered(hdev)) {
3506 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3507 MGMT_STATUS_NOT_POWERED);
3511 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3513 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3514 MGMT_STATUS_INVALID_PARAMS);
/* The address must match the connection the pairing is for. */
3518 conn = cmd->user_data;
3520 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3521 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3522 MGMT_STATUS_INVALID_PARAMS);
3526 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3527 mgmt_pending_remove(cmd);
3529 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3530 addr, sizeof(*addr));
3532 hci_dev_unlock(hdev);
/* Shared implementation for all user pairing responses (PIN neg reply,
 * user confirm / passkey replies and their negatives). LE responses are
 * handed to SMP directly; BR/EDR responses are forwarded as the given
 * HCI command with a pending mgmt command tracking completion.
 */
3536 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3537 struct mgmt_addr_info *addr, u16 mgmt_op,
3538 u16 hci_op, __le32 passkey)
3540 struct mgmt_pending_cmd *cmd;
3541 struct hci_conn *conn;
3546 if (!hdev_is_powered(hdev)) {
3547 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3548 MGMT_STATUS_NOT_POWERED, addr,
3553 if (addr->type == BDADDR_BREDR)
3554 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3556 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3559 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3560 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go straight to the SMP layer. */
3565 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3566 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3568 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3569 MGMT_STATUS_SUCCESS, addr,
3572 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3573 MGMT_STATUS_FAILED, addr,
3579 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3585 cmd->cmd_complete = addr_cmd_complete;
3587 /* Continue with pairing via HCI */
3588 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3589 struct hci_cp_user_passkey_reply cp;
3591 bacpy(&cp.bdaddr, &addr->bdaddr);
3592 cp.passkey = passkey;
3593 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry just the peer BD_ADDR. */
3595 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3599 mgmt_pending_remove(cmd);
3602 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the PIN Code Negative Reply HCI opcode.
 */
3606 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3607 void *data, u16 len)
3609 struct mgmt_cp_pin_code_neg_reply *cp = data;
3613 return user_pairing_resp(sk, hdev, &cp->addr,
3614 MGMT_OP_PIN_CODE_NEG_REPLY,
3615 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the fixed command size
 * then delegate to user_pairing_resp().
 */
3618 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3621 struct mgmt_cp_user_confirm_reply *cp = data;
3625 if (len != sizeof(*cp))
3626 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3627 MGMT_STATUS_INVALID_PARAMS);
3629 return user_pairing_resp(sk, hdev, &cp->addr,
3630 MGMT_OP_USER_CONFIRM_REPLY,
3631 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative-confirm HCI opcode.
 */
3634 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3635 void *data, u16 len)
3637 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3641 return user_pairing_resp(sk, hdev, &cp->addr,
3642 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3643 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forward the user's passkey via
 * user_pairing_resp().
 */
3646 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3649 struct mgmt_cp_user_passkey_reply *cp = data;
3653 return user_pairing_resp(sk, hdev, &cp->addr,
3654 MGMT_OP_USER_PASSKEY_REPLY,
3655 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative-passkey HCI opcode.
 */
3658 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3659 void *data, u16 len)
3661 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3665 return user_pairing_resp(sk, hdev, &cp->addr,
3666 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3667 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (carrying hdev->dev_name) to
 * the given request.
 */
3670 static void update_name(struct hci_request *req)
3672 struct hci_dev *hdev = req->hdev;
3673 struct hci_cp_write_local_name cp;
3675 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3677 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: resolve the
 * pending mgmt command with the HCI status.
 */
3680 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3682 struct mgmt_cp_set_local_name *cp;
3683 struct mgmt_pending_cmd *cmd;
3685 BT_DBG("status 0x%02x", status);
3689 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3696 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3697 mgmt_status(status));
3699 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3702 mgmt_pending_remove(cmd);
3705 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short
 * name. Unchanged names and the powered-off case complete immediately;
 * otherwise an HCI request writes the name (and scan response data for
 * LE) asynchronously, completed by set_name_complete().
 */
3708 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3711 struct mgmt_cp_set_local_name *cp = data;
3712 struct mgmt_pending_cmd *cmd;
3713 struct hci_request req;
3720 /* If the old values are the same as the new ones just return a
3721 * direct command complete event.
3723 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3724 !memcmp(hdev->short_name, cp->short_name,
3725 sizeof(hdev->short_name))) {
3726 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3731 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* While powered off, only store the name and notify listeners. */
3733 if (!hdev_is_powered(hdev)) {
3734 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3736 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3741 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3747 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3753 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3755 hci_req_init(&req, hdev);
3757 if (lmp_bredr_capable(hdev)) {
3762 /* The name is stored in the scan response data and so
3763 * no need to udpate the advertising data here.
3765 if (lmp_le_capable(hdev))
3766 update_scan_rsp_data(&req);
3768 err = hci_req_run(&req, set_name_complete);
3770 mgmt_pending_remove(cmd);
3773 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request local OOB pairing data
 * from the controller (extended variant when Secure Connections over
 * BR/EDR is enabled). Requires power, SSP support, and no duplicate
 * request in flight.
 */
3777 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3778 void *data, u16 data_len)
3780 struct mgmt_pending_cmd *cmd;
3783 BT_DBG("%s", hdev->name);
3787 if (!hdev_is_powered(hdev)) {
3788 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3789 MGMT_STATUS_NOT_POWERED);
3793 if (!lmp_ssp_capable(hdev)) {
3794 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3795 MGMT_STATUS_NOT_SUPPORTED);
3799 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3800 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3805 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3811 if (bredr_sc_enabled(hdev))
3812 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3815 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3818 mgmt_pending_remove(cmd);
3821 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing data.
 * Accepts two command sizes: the legacy form with a single P-192
 * hash/randomizer pair (BR/EDR only) and the extended form with both
 * P-192 and P-256 values. All-zero values disable that key set.
 */
3825 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3826 void *data, u16 len)
3828 struct mgmt_addr_info *addr = data;
3831 BT_DBG("%s ", hdev->name);
3833 if (!bdaddr_type_is_valid(addr->type))
3834 return mgmt_cmd_complete(sk, hdev->id,
3835 MGMT_OP_ADD_REMOTE_OOB_DATA,
3836 MGMT_STATUS_INVALID_PARAMS,
3837 addr, sizeof(*addr));
/* Legacy command: P-192 hash + randomizer only. */
3841 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3842 struct mgmt_cp_add_remote_oob_data *cp = data;
3845 if (cp->addr.type != BDADDR_BREDR) {
3846 err = mgmt_cmd_complete(sk, hdev->id,
3847 MGMT_OP_ADD_REMOTE_OOB_DATA,
3848 MGMT_STATUS_INVALID_PARAMS,
3849 &cp->addr, sizeof(cp->addr));
3853 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3854 cp->addr.type, cp->hash,
3855 cp->rand, NULL, NULL);
3857 status = MGMT_STATUS_FAILED;
3859 status = MGMT_STATUS_SUCCESS;
3861 err = mgmt_cmd_complete(sk, hdev->id,
3862 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3863 &cp->addr, sizeof(cp->addr));
3864 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3865 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3866 u8 *rand192, *hash192, *rand256, *hash256;
3869 if (bdaddr_type_is_le(cp->addr.type)) {
3870 /* Enforce zero-valued 192-bit parameters as
3871 * long as legacy SMP OOB isn't implemented.
3873 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3874 memcmp(cp->hash192, ZERO_KEY, 16)) {
3875 err = mgmt_cmd_complete(sk, hdev->id,
3876 MGMT_OP_ADD_REMOTE_OOB_DATA,
3877 MGMT_STATUS_INVALID_PARAMS,
3878 addr, sizeof(*addr));
3885 /* In case one of the P-192 values is set to zero,
3886 * then just disable OOB data for P-192.
3888 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3889 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3893 rand192 = cp->rand192;
3894 hash192 = cp->hash192;
3898 /* In case one of the P-256 values is set to zero, then just
3899 * disable OOB data for P-256.
3901 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3902 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3906 rand256 = cp->rand256;
3907 hash256 = cp->hash256;
3910 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3911 cp->addr.type, hash192, rand192,
3914 status = MGMT_STATUS_FAILED;
3916 status = MGMT_STATUS_SUCCESS;
3918 err = mgmt_cmd_complete(sk, hdev->id,
3919 MGMT_OP_ADD_REMOTE_OOB_DATA,
3920 status, &cp->addr, sizeof(cp->addr));
/* Any other length is malformed. */
3922 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3923 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3924 MGMT_STATUS_INVALID_PARAMS);
3928 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored remote OOB data
 * for one BR/EDR address, or all entries when BDADDR_ANY is given.
 */
3932 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3933 void *data, u16 len)
3935 struct mgmt_cp_remove_remote_oob_data *cp = data;
3939 BT_DBG("%s", hdev->name);
3941 if (cp->addr.type != BDADDR_BREDR)
3942 return mgmt_cmd_complete(sk, hdev->id,
3943 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3944 MGMT_STATUS_INVALID_PARAMS,
3945 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears every stored OOB entry. */
3949 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3950 hci_remote_oob_data_clear(hdev);
3951 status = MGMT_STATUS_SUCCESS;
3955 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3957 status = MGMT_STATUS_INVALID_PARAMS;
3959 status = MGMT_STATUS_SUCCESS;
3962 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3963 status, &cp->addr, sizeof(cp->addr));
3965 hci_dev_unlock(hdev);
/* Append a BR/EDR general inquiry (GIAC) to the request. Returns false
 * with *status set when BR/EDR is unsupported or an inquiry is already
 * running; flushes the inquiry cache before starting.
 */
3969 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
3971 struct hci_dev *hdev = req->hdev;
3972 struct hci_cp_inquiry cp;
3973 /* General inquiry access code (GIAC) */
3974 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3976 *status = mgmt_bredr_support(hdev);
3980 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
3981 *status = MGMT_STATUS_BUSY;
3985 hci_inquiry_cache_flush(hdev);
3987 memset(&cp, 0, sizeof(cp));
3988 memcpy(&cp.lap, lap, sizeof(cp.lap));
3989 cp.length = DISCOV_BREDR_INQUIRY_LEN;
3991 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Append an active LE scan with the given interval to the request.
 * Handles conflicts first (stop advertising unless directed-advertising
 * connect is in progress; pause background scanning), sets a suitable
 * own-address type, then queues scan-param + scan-enable commands.
 * Returns false with *status set on any rejection.
 */
3996 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
3998 struct hci_dev *hdev = req->hdev;
3999 struct hci_cp_le_set_scan_param param_cp;
4000 struct hci_cp_le_set_scan_enable enable_cp;
4004 *status = mgmt_le_support(hdev);
4008 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4009 /* Don't let discovery abort an outgoing connection attempt
4010 * that's using directed advertising.
4012 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
4013 *status = MGMT_STATUS_REJECTED;
4017 disable_advertising(req);
4020 /* If controller is scanning, it means the background scanning is
4021 * running. Thus, we should temporarily stop it in order to set the
4022 * discovery scanning parameters.
4024 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4025 hci_req_add_le_scan_disable(req);
4027 /* All active scans will be done with either a resolvable private
4028 * address (when privacy feature has been enabled) or non-resolvable
4031 err = hci_update_random_address(req, true, &own_addr_type);
4033 *status = MGMT_STATUS_FAILED;
4037 memset(&param_cp, 0, sizeof(param_cp));
4038 param_cp.type = LE_SCAN_ACTIVE;
4039 param_cp.interval = cpu_to_le16(interval);
4040 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4041 param_cp.own_address_type = own_addr_type;
4043 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4046 memset(&enable_cp, 0, sizeof(enable_cp));
4047 enable_cp.enable = LE_SCAN_ENABLE;
4048 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4050 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Build the HCI commands for the configured discovery type: BR/EDR
 * inquiry, LE scan, or interleaved (simultaneous when the controller
 * quirk allows it, with doubled LE scan interval to leave room for
 * inquiry). Returns false with *status set for unsupported/invalid
 * configurations.
 */
4056 static bool trigger_discovery(struct hci_request *req, u8 *status)
4058 struct hci_dev *hdev = req->hdev;
4060 switch (hdev->discovery.type) {
4061 case DISCOV_TYPE_BREDR:
4062 if (!trigger_bredr_inquiry(req, status))
4066 case DISCOV_TYPE_INTERLEAVED:
4067 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4069 /* During simultaneous discovery, we double LE scan
4070 * interval. We must leave some time for the controller
4071 * to do BR/EDR inquiry.
4073 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4077 if (!trigger_bredr_inquiry(req, status))
4083 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4084 *status = MGMT_STATUS_NOT_SUPPORTED;
4089 case DISCOV_TYPE_LE:
4090 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4095 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion for Start (Service) Discovery: complete the
 * pending mgmt command, move discovery state to FINDING (or back to
 * STOPPED on failure), and schedule the LE scan-disable timeout
 * appropriate for the discovery type.
 */
4102 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4105 struct mgmt_pending_cmd *cmd;
4106 unsigned long timeout;
4108 BT_DBG("status %d", status);
4112 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4114 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4117 cmd->cmd_complete(cmd, mgmt_status(status));
4118 mgmt_pending_remove(cmd);
4122 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4126 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4128 /* If the scan involves LE scan, pick proper timeout to schedule
4129 * hdev->le_scan_disable that will stop it.
4131 switch (hdev->discovery.type) {
4132 case DISCOV_TYPE_LE:
4133 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4135 case DISCOV_TYPE_INTERLEAVED:
4136 /* When running simultaneous discovery, the LE scanning time
4137 * should occupy the whole discovery time sine BR/EDR inquiry
4138 * and LE scanning are scheduled by the controller.
4140 * For interleaving discovery in comparison, BR/EDR inquiry
4141 * and LE scanning are done sequentially with separate
4144 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4145 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4147 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4149 case DISCOV_TYPE_BREDR:
4153 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4159 /* When service discovery is used and the controller has
4160 * a strict duplicate filter, it is important to remember
4161 * the start and duration of the scan. This is required
4162 * for restarting scanning during the discovery phase.
4164 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4166 hdev->discovery.result_filtering) {
4167 hdev->discovery.scan_start = jiffies;
4168 hdev->discovery.scan_duration = timeout;
4171 queue_delayed_work(hdev->workqueue,
4172 &hdev->le_scan_disable, timeout);
4176 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validate state, reset any previous
 * discovery filter, record the requested discovery type, and run the
 * HCI request built by trigger_discovery(). Completion is handled by
 * start_discovery_complete().
 */
4179 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4180 void *data, u16 len)
4182 struct mgmt_cp_start_discovery *cp = data;
4183 struct mgmt_pending_cmd *cmd;
4184 struct hci_request req;
4188 BT_DBG("%s", hdev->name);
4192 if (!hdev_is_powered(hdev)) {
4193 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4194 MGMT_STATUS_NOT_POWERED,
4195 &cp->type, sizeof(cp->type));
/* Busy if discovery is already running or periodic inquiry is on. */
4199 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4200 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4201 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4202 MGMT_STATUS_BUSY, &cp->type,
4207 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4213 cmd->cmd_complete = generic_cmd_complete;
4215 /* Clear the discovery filter first to free any previously
4216 * allocated memory for the UUID list.
4218 hci_discovery_filter_clear(hdev);
4220 hdev->discovery.type = cp->type;
4221 hdev->discovery.report_invalid_rssi = false;
4223 hci_req_init(&req, hdev);
4225 if (!trigger_discovery(&req, &status)) {
4226 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4227 status, &cp->type, sizeof(cp->type));
4228 mgmt_pending_remove(cmd);
4232 err = hci_req_run(&req, start_discovery_complete);
4234 mgmt_pending_remove(cmd);
4238 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4241 hci_dev_unlock(hdev);
/* Completion helper for Start Service Discovery; replies with the
 * command's own parameters (trailing arguments elided in this chunk).
 */
4245 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4248 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * additionally installs a result filter (RSSI threshold and a UUID
 * list copied from the command) before triggering discovery. The
 * variable-length command is validated against the declared uuid_count.
 */
4252 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4253 void *data, u16 len)
4255 struct mgmt_cp_start_service_discovery *cp = data;
4256 struct mgmt_pending_cmd *cmd;
4257 struct hci_request req;
/* Upper bound that keeps sizeof(*cp) + 16*count within a u16. */
4258 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4259 u16 uuid_count, expected_len;
4263 BT_DBG("%s", hdev->name);
4267 if (!hdev_is_powered(hdev)) {
4268 err = mgmt_cmd_complete(sk, hdev->id,
4269 MGMT_OP_START_SERVICE_DISCOVERY,
4270 MGMT_STATUS_NOT_POWERED,
4271 &cp->type, sizeof(cp->type));
4275 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4276 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4277 err = mgmt_cmd_complete(sk, hdev->id,
4278 MGMT_OP_START_SERVICE_DISCOVERY,
4279 MGMT_STATUS_BUSY, &cp->type,
/* Guard against overflow / short-packet attacks before touching the
 * UUID array. */
4284 uuid_count = __le16_to_cpu(cp->uuid_count);
4285 if (uuid_count > max_uuid_count) {
4286 BT_ERR("service_discovery: too big uuid_count value %u",
4288 err = mgmt_cmd_complete(sk, hdev->id,
4289 MGMT_OP_START_SERVICE_DISCOVERY,
4290 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4295 expected_len = sizeof(*cp) + uuid_count * 16;
4296 if (expected_len != len) {
4297 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4299 err = mgmt_cmd_complete(sk, hdev->id,
4300 MGMT_OP_START_SERVICE_DISCOVERY,
4301 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4306 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4313 cmd->cmd_complete = service_discovery_cmd_complete;
4315 /* Clear the discovery filter first to free any previously
4316 * allocated memory for the UUID list.
4318 hci_discovery_filter_clear(hdev);
4320 hdev->discovery.result_filtering = true;
4321 hdev->discovery.type = cp->type;
4322 hdev->discovery.rssi = cp->rssi;
4323 hdev->discovery.uuid_count = uuid_count;
4325 if (uuid_count > 0) {
4326 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4328 if (!hdev->discovery.uuids) {
4329 err = mgmt_cmd_complete(sk, hdev->id,
4330 MGMT_OP_START_SERVICE_DISCOVERY,
4332 &cp->type, sizeof(cp->type));
4333 mgmt_pending_remove(cmd);
4338 hci_req_init(&req, hdev);
4340 if (!trigger_discovery(&req, &status)) {
4341 err = mgmt_cmd_complete(sk, hdev->id,
4342 MGMT_OP_START_SERVICE_DISCOVERY,
4343 status, &cp->type, sizeof(cp->type));
4344 mgmt_pending_remove(cmd);
4348 err = hci_req_run(&req, start_discovery_complete);
4350 mgmt_pending_remove(cmd);
4354 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4357 hci_dev_unlock(hdev);
/* HCI request completion for Stop Discovery: resolve the pending mgmt
 * command and move discovery state back to STOPPED.
 */
4361 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4363 struct mgmt_pending_cmd *cmd;
4365 BT_DBG("status %d", status);
4369 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4371 cmd->cmd_complete(cmd, mgmt_status(status));
4372 mgmt_pending_remove(cmd);
4376 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4378 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: abort the active discovery whose type
 * matches the command. If stopping requires HCI commands, the pending
 * command completes asynchronously; -ENODATA from hci_req_run means
 * nothing needed stopping at the HCI level, so complete directly.
 */
4381 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4384 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4385 struct mgmt_pending_cmd *cmd;
4386 struct hci_request req;
4389 BT_DBG("%s", hdev->name);
4393 if (!hci_discovery_active(hdev)) {
4394 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4395 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4396 sizeof(mgmt_cp->type));
4400 if (hdev->discovery.type != mgmt_cp->type) {
4401 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4402 MGMT_STATUS_INVALID_PARAMS,
4403 &mgmt_cp->type, sizeof(mgmt_cp->type));
4407 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4413 cmd->cmd_complete = generic_cmd_complete;
4415 hci_req_init(&req, hdev);
4417 hci_stop_discovery(&req);
4419 err = hci_req_run(&req, stop_discovery_complete);
4421 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4425 mgmt_pending_remove(cmd);
4427 /* If no HCI commands were sent we're done */
4428 if (err == -ENODATA) {
4429 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4430 &mgmt_cp->type, sizeof(mgmt_cp->type));
4431 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4435 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, records whether the
 * remote device name is already known; if not, marks the inquiry cache
 * entry as needing name resolution.
 * NOTE(review): elided listing — some guards/braces are missing.
 */
4439 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4442 struct mgmt_cp_confirm_name *cp = data;
4443 struct inquiry_entry *e;
4446 BT_DBG("%s", hdev->name);
/* Only meaningful while discovery is active. */
4450 if (!hci_discovery_active(hdev)) {
4451 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4452 MGMT_STATUS_FAILED, &cp->addr,
/* Address must correspond to a cache entry with unknown name. */
4457 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4459 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4460 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4465 if (cp->name_known) {
4466 e->name_state = NAME_KNOWN;
4469 e->name_state = NAME_NEEDED;
4470 hci_inquiry_cache_update_resolve(hdev, e);
4473 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4474 &cp->addr, sizeof(cp->addr));
4477 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: adds the given address to the adapter
 * blacklist and emits MGMT_EV_DEVICE_BLOCKED on success.
 * NOTE(review): elided listing — error branch structure not fully shown.
 */
4481 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4484 struct mgmt_cp_block_device *cp = data;
4488 BT_DBG("%s", hdev->name);
4490 if (!bdaddr_type_is_valid(cp->addr.type))
4491 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4492 MGMT_STATUS_INVALID_PARAMS,
4493 &cp->addr, sizeof(cp->addr));
4497 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4500 status = MGMT_STATUS_FAILED;
/* List add succeeded: notify listeners (skipping the requester sk). */
4504 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4506 status = MGMT_STATUS_SUCCESS;
4509 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4510 &cp->addr, sizeof(cp->addr));
4512 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: removes the address from the adapter
 * blacklist and emits MGMT_EV_DEVICE_UNBLOCKED on success. A failed
 * delete is reported as INVALID_PARAMS (entry was not blocked).
 * NOTE(review): elided listing.
 */
4517 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4520 struct mgmt_cp_unblock_device *cp = data;
4524 BT_DBG("%s", hdev->name);
4526 if (!bdaddr_type_is_valid(cp->addr.type))
4527 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4528 MGMT_STATUS_INVALID_PARAMS,
4529 &cp->addr, sizeof(cp->addr));
4533 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4536 status = MGMT_STATUS_INVALID_PARAMS;
4540 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4542 status = MGMT_STATUS_SUCCESS;
4545 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4546 &cp->addr, sizeof(cp->addr));
4548 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: stores the Device ID (source, vendor,
 * product, version) in hdev and kicks off an HCI request — presumably to
 * refresh EIR/class data, the request body is elided here (TODO confirm).
 */
4553 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4556 struct mgmt_cp_set_device_id *cp = data;
4557 struct hci_request req;
4561 BT_DBG("%s", hdev->name);
4563 source = __le16_to_cpu(cp->source);
/* Valid DI source values are 0x0000-0x0002. */
4565 if (source > 0x0002)
4566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4567 MGMT_STATUS_INVALID_PARAMS);
4571 hdev->devid_source = source;
4572 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4573 hdev->devid_product = __le16_to_cpu(cp->product);
4574 hdev->devid_version = __le16_to_cpu(cp->version);
4576 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4579 hci_req_init(&req, hdev);
4581 hci_req_run(&req, NULL);
4583 hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising; only
 * logs the resulting status.
 */
4588 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4591 BT_DBG("status %d", status);
/* HCI request completion for Set Advertising: synchronizes the
 * HCI_ADVERTISING flag with the controller state (HCI_LE_ADV), answers
 * all pending SET_ADVERTISING commands, emits New Settings, and — if
 * software advertising was turned off while an advertising instance is
 * configured — re-enables the instance.
 * NOTE(review): elided listing — braces/early returns are missing.
 */
4594 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4597 struct cmd_lookup match = { NULL, hdev };
4598 struct hci_request req;
/* On failure, answer every pending command with the mapped error. */
4603 u8 mgmt_err = mgmt_status(status);
4605 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4606 cmd_status_rsp, &mgmt_err);
4610 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4611 hci_dev_set_flag(hdev, HCI_ADVERTISING)
4613 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4615 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4618 new_settings(hdev, match.sk);
4623 /* If "Set Advertising" was just disabled and instance advertising was
4624 * set up earlier, then enable the advertising instance.
4626 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4627 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
4630 hci_req_init(&req, hdev);
4632 update_adv_data(&req);
4633 enable_advertising(&req);
4635 if (hci_req_run(&req, enable_advertising_instance) < 0)
4636 BT_ERR("Failed to re-configure advertising");
4639 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: val 0x00 = off, 0x01 = on,
 * 0x02 = on + connectable. If the adapter is unpowered, the setting
 * already matches, LE connections exist, or active LE scanning is in
 * progress, only the flags are toggled and a direct response is sent;
 * otherwise an HCI request (dis/en)ables advertising asynchronously.
 * NOTE(review): elided listing — several braces/gotos are missing.
 */
4642 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4645 struct mgmt_mode *cp = data;
4646 struct mgmt_pending_cmd *cmd;
4647 struct hci_request req;
4651 BT_DBG("request for %s", hdev->name);
4653 status = mgmt_le_support(hdev);
4655 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4658 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4659 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4660 MGMT_STATUS_INVALID_PARAMS);
4666 /* The following conditions are ones which mean that we should
4667 * not do any HCI communication but directly send a mgmt
4668 * response to user space (after toggling the flag if
4671 if (!hdev_is_powered(hdev) ||
4672 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4673 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4674 hci_conn_num(hdev, LE_LINK) > 0 ||
4675 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4676 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Direct-response path: just flip the flags. */
4680 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4681 if (cp->val == 0x02)
4682 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4684 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4686 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4687 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4690 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4695 err = new_settings(hdev, sk);
/* Only one advertising/LE toggle may be in flight at a time. */
4700 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4701 pending_find(MGMT_OP_SET_LE, hdev)) {
4702 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4707 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4713 hci_req_init(&req, hdev);
4715 if (cp->val == 0x02)
4716 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4718 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4721 /* Switch to instance "0" for the Set Advertising setting. */
4722 update_adv_data_for_instance(&req, 0);
4723 update_scan_rsp_data_for_instance(&req, 0);
4724 enable_advertising(&req);
4726 disable_advertising(&req);
4729 err = hci_req_run(&req, set_advertising_complete);
4731 mgmt_pending_remove(cmd);
4734 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: stores a static random address.
 * Allowed only on LE-capable, powered-off adapters; a non-ANY address
 * must not be BDADDR_NONE and must have the two most significant bits
 * set (static random address format).
 * NOTE(review): elided listing.
 */
4738 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4739 void *data, u16 len)
4741 struct mgmt_cp_set_static_address *cp = data;
4744 BT_DBG("%s", hdev->name);
4746 if (!lmp_le_capable(hdev))
4747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4748 MGMT_STATUS_NOT_SUPPORTED);
4750 if (hdev_is_powered(hdev))
4751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4752 MGMT_STATUS_REJECTED);
4754 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4755 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4756 return mgmt_cmd_status(sk, hdev->id,
4757 MGMT_OP_SET_STATIC_ADDRESS,
4758 MGMT_STATUS_INVALID_PARAMS);
4760 /* Two most significant bits shall be set */
4761 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4762 return mgmt_cmd_status(sk, hdev->id,
4763 MGMT_OP_SET_STATIC_ADDRESS,
4764 MGMT_STATUS_INVALID_PARAMS);
4769 bacpy(&hdev->static_addr, &cp->bdaddr);
4771 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4775 err = new_settings(hdev, sk);
4778 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validates and stores the LE scan
 * interval and window (each in 0x0004..0x4000, window <= interval) and
 * restarts passive background scanning so the new values take effect.
 * NOTE(review): elided listing.
 */
4782 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4783 void *data, u16 len)
4785 struct mgmt_cp_set_scan_params *cp = data;
4786 __u16 interval, window;
4789 BT_DBG("%s", hdev->name);
4791 if (!lmp_le_capable(hdev))
4792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4793 MGMT_STATUS_NOT_SUPPORTED);
4795 interval = __le16_to_cpu(cp->interval);
4797 if (interval < 0x0004 || interval > 0x4000)
4798 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4799 MGMT_STATUS_INVALID_PARAMS);
4801 window = __le16_to_cpu(cp->window);
4803 if (window < 0x0004 || window > 0x4000)
4804 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4805 MGMT_STATUS_INVALID_PARAMS);
4807 if (window > interval)
4808 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4809 MGMT_STATUS_INVALID_PARAMS);
4813 hdev->le_scan_interval = interval;
4814 hdev->le_scan_window = window;
4816 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4819 /* If background scan is running, restart it so new parameters are
4822 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4823 hdev->discovery.state == DISCOVERY_STOPPED) {
4824 struct hci_request req;
4826 hci_req_init(&req, hdev);
4828 hci_req_add_le_scan_disable(&req);
4829 hci_req_add_le_passive_scan(&req);
4831 hci_req_run(&req, NULL);
4834 hci_dev_unlock(hdev);
/* HCI request completion for Set Fast Connectable: on failure sends an
 * error status; on success syncs HCI_FAST_CONNECTABLE with the requested
 * mode, replies with the settings, and emits New Settings.
 * NOTE(review): elided listing.
 */
4839 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4842 struct mgmt_pending_cmd *cmd;
4844 BT_DBG("status 0x%02x", status);
4848 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4853 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4854 mgmt_status(status));
4856 struct mgmt_mode *cp = cmd->param;
4859 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4861 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4863 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4864 new_settings(hdev, cmd->sk);
4867 mgmt_pending_remove(cmd);
4870 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and
 * HCI >= 1.2. If unpowered, only toggles the flag; otherwise issues
 * Write Page Scan Activity/Type via write_fast_connectable() and
 * completes asynchronously in fast_connectable_complete().
 * NOTE(review): elided listing — busy/no-change early exits lack braces.
 */
4873 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4874 void *data, u16 len)
4876 struct mgmt_mode *cp = data;
4877 struct mgmt_pending_cmd *cmd;
4878 struct hci_request req;
4881 BT_DBG("%s", hdev->name);
4883 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4884 hdev->hci_ver < BLUETOOTH_VER_1_2)
4885 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4886 MGMT_STATUS_NOT_SUPPORTED);
4888 if (cp->val != 0x00 && cp->val != 0x01)
4889 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4890 MGMT_STATUS_INVALID_PARAMS);
4894 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4895 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested value matches the current flag. */
4900 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4901 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4906 if (!hdev_is_powered(hdev)) {
4907 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4908 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4910 new_settings(hdev, sk);
4914 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4921 hci_req_init(&req, hdev);
4923 write_fast_connectable(&req, cp->val);
4925 err = hci_req_run(&req, fast_connectable_complete);
4927 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4928 MGMT_STATUS_FAILED);
4929 mgmt_pending_remove(cmd);
4933 hci_dev_unlock(hdev);
/* HCI request completion for Set BR/EDR: on failure restores (clears)
 * the prematurely-set HCI_BREDR_ENABLED flag and reports the error;
 * otherwise replies with the new settings and emits New Settings.
 * NOTE(review): elided listing.
 */
4938 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4940 struct mgmt_pending_cmd *cmd;
4942 BT_DBG("status 0x%02x", status);
4946 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4951 u8 mgmt_err = mgmt_status(status);
4953 /* We need to restore the flag if related HCI commands
4956 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4958 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4960 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4961 new_settings(hdev, cmd->sk);
4964 mgmt_pending_remove(cmd);
4967 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enables/disables the BR/EDR transport on a
 * dual-mode controller. Disabling while powered is rejected, as is
 * re-enabling when a static address or Secure Connections is in use.
 * When powered, enabling queues page-scan and advertising-data updates
 * and finishes in set_bredr_complete().
 * NOTE(review): elided listing — several braces/gotos are missing.
 */
4970 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4972 struct mgmt_mode *cp = data;
4973 struct mgmt_pending_cmd *cmd;
4974 struct hci_request req;
4977 BT_DBG("request for %s", hdev->name);
4979 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4981 MGMT_STATUS_NOT_SUPPORTED);
4983 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4985 MGMT_STATUS_REJECTED);
4987 if (cp->val != 0x00 && cp->val != 0x01)
4988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4989 MGMT_STATUS_INVALID_PARAMS);
4993 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4994 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4998 if (!hdev_is_powered(hdev)) {
/* Powered-off disable: clear all BR/EDR-only related flags. */
5000 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5001 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5002 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5003 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5004 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5007 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5009 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5013 err = new_settings(hdev, sk);
5017 /* Reject disabling when powered on */
5019 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5020 MGMT_STATUS_REJECTED);
5023 /* When configuring a dual-mode controller to operate
5024 * with LE only and using a static address, then switching
5025 * BR/EDR back on is not allowed.
5027 * Dual-mode controllers shall operate with the public
5028 * address as its identity address for BR/EDR and LE. So
5029 * reject the attempt to create an invalid configuration.
5031 * The same restrictions applies when secure connections
5032 * has been enabled. For BR/EDR this is a controller feature
5033 * while for LE it is a host stack feature. This means that
5034 * switching BR/EDR back on when secure connections has been
5035 * enabled is not a supported transaction.
5037 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5038 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5039 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5040 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5041 MGMT_STATUS_REJECTED);
5046 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5047 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5052 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5058 /* We need to flip the bit already here so that update_adv_data
5059 * generates the correct flags.
5061 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5063 hci_req_init(&req, hdev);
5065 write_fast_connectable(&req, false);
5066 __hci_update_page_scan(&req);
5068 /* Since only the advertising data flags will change, there
5069 * is no need to update the scan response data.
5071 update_adv_data(&req);
5073 err = hci_req_run(&req, set_bredr_complete);
5075 mgmt_pending_remove(cmd);
5078 hci_dev_unlock(hdev);
/* HCI completion for Write Secure Connections Support: maps the written
 * value onto the HCI_SC_ENABLED / HCI_SC_ONLY flag pair (0x00 = off,
 * 0x01 = SC, 0x02 = SC-only — TODO confirm, the switch labels are
 * elided), then replies and emits New Settings.
 */
5082 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5084 struct mgmt_pending_cmd *cmd;
5085 struct mgmt_mode *cp;
5087 BT_DBG("%s status %u", hdev->name, status);
5091 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5096 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5097 mgmt_status(status));
5105 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5106 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5109 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5110 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5113 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5114 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5118 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5119 new_settings(hdev, cmd->sk);
5122 mgmt_pending_remove(cmd);
5124 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: val 0x00 = off, 0x01 = SC,
 * 0x02 = SC-only. If the controller is unpowered, not SC-capable, or
 * BR/EDR is disabled, only the host-side flags are toggled; otherwise
 * Write SC Support is issued and sc_enable_complete() finishes up.
 * NOTE(review): elided listing — some braces/gotos are missing.
 */
5127 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5128 void *data, u16 len)
5130 struct mgmt_mode *cp = data;
5131 struct mgmt_pending_cmd *cmd;
5132 struct hci_request req;
5136 BT_DBG("request for %s", hdev->name);
5138 if (!lmp_sc_capable(hdev) &&
5139 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5140 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5141 MGMT_STATUS_NOT_SUPPORTED);
/* SC on BR/EDR requires SSP to already be enabled. */
5143 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5144 lmp_sc_capable(hdev) &&
5145 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5146 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5147 MGMT_STATUS_REJECTED);
5149 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5150 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5151 MGMT_STATUS_INVALID_PARAMS);
5155 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5156 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* Flag-only path: adjust SC_ENABLED / SC_ONLY and reply directly. */
5160 changed = !hci_dev_test_and_set_flag(hdev,
5162 if (cp->val == 0x02)
5163 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5165 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5167 changed = hci_dev_test_and_clear_flag(hdev,
5169 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5172 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5177 err = new_settings(hdev, sk);
5182 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5183 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5190 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5191 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5192 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5196 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5202 hci_req_init(&req, hdev);
5203 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5204 err = hci_req_run(&req, sc_enable_complete);
5206 mgmt_pending_remove(cmd);
5211 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: val 0x00 = off, 0x01 = keep debug
 * keys, 0x02 = also use SSP debug mode. Toggles HCI_KEEP_DEBUG_KEYS /
 * HCI_USE_DEBUG_KEYS and, when powered with SSP enabled, writes the
 * SSP debug mode to the controller.
 * NOTE(review): elided listing.
 */
5215 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5216 void *data, u16 len)
5218 struct mgmt_mode *cp = data;
5219 bool changed, use_changed;
5222 BT_DBG("request for %s", hdev->name);
5224 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5225 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5226 MGMT_STATUS_INVALID_PARAMS);
5231 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5233 changed = hci_dev_test_and_clear_flag(hdev,
5234 HCI_KEEP_DEBUG_KEYS);
5236 if (cp->val == 0x02)
5237 use_changed = !hci_dev_test_and_set_flag(hdev,
5238 HCI_USE_DEBUG_KEYS);
5240 use_changed = hci_dev_test_and_clear_flag(hdev,
5241 HCI_USE_DEBUG_KEYS);
/* Propagate the debug-mode change to the controller when relevant. */
5243 if (hdev_is_powered(hdev) && use_changed &&
5244 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5245 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5246 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5247 sizeof(mode), &mode);
5250 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5255 err = new_settings(hdev, sk);
5258 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enables/disables LE privacy. Only valid
 * on an LE-capable, powered-off adapter. On enable, stores the supplied
 * IRK and marks the RPA as expired so a fresh one is generated; on
 * disable, the IRK is wiped.
 * NOTE(review): elided listing.
 */
5262 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5265 struct mgmt_cp_set_privacy *cp = cp_data;
5269 BT_DBG("request for %s", hdev->name);
5271 if (!lmp_le_capable(hdev))
5272 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5273 MGMT_STATUS_NOT_SUPPORTED);
5275 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5277 MGMT_STATUS_INVALID_PARAMS);
5279 if (hdev_is_powered(hdev))
5280 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5281 MGMT_STATUS_REJECTED);
5285 /* If user space supports this command it is also expected to
5286 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5288 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5291 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5292 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5293 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5295 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5296 memset(hdev->irk, 0, sizeof(hdev->irk));
5297 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5300 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5305 err = new_settings(hdev, sk);
5308 hci_dev_unlock(hdev);
/* Validate an IRK entry's address: public LE addresses are accepted as
 * is; random LE addresses must be static (top two bits set).
 * NOTE(review): elided listing — the return statements are missing here.
 */
5312 static bool irk_is_valid(struct mgmt_irk_info *irk)
5314 switch (irk->addr.type) {
5315 case BDADDR_LE_PUBLIC:
5318 case BDADDR_LE_RANDOM:
5319 /* Two most significant bits shall be set */
5320 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: validates irk_count against the maximum
 * that fits in a u16-sized payload and against the actual message
 * length, validates every entry, then replaces the whole SMP IRK list
 * and enables RPA resolving.
 * NOTE(review): elided listing.
 */
5328 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5331 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound keeps expected_len from overflowing u16. */
5332 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5333 sizeof(struct mgmt_irk_info));
5334 u16 irk_count, expected_len;
5337 BT_DBG("request for %s", hdev->name);
5339 if (!lmp_le_capable(hdev))
5340 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5341 MGMT_STATUS_NOT_SUPPORTED);
5343 irk_count = __le16_to_cpu(cp->irk_count);
5344 if (irk_count > max_irk_count) {
5345 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5346 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5347 MGMT_STATUS_INVALID_PARAMS);
5350 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5351 if (expected_len != len) {
5352 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5355 MGMT_STATUS_INVALID_PARAMS);
5358 BT_DBG("%s irk_count %u", hdev->name, irk_count);
5360 for (i = 0; i < irk_count; i++) {
5361 struct mgmt_irk_info *key = &cp->irks[i];
5363 if (!irk_is_valid(key))
5364 return mgmt_cmd_status(sk, hdev->id,
5366 MGMT_STATUS_INVALID_PARAMS);
/* All entries valid: replace the stored IRK list wholesale. */
5371 hci_smp_irks_clear(hdev);
5373 for (i = 0; i < irk_count; i++) {
5374 struct mgmt_irk_info *irk = &cp->irks[i];
5377 if (irk->addr.type == BDADDR_LE_PUBLIC)
5378 addr_type = ADDR_LE_DEV_PUBLIC;
5380 addr_type = ADDR_LE_DEV_RANDOM;
5382 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5386 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5390 hci_dev_unlock(hdev);
/* Validate an LTK entry: master must be 0/1; public LE addresses are
 * accepted, random LE addresses must be static (top two bits set).
 * NOTE(review): elided listing — the return statements are missing here.
 */
5395 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5397 if (key->master != 0x00 && key->master != 0x01)
5400 switch (key->addr.type) {
5401 case BDADDR_LE_PUBLIC:
5404 case BDADDR_LE_RANDOM:
5405 /* Two most significant bits shall be set */
5406 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validates key_count against the
 * u16 payload maximum and the actual message length, validates each
 * entry, then replaces the SMP LTK store, mapping each mgmt key type to
 * the corresponding SMP type/authentication level.
 * NOTE(review): elided listing — break statements in the switch missing.
 */
5414 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5415 void *cp_data, u16 len)
5417 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps expected_len from overflowing u16. */
5418 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5419 sizeof(struct mgmt_ltk_info));
5420 u16 key_count, expected_len;
5423 BT_DBG("request for %s", hdev->name);
5425 if (!lmp_le_capable(hdev))
5426 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5427 MGMT_STATUS_NOT_SUPPORTED);
5429 key_count = __le16_to_cpu(cp->key_count);
5430 if (key_count > max_key_count) {
5431 BT_ERR("load_ltks: too big key_count value %u", key_count);
5432 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5433 MGMT_STATUS_INVALID_PARAMS);
5436 expected_len = sizeof(*cp) + key_count *
5437 sizeof(struct mgmt_ltk_info);
5438 if (expected_len != len) {
5439 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5442 MGMT_STATUS_INVALID_PARAMS);
5445 BT_DBG("%s key_count %u", hdev->name, key_count);
5447 for (i = 0; i < key_count; i++) {
5448 struct mgmt_ltk_info *key = &cp->keys[i];
5450 if (!ltk_is_valid(key))
5451 return mgmt_cmd_status(sk, hdev->id,
5452 MGMT_OP_LOAD_LONG_TERM_KEYS,
5453 MGMT_STATUS_INVALID_PARAMS);
/* All entries valid: replace the stored LTK list wholesale. */
5458 hci_smp_ltks_clear(hdev);
5460 for (i = 0; i < key_count; i++) {
5461 struct mgmt_ltk_info *key = &cp->keys[i];
5462 u8 type, addr_type, authenticated;
5464 if (key->addr.type == BDADDR_LE_PUBLIC)
5465 addr_type = ADDR_LE_DEV_PUBLIC;
5467 addr_type = ADDR_LE_DEV_RANDOM;
/* Map mgmt LTK type to SMP key type + authentication flag. */
5469 switch (key->type) {
5470 case MGMT_LTK_UNAUTHENTICATED:
5471 authenticated = 0x00;
5472 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5474 case MGMT_LTK_AUTHENTICATED:
5475 authenticated = 0x01;
5476 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5478 case MGMT_LTK_P256_UNAUTH:
5479 authenticated = 0x00;
5480 type = SMP_LTK_P256;
5482 case MGMT_LTK_P256_AUTH:
5483 authenticated = 0x01;
5484 type = SMP_LTK_P256;
5486 case MGMT_LTK_P256_DEBUG:
5487 authenticated = 0x00;
5488 type = SMP_LTK_P256_DEBUG;
5493 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5494 authenticated, key->val, key->enc_size, key->ediv,
5498 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5501 hci_dev_unlock(hdev);
/* cmd_complete hook for Get Connection Info: fills the reply with the
 * connection's cached RSSI/TX-power values on success or the invalid
 * sentinels on failure, sends it, and drops the connection reference
 * taken when the command was queued.
 * NOTE(review): elided listing.
 */
5506 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5508 struct hci_conn *conn = cmd->user_data;
5509 struct mgmt_rp_get_conn_info rp;
5512 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5514 if (status == MGMT_STATUS_SUCCESS) {
5515 rp.rssi = conn->rssi;
5516 rp.tx_power = conn->tx_power;
5517 rp.max_tx_power = conn->max_tx_power;
5519 rp.rssi = HCI_RSSI_INVALID;
5520 rp.tx_power = HCI_TX_POWER_INVALID;
5521 rp.max_tx_power = HCI_TX_POWER_INVALID;
5524 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5525 status, &rp, sizeof(rp));
5527 hci_conn_drop(conn);
/* HCI request completion for the RSSI/TX-power refresh issued by
 * get_conn_info(): recovers the connection handle from the last-sent
 * command, looks up the pending mgmt command for that connection and
 * completes it.
 * NOTE(review): elided listing.
 */
5533 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5536 struct hci_cp_read_rssi *cp;
5537 struct mgmt_pending_cmd *cmd;
5538 struct hci_conn *conn;
5542 BT_DBG("status 0x%02x", hci_status);
5546 /* Commands sent in request are either Read RSSI or Read Transmit Power
5547 * Level so we check which one was last sent to retrieve connection
5548 * handle. Both commands have handle as first parameter so it's safe to
5549 * cast data on the same command struct.
5551 * First command sent is always Read RSSI and we fail only if it fails.
5552 * In other case we simply override error to indicate success as we
5553 * already remembered if TX power value is actually valid.
5555 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5557 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5558 status = MGMT_STATUS_SUCCESS;
5560 status = mgmt_status(hci_status);
5564 BT_ERR("invalid sent_cmd in conn_info response");
5568 handle = __le16_to_cpu(cp->handle);
5569 conn = hci_conn_hash_lookup_handle(hdev, handle);
5571 BT_ERR("unknown handle (%d) in conn_info response", handle);
5575 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5579 cmd->cmd_complete(cmd, status);
5580 mgmt_pending_remove(cmd);
5583 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: returns RSSI / TX power / max TX power
 * for an active connection. Cached values are returned directly when
 * fresh; otherwise a Read RSSI (+ Read TX Power where needed) request
 * is queued and the reply is sent from conn_info_refresh_complete().
 * NOTE(review): elided listing — goto labels and some braces missing.
 */
5586 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5589 struct mgmt_cp_get_conn_info *cp = data;
5590 struct mgmt_rp_get_conn_info rp;
5591 struct hci_conn *conn;
5592 unsigned long conn_info_age;
5595 BT_DBG("%s", hdev->name);
5597 memset(&rp, 0, sizeof(rp));
5598 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5599 rp.addr.type = cp->addr.type;
5601 if (!bdaddr_type_is_valid(cp->addr.type))
5602 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5603 MGMT_STATUS_INVALID_PARAMS,
5608 if (!hdev_is_powered(hdev)) {
5609 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5610 MGMT_STATUS_NOT_POWERED, &rp,
/* Resolve the connection by transport type (BR/EDR vs LE). */
5615 if (cp->addr.type == BDADDR_BREDR)
5616 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5619 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5621 if (!conn || conn->state != BT_CONNECTED) {
5622 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5623 MGMT_STATUS_NOT_CONNECTED, &rp,
5628 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5629 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5630 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5634 /* To avoid client trying to guess when to poll again for information we
5635 * calculate conn info age as random value between min/max set in hdev.
5637 conn_info_age = hdev->conn_info_min_age +
5638 prandom_u32_max(hdev->conn_info_max_age -
5639 hdev->conn_info_min_age);
5641 /* Query controller to refresh cached values if they are too old or were
5644 if (time_after(jiffies, conn->conn_info_timestamp +
5645 msecs_to_jiffies(conn_info_age)) ||
5646 !conn->conn_info_timestamp) {
5647 struct hci_request req;
5648 struct hci_cp_read_tx_power req_txp_cp;
5649 struct hci_cp_read_rssi req_rssi_cp;
5650 struct mgmt_pending_cmd *cmd;
5652 hci_req_init(&req, hdev);
5653 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5654 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5657 /* For LE links TX power does not change thus we don't need to
5658 * query for it once value is known.
5660 if (!bdaddr_type_is_le(cp->addr.type) ||
5661 conn->tx_power == HCI_TX_POWER_INVALID) {
5662 req_txp_cp.handle = cpu_to_le16(conn->handle);
5663 req_txp_cp.type = 0x00;
5664 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5665 sizeof(req_txp_cp), &req_txp_cp);
5668 /* Max TX power needs to be read only once per connection */
5669 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5670 req_txp_cp.handle = cpu_to_le16(conn->handle);
5671 req_txp_cp.type = 0x01;
5672 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5673 sizeof(req_txp_cp), &req_txp_cp);
5676 err = hci_req_run(&req, conn_info_refresh_complete);
5680 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the async reply is delivered. */
5687 hci_conn_hold(conn);
5688 cmd->user_data = hci_conn_get(conn);
5689 cmd->cmd_complete = conn_info_cmd_complete;
5691 conn->conn_info_timestamp = jiffies;
5693 /* Cache is valid, just reply with values cached in hci_conn */
5694 rp.rssi = conn->rssi;
5695 rp.tx_power = conn->tx_power;
5696 rp.max_tx_power = conn->max_tx_power;
5698 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5699 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5703 hci_dev_unlock(hdev);
/* cmd_complete hook for Get Clock Info: fills in the local clock and,
 * when a connection was involved, the piconet clock and accuracy, then
 * sends the reply and releases the held connection.
 * NOTE(review): elided listing — null checks/hci_dev_put not shown.
 */
5707 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5709 struct hci_conn *conn = cmd->user_data;
5710 struct mgmt_rp_get_clock_info rp;
5711 struct hci_dev *hdev;
5714 memset(&rp, 0, sizeof(rp));
5715 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5720 hdev = hci_dev_get(cmd->index);
5722 rp.local_clock = cpu_to_le32(hdev->clock);
5727 rp.piconet_clock = cpu_to_le32(conn->clock);
5728 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5732 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5736 hci_conn_drop(conn);
/* HCI request completion for Read Clock: recovers the connection (if the
 * piconet clock was requested, which == nonzero), finds the matching
 * pending Get Clock Info command, and completes it.
 * NOTE(review): elided listing.
 */
5743 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5745 struct hci_cp_read_clock *hci_cp;
5746 struct mgmt_pending_cmd *cmd;
5747 struct hci_conn *conn;
5749 BT_DBG("%s status %u", hdev->name, status);
5753 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5757 if (hci_cp->which) {
5758 u16 handle = __le16_to_cpu(hci_cp->handle);
5759 conn = hci_conn_hash_lookup_handle(hdev, handle);
5764 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5768 cmd->cmd_complete(cmd, mgmt_status(status));
5769 mgmt_pending_remove(cmd);
5772 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): reads the local clock
 * and, when a peer address is given and connected, the piconet clock
 * via HCI Read Clock; the reply is delivered asynchronously from
 * get_clock_info_complete() / clock_info_cmd_complete().
 * NOTE(review): elided listing — goto labels and some braces missing.
 */
5775 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5778 struct mgmt_cp_get_clock_info *cp = data;
5779 struct mgmt_rp_get_clock_info rp;
5780 struct hci_cp_read_clock hci_cp;
5781 struct mgmt_pending_cmd *cmd;
5782 struct hci_request req;
5783 struct hci_conn *conn;
5786 BT_DBG("%s", hdev->name);
5788 memset(&rp, 0, sizeof(rp));
5789 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5790 rp.addr.type = cp->addr.type;
5792 if (cp->addr.type != BDADDR_BREDR)
5793 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5794 MGMT_STATUS_INVALID_PARAMS,
5799 if (!hdev_is_powered(hdev)) {
5800 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5801 MGMT_STATUS_NOT_POWERED, &rp,
/* BDADDR_ANY means "local clock only"; otherwise require a link. */
5806 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5807 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5809 if (!conn || conn->state != BT_CONNECTED) {
5810 err = mgmt_cmd_complete(sk, hdev->id,
5811 MGMT_OP_GET_CLOCK_INFO,
5812 MGMT_STATUS_NOT_CONNECTED,
5820 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5826 cmd->cmd_complete = clock_info_cmd_complete;
5828 hci_req_init(&req, hdev);
/* First Read Clock with zeroed params reads the local clock. */
5830 memset(&hci_cp, 0, sizeof(hci_cp));
5831 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5834 hci_conn_hold(conn);
5835 cmd->user_data = hci_conn_get(conn);
5837 hci_cp.handle = cpu_to_le16(conn->handle);
5838 hci_cp.which = 0x01; /* Piconet clock */
5839 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5842 err = hci_req_run(&req, get_clock_info_complete);
5844 mgmt_pending_remove(cmd);
5847 hci_dev_unlock(hdev);
/* Check whether an LE connection to the given address/type exists and
 * is in BT_CONNECTED state.
 * NOTE(review): elided listing — the return statements are missing here.
 */
5851 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5853 struct hci_conn *conn;
5855 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5859 if (conn->dst_type != type)
5862 if (conn->state != BT_CONNECTED)
5868 /* This function requires the caller holds hdev->lock */
5869 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5870 u8 addr_type, u8 auto_connect)
5872 struct hci_dev *hdev = req->hdev;
5873 struct hci_conn_params *params;
5875 params = hci_conn_params_add(hdev, addr, addr_type);
5879 if (params->auto_connect == auto_connect)
5882 list_del_init(¶ms->action);
5884 switch (auto_connect) {
5885 case HCI_AUTO_CONN_DISABLED:
5886 case HCI_AUTO_CONN_LINK_LOSS:
5887 __hci_update_background_scan(req);
5889 case HCI_AUTO_CONN_REPORT:
5890 list_add(¶ms->action, &hdev->pend_le_reports);
5891 __hci_update_background_scan(req);
5893 case HCI_AUTO_CONN_DIRECT:
5894 case HCI_AUTO_CONN_ALWAYS:
5895 if (!is_connected(hdev, addr, addr_type)) {
5896 list_add(¶ms->action, &hdev->pend_le_conns);
5897 __hci_update_background_scan(req);
5902 params->auto_connect = auto_connect;
5904 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
5910 static void device_added(struct sock *sk, struct hci_dev *hdev,
5911 bdaddr_t *bdaddr, u8 type, u8 action)
5913 struct mgmt_ev_device_added ev;
5915 bacpy(&ev.addr.bdaddr, bdaddr);
5916 ev.addr.type = type;
5919 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5922 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5924 struct mgmt_pending_cmd *cmd;
5926 BT_DBG("status 0x%02x", status);
5930 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5934 cmd->cmd_complete(cmd, mgmt_status(status));
5935 mgmt_pending_remove(cmd);
5938 hci_dev_unlock(hdev);
/* MGMT Add Device command handler.
 *
 * action 0x00 = background scan for the device (report only),
 * action 0x01 = allow incoming connection (BR/EDR whitelist) or direct
 *               LE connection, action 0x02 = auto-connect always.
 * BR/EDR devices go on hdev->whitelist; LE devices get a conn-params
 * entry via hci_conn_params_set().
 *
 * NOTE(review): several lines (hci_dev_lock(), error labels, closing
 * braces) were lost in extraction from this span.
 */
5941 static int add_device(struct sock *sk, struct hci_dev *hdev,
5942 void *data, u16 len)
5944 struct mgmt_cp_add_device *cp = data;
5945 struct mgmt_pending_cmd *cmd;
5946 struct hci_request req;
5947 u8 auto_conn, addr_type;
5950 BT_DBG("%s", hdev->name);
/* Reject invalid address types and the all-zero BDADDR_ANY address. */
5952 if (!bdaddr_type_is_valid(cp->addr.type) ||
5953 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5954 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5955 MGMT_STATUS_INVALID_PARAMS,
5956 &cp->addr, sizeof(cp->addr))
5958 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5959 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5960 MGMT_STATUS_INVALID_PARAMS,
5961 &cp->addr, sizeof(cp->addr));
5963 hci_req_init(&req, hdev);
5967 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5973 cmd->cmd_complete = addr_cmd_complete;
5975 if (cp->addr.type == BDADDR_BREDR) {
5976 /* Only incoming connections action is supported for now */
5977 if (cp->action != 0x01) {
5978 err = cmd->cmd_complete(cmd,
5979 MGMT_STATUS_INVALID_PARAMS);
5980 mgmt_pending_remove(cmd);
5984 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Whitelist changed: page scan may need enabling/disabling. */
5989 __hci_update_page_scan(&req);
5994 if (cp->addr.type == BDADDR_LE_PUBLIC)
5995 addr_type = ADDR_LE_DEV_PUBLIC;
5997 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt action value onto the internal auto-connect policy. */
5999 if (cp->action == 0x02)
6000 auto_conn = HCI_AUTO_CONN_ALWAYS;
6001 else if (cp->action == 0x01)
6002 auto_conn = HCI_AUTO_CONN_DIRECT;
6004 auto_conn = HCI_AUTO_CONN_REPORT;
6006 /* If the connection parameters don't exist for this device,
6007 * they will be created and configured with defaults.
6009 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6011 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6012 mgmt_pending_remove(cmd);
6017 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6019 err = hci_req_run(&req, add_device_complete);
6021 /* ENODATA means no HCI commands were needed (e.g. if
6022 * the adapter is powered off).
6024 if (err == -ENODATA)
6025 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6026 mgmt_pending_remove(cmd);
6030 hci_dev_unlock(hdev);
6034 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6035 bdaddr_t *bdaddr, u8 type)
6037 struct mgmt_ev_device_removed ev;
6039 bacpy(&ev.addr.bdaddr, bdaddr);
6040 ev.addr.type = type;
6042 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6045 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6047 struct mgmt_pending_cmd *cmd;
6049 BT_DBG("status 0x%02x", status);
6053 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6057 cmd->cmd_complete(cmd, mgmt_status(status));
6058 mgmt_pending_remove(cmd);
6061 hci_dev_unlock(hdev);
/* MGMT Remove Device command handler.
 *
 * A specific address removes that device: BR/EDR addresses come off the
 * whitelist, LE addresses lose their connection parameter entry (only
 * entries that were added via Add Device, i.e. not HCI_AUTO_CONN_DISABLED,
 * may be removed). BDADDR_ANY with type 0 clears the whole whitelist and
 * every non-disabled LE conn-params entry.
 *
 * NOTE(review): several lines (locking, braces, some list_del calls) were
 * lost in extraction from this span.
 */
6064 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6065 void *data, u16 len)
6067 struct mgmt_cp_remove_device *cp = data;
6068 struct mgmt_pending_cmd *cmd;
6069 struct hci_request req;
6072 BT_DBG("%s", hdev->name);
6074 hci_req_init(&req, hdev);
6078 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6084 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove one specific device. */
6086 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6087 struct hci_conn_params *params;
6090 if (!bdaddr_type_is_valid(cp->addr.type)) {
6091 err = cmd->cmd_complete(cmd,
6092 MGMT_STATUS_INVALID_PARAMS);
6093 mgmt_pending_remove(cmd);
6097 if (cp->addr.type == BDADDR_BREDR) {
6098 err = hci_bdaddr_list_del(&hdev->whitelist,
6102 err = cmd->cmd_complete(cmd,
6103 MGMT_STATUS_INVALID_PARAMS);
6104 mgmt_pending_remove(cmd);
/* Whitelist changed: page scan may need updating. */
6108 __hci_update_page_scan(&req);
6110 device_removed(sk, hdev, &cp->addr.bdaddr,
6115 if (cp->addr.type == BDADDR_LE_PUBLIC)
6116 addr_type = ADDR_LE_DEV_PUBLIC;
6118 addr_type = ADDR_LE_DEV_RANDOM;
6120 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6123 err = cmd->cmd_complete(cmd,
6124 MGMT_STATUS_INVALID_PARAMS);
6125 mgmt_pending_remove(cmd);
/* Disabled entries were not created via Add Device, so refuse
 * to remove them through this command.
 */
6129 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6130 err = cmd->cmd_complete(cmd,
6131 MGMT_STATUS_INVALID_PARAMS);
6132 mgmt_pending_remove(cmd);
6136 list_del(&params->action);
6137 list_del(&params->list);
6139 __hci_update_background_scan(&req);
6141 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6143 struct hci_conn_params *p, *tmp;
6144 struct bdaddr_list *b, *btmp;
/* BDADDR_ANY (clear-all) only accepts address type 0. */
6146 if (cp->addr.type) {
6147 err = cmd->cmd_complete(cmd,
6148 MGMT_STATUS_INVALID_PARAMS);
6149 mgmt_pending_remove(cmd);
6153 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6154 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6159 __hci_update_page_scan(&req);
6161 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6162 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6164 device_removed(sk, hdev, &p->addr, p->addr_type);
6165 list_del(&p->action);
6170 BT_DBG("All LE connection parameters were removed");
6172 __hci_update_background_scan(&req);
6176 err = hci_req_run(&req, remove_device_complete);
6178 /* ENODATA means no HCI commands were needed (e.g. if
6179 * the adapter is powered off).
6181 if (err == -ENODATA)
6182 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6183 mgmt_pending_remove(cmd);
6187 hci_dev_unlock(hdev);
6191 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6194 struct mgmt_cp_load_conn_param *cp = data;
6195 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6196 sizeof(struct mgmt_conn_param));
6197 u16 param_count, expected_len;
6200 if (!lmp_le_capable(hdev))
6201 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6202 MGMT_STATUS_NOT_SUPPORTED);
6204 param_count = __le16_to_cpu(cp->param_count);
6205 if (param_count > max_param_count) {
6206 BT_ERR("load_conn_param: too big param_count value %u",
6208 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6209 MGMT_STATUS_INVALID_PARAMS);
6212 expected_len = sizeof(*cp) + param_count *
6213 sizeof(struct mgmt_conn_param);
6214 if (expected_len != len) {
6215 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6218 MGMT_STATUS_INVALID_PARAMS);
6221 BT_DBG("%s param_count %u", hdev->name, param_count);
6225 hci_conn_params_clear_disabled(hdev);
6227 for (i = 0; i < param_count; i++) {
6228 struct mgmt_conn_param *param = &cp->params[i];
6229 struct hci_conn_params *hci_param;
6230 u16 min, max, latency, timeout;
6233 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
6236 if (param->addr.type == BDADDR_LE_PUBLIC) {
6237 addr_type = ADDR_LE_DEV_PUBLIC;
6238 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6239 addr_type = ADDR_LE_DEV_RANDOM;
6241 BT_ERR("Ignoring invalid connection parameters");
6245 min = le16_to_cpu(param->min_interval);
6246 max = le16_to_cpu(param->max_interval);
6247 latency = le16_to_cpu(param->latency);
6248 timeout = le16_to_cpu(param->timeout);
6250 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6251 min, max, latency, timeout);
6253 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6254 BT_ERR("Ignoring invalid connection parameters");
6258 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6261 BT_ERR("Failed to add connection parameters");
6265 hci_param->conn_min_interval = min;
6266 hci_param->conn_max_interval = max;
6267 hci_param->conn_latency = latency;
6268 hci_param->supervision_timeout = timeout;
6271 hci_dev_unlock(hdev);
6273 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT Set External Config command handler.
 *
 * Toggles HCI_EXT_CONFIGURED on controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. Only permitted while powered off.
 * When the configured-state of the controller actually changes, the
 * index is migrated between the configured and unconfigured lists.
 *
 * NOTE(review): lines between the flag update and the index handling
 * (including the "changed" checks and closing braces) were lost in
 * extraction from this span.
 */
6277 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6278 void *data, u16 len)
6280 struct mgmt_cp_set_external_config *cp = data;
6284 BT_DBG("%s", hdev->name);
6286 if (hdev_is_powered(hdev))
6287 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6288 MGMT_STATUS_REJECTED);
6290 if (cp->config != 0x00 && cp->config != 0x01)
6291 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6292 MGMT_STATUS_INVALID_PARAMS);
6294 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6295 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6296 MGMT_STATUS_NOT_SUPPORTED);
/* test_and_set/clear return the previous value, so "changed" is true
 * only when the flag actually flipped.
 */
6301 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6303 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6305 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6312 err = new_options(hdev, sk);
/* If the unconfigured flag no longer matches reality, re-announce the
 * index under the correct (configured/unconfigured) identity.
 */
6314 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6315 mgmt_index_removed(hdev);
6317 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6318 hci_dev_set_flag(hdev, HCI_CONFIG);
6319 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6321 queue_work(hdev->req_workqueue, &hdev->power_on);
6323 set_bit(HCI_RAW, &hdev->flags);
6324 mgmt_index_added(hdev);
6329 hci_dev_unlock(hdev);
/* MGMT Set Public Address command handler.
 *
 * Stores a public address for controllers that come up without one but
 * provide a set_bdaddr driver hook. Only permitted while powered off.
 * If this makes an unconfigured controller fully configured, the index
 * is re-announced as a configured controller and powered for setup.
 */
6333 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6334 void *data, u16 len)
6336 struct mgmt_cp_set_public_address *cp = data;
6340 BT_DBG("%s", hdev->name);
6342 if (hdev_is_powered(hdev))
6343 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6344 MGMT_STATUS_REJECTED);
6346 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6348 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook to program the address there is nothing we
 * could do with it.
 */
6350 if (!hdev->set_bdaddr)
6351 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6352 MGMT_STATUS_NOT_SUPPORTED);
6356 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6357 bacpy(&hdev->public_addr, &cp->bdaddr);
6359 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6366 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6367 err = new_options(hdev, sk);
/* Having a public address may complete configuration: switch the
 * index over and trigger power-on for the setup sequence.
 */
6369 if (is_configured(hdev)) {
6370 mgmt_index_removed(hdev);
6372 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6374 hci_dev_set_flag(hdev, HCI_CONFIG);
6375 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6377 queue_work(hdev->req_workqueue, &hdev->power_on);
6381 hci_dev_unlock(hdev);
6385 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6388 eir[eir_len++] = sizeof(type) + data_len;
6389 eir[eir_len++] = type;
6390 memcpy(&eir[eir_len], data, data_len);
6391 eir_len += data_len;
/* MGMT Read Local OOB Extended Data command handler.
 *
 * Builds an EIR-encoded blob of local out-of-band pairing data for either
 * BR/EDR (class of device, plus SSP hash/rand added elsewhere) or LE
 * (address, role, SC confirmation/random values, flags), returns it to the
 * caller and broadcasts it to sockets that subscribed to OOB data events.
 *
 * NOTE(review): several lines (lock calls, eir_len initialization per
 * case, break statements, kfree) were lost in extraction from this span.
 */
6396 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6397 void *data, u16 data_len)
6399 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6400 struct mgmt_rp_read_local_oob_ext_data *rp;
6403 u8 status, flags, role, addr[7], hash[16], rand[16];
6406 BT_DBG("%s", hdev->name);
6408 if (!hdev_is_powered(hdev))
6409 return mgmt_cmd_complete(sk, hdev->id,
6410 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6411 MGMT_STATUS_NOT_POWERED,
6412 &cp->type, sizeof(cp->type));
/* First pass over cp->type: validate the transport and compute the
 * worst-case EIR length needed for the reply allocation.
 */
6415 case BIT(BDADDR_BREDR):
6416 status = mgmt_bredr_support(hdev);
6418 return mgmt_cmd_complete(sk, hdev->id,
6419 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6424 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6425 status = mgmt_le_support(hdev);
6427 return mgmt_cmd_complete(sk, hdev->id,
6428 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
/* Worst case LE payload: address + role + SC hash + SC rand + flags. */
6431 eir_len = 9 + 3 + 18 + 18 + 3;
6434 return mgmt_cmd_complete(sk, hdev->id,
6435 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6436 MGMT_STATUS_INVALID_PARAMS,
6437 &cp->type, sizeof(cp->type));
6442 rp_len = sizeof(*rp) + eir_len;
6443 rp = kmalloc(rp_len, GFP_ATOMIC);
6445 hci_dev_unlock(hdev);
/* Second pass: actually populate the EIR blob. */
6451 case BIT(BDADDR_BREDR):
6452 eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
6453 hdev->dev_class, 3);
6455 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6456 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6457 smp_generate_oob(hdev, hash, rand) < 0) {
6458 hci_dev_unlock(hdev);
6459 err = mgmt_cmd_complete(sk, hdev->id,
6460 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6462 &cp->type, sizeof(cp->type));
/* Pick the address the peer should use: RPA when privacy is on,
 * otherwise static address when forced/required, else identity.
 */
6466 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6467 memcpy(addr, &hdev->rpa, 6);
6469 } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6470 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6471 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6472 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6473 memcpy(addr, &hdev->static_addr, 6);
6476 memcpy(addr, &hdev->bdaddr, 6);
6480 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6481 addr, sizeof(addr));
6483 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6488 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6489 &role, sizeof(role));
6491 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6492 eir_len = eir_append_data(rp->eir, eir_len,
6494 hash, sizeof(hash));
6496 eir_len = eir_append_data(rp->eir, eir_len,
6498 rand, sizeof(rand));
6501 flags = get_adv_discov_flags(hdev);
6503 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6504 flags |= LE_AD_NO_BREDR;
6506 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6507 &flags, sizeof(flags));
6511 rp->type = cp->type;
6512 rp->eir_len = cpu_to_le16(eir_len);
6514 hci_dev_unlock(hdev);
/* Subscribe the caller to future OOB data updates as a side effect. */
6516 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6519 MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
6523 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6524 rp, sizeof(*rp) + eir_len,
6525 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* MGMT Read Advertising Features command handler.
 *
 * Reports the advertising capabilities: supported flags (currently none),
 * maximum AD/scan-response payload sizes and the list of active instances.
 * Only a single instance (number 1) is supported by this implementation.
 */
6533 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6534 void *data, u16 data_len)
6536 struct mgmt_rp_read_adv_features *rp;
6541 BT_DBG("%s", hdev->name);
6545 rp_len = sizeof(*rp);
6547 /* Currently only one instance is supported, so just add 1 to the
6550 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6554 rp = kmalloc(rp_len, GFP_ATOMIC);
6556 hci_dev_unlock(hdev);
6560 rp->supported_flags = cpu_to_le32(0);
6561 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6562 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6563 rp->max_instances = 1;
6565 /* Currently only one instance is supported, so simply return the
6566 * current instance number.
6569 rp->num_instances = 1;
6570 rp->instance[0] = 1;
6572 rp->num_instances = 0;
6575 hci_dev_unlock(hdev);
6577 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6578 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Validate user-supplied advertising or scan-response TLV data.
 *
 * Walks the length/type/value fields and rejects malformed data (a field
 * length running past the buffer). When the kernel manages the Flags field
 * (any of the discoverable/managed-flags adv_flags set on AD data), a
 * user-supplied EIR_FLAGS field is rejected as well, and the usable
 * payload shrinks accordingly.
 *
 * NOTE(review): the declarations of i/cur_len, the max_len adjustment and
 * the return statements were lost in extraction from this span.
 */
6585 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6586 u8 len, bool is_adv_data)
6588 u8 max_len = HCI_MAX_AD_LENGTH;
6590 bool flags_managed = false;
6591 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6592 MGMT_ADV_FLAG_MANAGED_FLAGS;
6594 if (is_adv_data && (adv_flags & flags_params)) {
6595 flags_managed = true;
6602 /* Make sure that the data is correctly formatted. */
6603 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Users may not supply their own Flags field when we manage it. */
6606 if (flags_managed && data[i + 1] == EIR_FLAGS)
6609 /* If the current field length would exceed the total data
6610 * length, then it's invalid.
6612 if (i + cur_len >= len)
/* HCI request completion callback for Add Advertising.
 *
 * On failure the advertising instance is torn down and its removal is
 * broadcast; the pending mgmt command (if any) is then completed with the
 * translated status, returning the instance number on success.
 */
6619 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6622 struct mgmt_pending_cmd *cmd;
6623 struct mgmt_rp_add_advertising rp;
6625 BT_DBG("status %d", status);
6629 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
/* On error, roll back the instance state set up by add_advertising().
 * cmd may be NULL here (e.g. timeout path), hence the ?: on the socket.
 */
6632 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6633 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6634 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6643 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6644 mgmt_status(status));
6646 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6647 mgmt_status(status), &rp, sizeof(rp));
6649 mgmt_pending_remove(cmd);
6652 hci_dev_unlock(hdev);
6655 static void adv_timeout_expired(struct work_struct *work)
6657 struct hci_dev *hdev = container_of(work, struct hci_dev,
6658 adv_instance.timeout_exp.work);
6660 hdev->adv_instance.timeout = 0;
6663 clear_adv_instance(hdev);
6664 hci_dev_unlock(hdev);
/* MGMT Add Advertising command handler.
 *
 * Validates and stores the single supported advertising instance (no 1):
 * flags, AD data, scan response data and an optional expiry timeout. If
 * the controller is powered and Set Advertising is not active, an HCI
 * request is queued to push the data and enable advertising; the mgmt
 * reply then comes from add_advertising_complete().
 *
 * NOTE(review): locking calls, error labels and some closing braces were
 * lost in extraction from this span.
 */
6667 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6668 void *data, u16 data_len)
6670 struct mgmt_cp_add_advertising *cp = data;
6671 struct mgmt_rp_add_advertising rp;
6676 struct mgmt_pending_cmd *cmd;
6677 struct hci_request req;
6679 BT_DBG("%s", hdev->name);
6681 status = mgmt_le_support(hdev);
6683 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6686 flags = __le32_to_cpu(cp->flags);
6687 timeout = __le16_to_cpu(cp->timeout);
6689 /* The current implementation only supports adding one instance */
6690 if (cp->instance != 0x01)
6691 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6692 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs the device running; otherwise it could never fire
 * meaningfully.
 */
6696 if (timeout && !hdev_is_powered(hdev)) {
6697 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6698 MGMT_STATUS_REJECTED);
/* Refuse while another advertising/LE state change is in flight. */
6702 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6703 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6704 pending_find(MGMT_OP_SET_LE, hdev)) {
6705 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6710 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6711 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6712 cp->scan_rsp_len, false)) {
6713 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6714 MGMT_STATUS_INVALID_PARAMS);
6718 INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
6720 hdev->adv_instance.flags = flags;
6721 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6722 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
/* Scan response data follows the AD data in the command payload. */
6724 if (cp->adv_data_len)
6725 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
6727 if (cp->scan_rsp_len)
6728 memcpy(hdev->adv_instance.scan_rsp_data,
6729 cp->data + cp->adv_data_len, cp->scan_rsp_len);
6731 if (hdev->adv_instance.timeout)
6732 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6734 hdev->adv_instance.timeout = timeout;
6737 queue_delayed_work(hdev->workqueue,
6738 &hdev->adv_instance.timeout_exp,
6739 msecs_to_jiffies(timeout * 1000));
/* Announce the new instance only on the first add. */
6741 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6742 advertising_added(sk, hdev, 1);
6744 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6745 * we have no HCI communication to make. Simply return.
6747 if (!hdev_is_powered(hdev) ||
6748 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6750 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6751 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6755 /* We're good to go, update advertising data, parameters, and start
6758 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6765 hci_req_init(&req, hdev);
6767 update_adv_data(&req);
6768 update_scan_rsp_data(&req);
6769 enable_advertising(&req);
6771 err = hci_req_run(&req, add_advertising_complete);
6773 mgmt_pending_remove(cmd);
6776 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising.
 *
 * The instance was already removed synchronously in remove_advertising();
 * a failure here only means advertising could not be disabled, so the
 * pending command is always completed with success.
 */
6781 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6784 struct mgmt_pending_cmd *cmd;
6785 struct mgmt_rp_remove_advertising rp;
6787 BT_DBG("status %d", status);
6791 /* A failure status here only means that we failed to disable
6792 * advertising. Otherwise, the advertising instance has been removed,
6793 * so report success.
6795 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6801 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6803 mgmt_pending_remove(cmd);
6806 hci_dev_unlock(hdev);
/* MGMT Remove Advertising command handler.
 *
 * Tears down the single supported advertising instance (instance 1; a
 * value of 0 means "all", which is equivalent here). Instance state is
 * cleared synchronously; if the controller is powered and Set Advertising
 * is not active, an HCI request is additionally queued to disable
 * advertising, completing via remove_advertising_complete().
 */
6809 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6810 void *data, u16 data_len)
6812 struct mgmt_cp_remove_advertising *cp = data;
6813 struct mgmt_rp_remove_advertising rp;
6815 struct mgmt_pending_cmd *cmd;
6816 struct hci_request req;
6818 BT_DBG("%s", hdev->name);
6820 /* The current implementation only allows modifying instance no 1. A
6821 * value of 0 indicates that all instances should be cleared.
6823 if (cp->instance > 1)
6824 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6825 MGMT_STATUS_INVALID_PARAMS);
/* Refuse while another advertising/LE state change is in flight. */
6829 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6830 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6831 pending_find(MGMT_OP_SET_LE, hdev)) {
6832 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6837 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
6838 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6839 MGMT_STATUS_INVALID_PARAMS);
6843 if (hdev->adv_instance.timeout)
6844 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
6846 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6848 advertising_removed(sk, hdev, 1);
6850 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6852 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6853 * we have no HCI communication to make. Simply return.
6855 if (!hdev_is_powered(hdev) ||
6856 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6858 err = mgmt_cmd_complete(sk, hdev->id,
6859 MGMT_OP_REMOVE_ADVERTISING,
6860 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6864 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6871 hci_req_init(&req, hdev);
6872 disable_advertising(&req);
6874 err = hci_req_run(&req, remove_advertising_complete);
6876 mgmt_pending_remove(cmd);
6879 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands: the array index equals the mgmt
 * opcode. Each entry gives the handler, the expected (fixed or minimum)
 * parameter size and optional flags (e.g. HCI_MGMT_UNTRUSTED for commands
 * allowed on untrusted sockets, HCI_MGMT_UNCONFIGURED for commands valid
 * on unconfigured controllers; variable-length flags appear on entries
 * such as load_link_keys in the full file).
 */
6884 static const struct hci_mgmt_handler mgmt_handlers[] = {
6885 { NULL }, /* 0x0000 (no command) */
6886 { read_version, MGMT_READ_VERSION_SIZE,
6888 HCI_MGMT_UNTRUSTED },
6889 { read_commands, MGMT_READ_COMMANDS_SIZE,
6891 HCI_MGMT_UNTRUSTED },
6892 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6894 HCI_MGMT_UNTRUSTED },
6895 { read_controller_info, MGMT_READ_INFO_SIZE,
6896 HCI_MGMT_UNTRUSTED },
6897 { set_powered, MGMT_SETTING_SIZE },
6898 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6899 { set_connectable, MGMT_SETTING_SIZE },
6900 { set_fast_connectable, MGMT_SETTING_SIZE },
6901 { set_bondable, MGMT_SETTING_SIZE },
6902 { set_link_security, MGMT_SETTING_SIZE },
6903 { set_ssp, MGMT_SETTING_SIZE },
6904 { set_hs, MGMT_SETTING_SIZE },
6905 { set_le, MGMT_SETTING_SIZE },
6906 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6907 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6908 { add_uuid, MGMT_ADD_UUID_SIZE },
6909 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6910 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6912 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6914 { disconnect, MGMT_DISCONNECT_SIZE },
6915 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6916 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6917 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6918 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6919 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6920 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6921 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6922 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6923 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6924 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6925 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6926 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6927 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6929 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6930 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6931 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6932 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6933 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6934 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6935 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6936 { set_advertising, MGMT_SETTING_SIZE },
6937 { set_bredr, MGMT_SETTING_SIZE },
6938 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6939 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6940 { set_secure_conn, MGMT_SETTING_SIZE },
6941 { set_debug_keys, MGMT_SETTING_SIZE },
6942 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6943 { load_irks, MGMT_LOAD_IRKS_SIZE,
6945 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6946 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6947 { add_device, MGMT_ADD_DEVICE_SIZE },
6948 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6949 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6951 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6953 HCI_MGMT_UNTRUSTED },
6954 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6955 HCI_MGMT_UNCONFIGURED |
6956 HCI_MGMT_UNTRUSTED },
6957 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6958 HCI_MGMT_UNCONFIGURED },
6959 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6960 HCI_MGMT_UNCONFIGURED },
6961 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6963 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6964 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6966 HCI_MGMT_UNTRUSTED },
6967 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
6968 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
6970 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Announce a newly registered controller index to mgmt listeners.
 *
 * Raw-only devices are skipped entirely. For BR/EDR controllers either
 * the unconfigured or the regular Index Added event is sent depending on
 * the HCI_UNCONFIGURED flag; finally the extended Index Added event is
 * emitted for listeners of the extended index events.
 *
 * NOTE(review): the AMP-controller branch of the switch was lost in
 * extraction from this span.
 */
6973 void mgmt_index_added(struct hci_dev *hdev)
6975 struct mgmt_ev_ext_index ev;
6977 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6980 switch (hdev->dev_type) {
6982 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6983 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6984 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6987 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6988 HCI_MGMT_INDEX_EVENTS);
7001 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7002 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller index to mgmt listeners.
 *
 * All still-pending mgmt commands for the device are completed with
 * MGMT_STATUS_INVALID_INDEX first, then the (un)configured Index Removed
 * event and the extended Index Removed event are emitted. Raw-only
 * devices are skipped.
 *
 * NOTE(review): the AMP-controller branch of the switch was lost in
 * extraction from this span.
 */
7005 void mgmt_index_removed(struct hci_dev *hdev)
7007 struct mgmt_ev_ext_index ev;
7008 u8 status = MGMT_STATUS_INVALID_INDEX;
7010 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7013 switch (hdev->dev_type) {
/* Fail every outstanding command (opcode 0 == match all). */
7015 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7017 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7018 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7019 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7022 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7023 HCI_MGMT_INDEX_EVENTS);
7036 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7037 HCI_MGMT_EXT_INDEX_EVENTS);
7040 /* This function requires the caller holds hdev->lock */
7041 static void restart_le_actions(struct hci_request *req)
7043 struct hci_dev *hdev = req->hdev;
7044 struct hci_conn_params *p;
7046 list_for_each_entry(p, &hdev->le_conn_params, list) {
7047 /* Needed for AUTO_OFF case where might not "really"
7048 * have been powered off.
7050 list_del_init(&p->action);
7052 switch (p->auto_connect) {
7053 case HCI_AUTO_CONN_DIRECT:
7054 case HCI_AUTO_CONN_ALWAYS:
7055 list_add(&p->action, &hdev->pend_le_conns);
7057 case HCI_AUTO_CONN_REPORT:
7058 list_add(&p->action, &hdev->pend_le_reports);
7065 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on update sequence
 * started by powered_update_hci().
 *
 * Completes any pending Set Powered commands and emits New Settings.
 */
7068 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7070 struct cmd_lookup match = { NULL, hdev };
7072 BT_DBG("status 0x%02x", status);
7075 /* Register the available SMP channels (BR/EDR and LE) only
7076 * when successfully powering on the controller. This late
7077 * registration is required so that LE SMP can clearly
7078 * decide if the public address or static address is used.
/* settings_rsp collects the last notified socket in match.sk so the
 * New Settings event can skip it.
 */
7085 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7087 new_settings(hdev, match.sk);
7089 hci_dev_unlock(hdev);
/* Bring the controller's HCI state in sync with the mgmt settings after
 * power-on: SSP/SC host support, LE host support, advertising data and
 * state, pending LE connection actions, authentication, fast connectable
 * mode and page scan.
 *
 * Returns the result of hci_req_run(); 0 means HCI commands were queued
 * and powered_complete() will be called.
 */
7095 static int powered_update_hci(struct hci_dev *hdev)
7097 struct hci_request req;
7100 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host feature
 * bit is not yet set.
 */
7102 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7103 !lmp_host_ssp_capable(hdev)) {
7106 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7108 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7111 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7112 sizeof(support), &support);
7116 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7117 lmp_bredr_capable(hdev)) {
7118 struct hci_cp_write_le_host_supported cp;
7123 /* Check first if we already have the right
7124 * host state (host features set)
7126 if (cp.le != lmp_host_le_capable(hdev) ||
7127 cp.simul != lmp_host_le_br_capable(hdev))
7128 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7132 if (lmp_le_capable(hdev)) {
7133 /* Make sure the controller has a good default for
7134 * advertising data. This also applies to the case
7135 * where BR/EDR was toggled during the AUTO_OFF phase.
7137 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7138 update_adv_data(&req);
7139 update_scan_rsp_data(&req);
7142 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7143 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7144 enable_advertising(&req);
7146 restart_le_actions(&req);
/* Sync link-level authentication with the mgmt setting. */
7149 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7150 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7151 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7152 sizeof(link_sec), &link_sec);
7154 if (lmp_bredr_capable(hdev)) {
7155 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7156 write_fast_connectable(&req, true);
7158 write_fast_connectable(&req, false);
7159 __hci_update_page_scan(&req);
7165 return hci_req_run(&req, powered_complete);
/* Notify mgmt about a controller power state change.
 *
 * Power-on triggers the HCI sync sequence in powered_update_hci() (the
 * Set Powered replies then come from powered_complete()). Power-off
 * completes Set Powered immediately, fails all other pending commands
 * with NOT_POWERED (or INVALID_INDEX during unregistration), clears the
 * published class of device and emits New Settings.
 */
7168 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7170 struct cmd_lookup match = { NULL, hdev };
7171 u8 status, zero_cod[] = { 0, 0, 0 };
7174 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* powered_update_hci() returning 0 means HCI commands are in flight;
 * the pending Set Powered commands are answered in powered_complete().
 */
7178 if (powered_update_hci(hdev) == 0)
7181 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
7186 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7188 /* If the power off is because of hdev unregistration let
7189 * use the appropriate INVALID_INDEX status. Otherwise use
7190 * NOT_POWERED. We cover both scenarios here since later in
7191 * mgmt_index_removed() any hci_conn callbacks will have already
7192 * been triggered, potentially causing misleading DISCONNECTED
7195 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7196 status = MGMT_STATUS_INVALID_INDEX;
7198 status = MGMT_STATUS_NOT_POWERED;
7200 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7202 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7203 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7204 zero_cod, sizeof(zero_cod), NULL);
7207 err = new_settings(hdev, match.sk);
7215 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7217 struct mgmt_pending_cmd *cmd;
7220 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7224 if (err == -ERFKILL)
7225 status = MGMT_STATUS_RFKILLED;
7227 status = MGMT_STATUS_FAILED;
7229 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7231 mgmt_pending_remove(cmd);
/* Handle expiry of the discoverable timeout.
 *
 * Clears the (limited) discoverable flags, restores page-scan-only
 * visibility for BR/EDR, refreshes the advertising data when Set
 * Advertising is active, and emits New Settings.
 */
7234 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7236 struct hci_request req;
7240 /* When discoverable timeout triggers, then just make sure
7241 * the limited discoverable flag is cleared. Even in the case
7242 * of a timeout triggered from general discoverable, it is
7243 * safe to unconditionally clear the flag.
7245 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7246 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7248 hci_req_init(&req, hdev);
/* Drop inquiry scan, keep page scan so we stay connectable. */
7249 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7250 u8 scan = SCAN_PAGE;
7251 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7252 sizeof(scan), &scan);
7256 /* Advertising instances don't use the global discoverable setting, so
7257 * only update AD if advertising was enabled using Set Advertising.
7259 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7260 update_adv_data(&req);
7262 hci_req_run(&req, NULL);
7264 hdev->discov_timeout = 0;
7266 new_settings(hdev, NULL);
7268 hci_dev_unlock(hdev);
7271 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7274 struct mgmt_ev_new_link_key ev;
7276 memset(&ev, 0, sizeof(ev));
7278 ev.store_hint = persistent;
7279 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7280 ev.key.addr.type = BDADDR_BREDR;
7281 ev.key.type = key->type;
7282 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7283 ev.key.pin_len = key->pin_len;
7285 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7288 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7290 switch (ltk->type) {
7293 if (ltk->authenticated)
7294 return MGMT_LTK_AUTHENTICATED;
7295 return MGMT_LTK_UNAUTHENTICATED;
7297 if (ltk->authenticated)
7298 return MGMT_LTK_P256_AUTH;
7299 return MGMT_LTK_P256_UNAUTH;
7300 case SMP_LTK_P256_DEBUG:
7301 return MGMT_LTK_P256_DEBUG;
7304 return MGMT_LTK_UNAUTHENTICATED;
7307 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7309 struct mgmt_ev_new_long_term_key ev;
7311 memset(&ev, 0, sizeof(ev));
7313 /* Devices using resolvable or non-resolvable random addresses
7314 * without providing an indentity resolving key don't require
7315 * to store long term keys. Their addresses will change the
7318 * Only when a remote device provides an identity address
7319 * make sure the long term key is stored. If the remote
7320 * identity is known, the long term keys are internally
7321 * mapped to the identity address. So allow static random
7322 * and public addresses here.
7324 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7325 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7326 ev.store_hint = 0x00;
7328 ev.store_hint = persistent;
7330 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7331 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7332 ev.key.type = mgmt_ltk_type(key);
7333 ev.key.enc_size = key->enc_size;
7334 ev.key.ediv = key->ediv;
7335 ev.key.rand = key->rand;
7337 if (key->type == SMP_LTK)
7340 memcpy(ev.key.val, key->val, sizeof(key->val));
7342 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
7345 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7347 struct mgmt_ev_new_irk ev;
7349 memset(&ev, 0, sizeof(ev));
7351 /* For identity resolving keys from devices that are already
7352 * using a public address or static random address, do not
7353 * ask for storing this key. The identity resolving key really
7354 * is only mandatory for devices using resovlable random
7357 * Storing all identity resolving keys has the downside that
7358 * they will be also loaded on next boot of they system. More
7359 * identity resolving keys, means more time during scanning is
7360 * needed to actually resolve these addresses.
7362 if (bacmp(&irk->rpa, BDADDR_ANY))
7363 ev.store_hint = 0x01;
7365 ev.store_hint = 0x00;
7367 bacpy(&ev.rpa, &irk->rpa);
7368 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7369 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7370 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7372 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7375 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7378 struct mgmt_ev_new_csrk ev;
7380 memset(&ev, 0, sizeof(ev));
7382 /* Devices using resolvable or non-resolvable random addresses
7383 * without providing an indentity resolving key don't require
7384 * to store signature resolving keys. Their addresses will change
7385 * the next time around.
7387 * Only when a remote device provides an identity address
7388 * make sure the signature resolving key is stored. So allow
7389 * static random and public addresses here.
7391 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7392 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7393 ev.store_hint = 0x00;
7395 ev.store_hint = persistent;
7397 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7398 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7399 ev.key.type = csrk->type;
7400 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7402 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
7405 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7406 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7407 u16 max_interval, u16 latency, u16 timeout)
7409 struct mgmt_ev_new_conn_param ev;
7411 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7414 memset(&ev, 0, sizeof(ev));
7415 bacpy(&ev.addr.bdaddr, bdaddr);
7416 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7417 ev.store_hint = store_hint;
7418 ev.min_interval = cpu_to_le16(min_interval);
7419 ev.max_interval = cpu_to_le16(max_interval);
7420 ev.latency = cpu_to_le16(latency);
7421 ev.timeout = cpu_to_le16(timeout);
7423 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7426 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7427 u32 flags, u8 *name, u8 name_len)
7430 struct mgmt_ev_device_connected *ev = (void *) buf;
7433 bacpy(&ev->addr.bdaddr, &conn->dst);
7434 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7436 ev->flags = __cpu_to_le32(flags);
7438 /* We must ensure that the EIR Data fields are ordered and
7439 * unique. Keep it simple for now and avoid the problem by not
7440 * adding any BR/EDR data to the LE adv.
7442 if (conn->le_adv_data_len > 0) {
7443 memcpy(&ev->eir[eir_len],
7444 conn->le_adv_data, conn->le_adv_data_len);
7445 eir_len = conn->le_adv_data_len;
7448 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
7451 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7452 eir_len = eir_append_data(ev->eir, eir_len,
7454 conn->dev_class, 3);
7457 ev->eir_len = cpu_to_le16(eir_len);
7459 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7460 sizeof(*ev) + eir_len, NULL);
7463 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7465 struct sock **sk = data;
7467 cmd->cmd_complete(cmd, 0);
7472 mgmt_pending_remove(cmd);
7475 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7477 struct hci_dev *hdev = data;
7478 struct mgmt_cp_unpair_device *cp = cmd->param;
7480 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7482 cmd->cmd_complete(cmd, 0);
7483 mgmt_pending_remove(cmd);
7486 bool mgmt_powering_down(struct hci_dev *hdev)
7488 struct mgmt_pending_cmd *cmd;
7489 struct mgmt_mode *cp;
7491 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7502 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7503 u8 link_type, u8 addr_type, u8 reason,
7504 bool mgmt_connected)
7506 struct mgmt_ev_device_disconnected ev;
7507 struct sock *sk = NULL;
7509 /* The connection is still in hci_conn_hash so test for 1
7510 * instead of 0 to know if this is the last one.
7512 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7513 cancel_delayed_work(&hdev->power_off);
7514 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7517 if (!mgmt_connected)
7520 if (link_type != ACL_LINK && link_type != LE_LINK)
7523 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7525 bacpy(&ev.addr.bdaddr, bdaddr);
7526 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7529 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7534 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7538 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7539 u8 link_type, u8 addr_type, u8 status)
7541 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7542 struct mgmt_cp_disconnect *cp;
7543 struct mgmt_pending_cmd *cmd;
7545 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7548 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7554 if (bacmp(bdaddr, &cp->addr.bdaddr))
7557 if (cp->addr.type != bdaddr_type)
7560 cmd->cmd_complete(cmd, mgmt_status(status));
7561 mgmt_pending_remove(cmd);
7564 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7565 u8 addr_type, u8 status)
7567 struct mgmt_ev_connect_failed ev;
7569 /* The connection is still in hci_conn_hash so test for 1
7570 * instead of 0 to know if this is the last one.
7572 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7573 cancel_delayed_work(&hdev->power_off);
7574 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7577 bacpy(&ev.addr.bdaddr, bdaddr);
7578 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7579 ev.status = mgmt_status(status);
7581 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7584 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7586 struct mgmt_ev_pin_code_request ev;
7588 bacpy(&ev.addr.bdaddr, bdaddr);
7589 ev.addr.type = BDADDR_BREDR;
7592 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7595 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7598 struct mgmt_pending_cmd *cmd;
7600 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7604 cmd->cmd_complete(cmd, mgmt_status(status));
7605 mgmt_pending_remove(cmd);
7608 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7611 struct mgmt_pending_cmd *cmd;
7613 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7617 cmd->cmd_complete(cmd, mgmt_status(status));
7618 mgmt_pending_remove(cmd);
7621 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7622 u8 link_type, u8 addr_type, u32 value,
7625 struct mgmt_ev_user_confirm_request ev;
7627 BT_DBG("%s", hdev->name);
7629 bacpy(&ev.addr.bdaddr, bdaddr);
7630 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7631 ev.confirm_hint = confirm_hint;
7632 ev.value = cpu_to_le32(value);
7634 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7638 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7639 u8 link_type, u8 addr_type)
7641 struct mgmt_ev_user_passkey_request ev;
7643 BT_DBG("%s", hdev->name);
7645 bacpy(&ev.addr.bdaddr, bdaddr);
7646 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7648 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7652 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7653 u8 link_type, u8 addr_type, u8 status,
7656 struct mgmt_pending_cmd *cmd;
7658 cmd = pending_find(opcode, hdev);
7662 cmd->cmd_complete(cmd, mgmt_status(status));
7663 mgmt_pending_remove(cmd);
7668 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7669 u8 link_type, u8 addr_type, u8 status)
7671 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7672 status, MGMT_OP_USER_CONFIRM_REPLY);
7675 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7676 u8 link_type, u8 addr_type, u8 status)
7678 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7680 MGMT_OP_USER_CONFIRM_NEG_REPLY);
7683 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7684 u8 link_type, u8 addr_type, u8 status)
7686 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7687 status, MGMT_OP_USER_PASSKEY_REPLY);
7690 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7691 u8 link_type, u8 addr_type, u8 status)
7693 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7695 MGMT_OP_USER_PASSKEY_NEG_REPLY);
7698 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7699 u8 link_type, u8 addr_type, u32 passkey,
7702 struct mgmt_ev_passkey_notify ev;
7704 BT_DBG("%s", hdev->name);
7706 bacpy(&ev.addr.bdaddr, bdaddr);
7707 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7708 ev.passkey = __cpu_to_le32(passkey);
7709 ev.entered = entered;
7711 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7714 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7716 struct mgmt_ev_auth_failed ev;
7717 struct mgmt_pending_cmd *cmd;
7718 u8 status = mgmt_status(hci_status);
7720 bacpy(&ev.addr.bdaddr, &conn->dst);
7721 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7724 cmd = find_pairing(conn);
7726 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7727 cmd ? cmd->sk : NULL);
7730 cmd->cmd_complete(cmd, status);
7731 mgmt_pending_remove(cmd);
7735 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7737 struct cmd_lookup match = { NULL, hdev };
7741 u8 mgmt_err = mgmt_status(status);
7742 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7743 cmd_status_rsp, &mgmt_err);
7747 if (test_bit(HCI_AUTH, &hdev->flags))
7748 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7750 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7752 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7756 new_settings(hdev, match.sk);
7762 static void clear_eir(struct hci_request *req)
7764 struct hci_dev *hdev = req->hdev;
7765 struct hci_cp_write_eir cp;
7767 if (!lmp_ext_inq_capable(hdev))
7770 memset(hdev->eir, 0, sizeof(hdev->eir));
7772 memset(&cp, 0, sizeof(cp));
7774 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7777 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7779 struct cmd_lookup match = { NULL, hdev };
7780 struct hci_request req;
7781 bool changed = false;
7784 u8 mgmt_err = mgmt_status(status);
7786 if (enable && hci_dev_test_and_clear_flag(hdev,
7788 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7789 new_settings(hdev, NULL);
7792 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7798 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7800 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7802 changed = hci_dev_test_and_clear_flag(hdev,
7805 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7808 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7811 new_settings(hdev, match.sk);
7816 hci_req_init(&req, hdev);
7818 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7819 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7820 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7821 sizeof(enable), &enable);
7827 hci_req_run(&req, NULL);
7830 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7832 struct cmd_lookup *match = data;
7834 if (match->sk == NULL) {
7835 match->sk = cmd->sk;
7836 sock_hold(match->sk);
7840 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7843 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7845 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7846 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7847 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7850 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7851 dev_class, 3, NULL);
7857 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7859 struct mgmt_cp_set_local_name ev;
7860 struct mgmt_pending_cmd *cmd;
7865 memset(&ev, 0, sizeof(ev));
7866 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7867 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7869 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7871 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7873 /* If this is a HCI command related to powering on the
7874 * HCI dev don't send any mgmt signals.
7876 if (pending_find(MGMT_OP_SET_POWERED, hdev))
7880 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7881 cmd ? cmd->sk : NULL);
7884 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7885 u8 *rand192, u8 *hash256, u8 *rand256,
7888 struct mgmt_pending_cmd *cmd;
7890 BT_DBG("%s status %u", hdev->name, status);
7892 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7897 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7898 mgmt_status(status));
7900 struct mgmt_rp_read_local_oob_data rp;
7901 size_t rp_size = sizeof(rp);
7903 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7904 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7906 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7907 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7908 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7910 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7913 mgmt_cmd_complete(cmd->sk, hdev->id,
7914 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7918 mgmt_pending_remove(cmd);
7921 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7925 for (i = 0; i < uuid_count; i++) {
7926 if (!memcmp(uuid, uuids[i], 16))
7933 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7937 while (parsed < eir_len) {
7938 u8 field_len = eir[0];
7945 if (eir_len - parsed < field_len + 1)
7949 case EIR_UUID16_ALL:
7950 case EIR_UUID16_SOME:
7951 for (i = 0; i + 3 <= field_len; i += 2) {
7952 memcpy(uuid, bluetooth_base_uuid, 16);
7953 uuid[13] = eir[i + 3];
7954 uuid[12] = eir[i + 2];
7955 if (has_uuid(uuid, uuid_count, uuids))
7959 case EIR_UUID32_ALL:
7960 case EIR_UUID32_SOME:
7961 for (i = 0; i + 5 <= field_len; i += 4) {
7962 memcpy(uuid, bluetooth_base_uuid, 16);
7963 uuid[15] = eir[i + 5];
7964 uuid[14] = eir[i + 4];
7965 uuid[13] = eir[i + 3];
7966 uuid[12] = eir[i + 2];
7967 if (has_uuid(uuid, uuid_count, uuids))
7971 case EIR_UUID128_ALL:
7972 case EIR_UUID128_SOME:
7973 for (i = 0; i + 17 <= field_len; i += 16) {
7974 memcpy(uuid, eir + i + 2, 16);
7975 if (has_uuid(uuid, uuid_count, uuids))
7981 parsed += field_len + 1;
7982 eir += field_len + 1;
7988 static void restart_le_scan(struct hci_dev *hdev)
7990 /* If controller is not scanning we are done. */
7991 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7994 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7995 hdev->discovery.scan_start +
7996 hdev->discovery.scan_duration))
7999 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
8000 DISCOV_LE_RESTART_DELAY);
8003 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8004 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8006 /* If a RSSI threshold has been specified, and
8007 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8008 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8009 * is set, let it through for further processing, as we might need to
8012 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8013 * the results are also dropped.
8015 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8016 (rssi == HCI_RSSI_INVALID ||
8017 (rssi < hdev->discovery.rssi &&
8018 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8021 if (hdev->discovery.uuid_count != 0) {
8022 /* If a list of UUIDs is provided in filter, results with no
8023 * matching UUID should be dropped.
8025 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8026 hdev->discovery.uuids) &&
8027 !eir_has_uuids(scan_rsp, scan_rsp_len,
8028 hdev->discovery.uuid_count,
8029 hdev->discovery.uuids))
8033 /* If duplicate filtering does not report RSSI changes, then restart
8034 * scanning to ensure updated result with updated RSSI values.
8036 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8037 restart_le_scan(hdev);
8039 /* Validate RSSI value against the RSSI threshold once more. */
8040 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8041 rssi < hdev->discovery.rssi)
8048 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8049 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8050 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8053 struct mgmt_ev_device_found *ev = (void *)buf;
8056 /* Don't send events for a non-kernel initiated discovery. With
8057 * LE one exception is if we have pend_le_reports > 0 in which
8058 * case we're doing passive scanning and want these events.
8060 if (!hci_discovery_active(hdev)) {
8061 if (link_type == ACL_LINK)
8063 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8067 if (hdev->discovery.result_filtering) {
8068 /* We are using service discovery */
8069 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8074 /* Make sure that the buffer is big enough. The 5 extra bytes
8075 * are for the potential CoD field.
8077 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8080 memset(buf, 0, sizeof(buf));
8082 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8083 * RSSI value was reported as 0 when not available. This behavior
8084 * is kept when using device discovery. This is required for full
8085 * backwards compatibility with the API.
8087 * However when using service discovery, the value 127 will be
8088 * returned when the RSSI is not available.
8090 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8091 link_type == ACL_LINK)
8094 bacpy(&ev->addr.bdaddr, bdaddr);
8095 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8097 ev->flags = cpu_to_le32(flags);
8100 /* Copy EIR or advertising data into event */
8101 memcpy(ev->eir, eir, eir_len);
8103 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8104 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8107 if (scan_rsp_len > 0)
8108 /* Append scan response data to event */
8109 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8111 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8112 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8114 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
8117 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8118 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8120 struct mgmt_ev_device_found *ev;
8121 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8124 ev = (struct mgmt_ev_device_found *) buf;
8126 memset(buf, 0, sizeof(buf));
8128 bacpy(&ev->addr.bdaddr, bdaddr);
8129 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8132 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8135 ev->eir_len = cpu_to_le16(eir_len);
8137 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8140 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8142 struct mgmt_ev_discovering ev;
8144 BT_DBG("%s discovering %u", hdev->name, discovering);
8146 memset(&ev, 0, sizeof(ev));
8147 ev.type = hdev->discovery.type;
8148 ev.discovering = discovering;
8150 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8153 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8155 BT_DBG("%s status %u", hdev->name, status);
8158 void mgmt_reenable_advertising(struct hci_dev *hdev)
8160 struct hci_request req;
8162 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8163 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8166 hci_req_init(&req, hdev);
8167 enable_advertising(&req);
8168 hci_req_run(&req, adv_enable_complete);
8171 static struct hci_mgmt_chan chan = {
8172 .channel = HCI_CHANNEL_CONTROL,
8173 .handler_count = ARRAY_SIZE(mgmt_handlers),
8174 .handlers = mgmt_handlers,
8175 .hdev_init = mgmt_init_hdev,
8180 return hci_mgmt_chan_register(&chan);
8183 void mgmt_exit(void)
8185 hci_mgmt_chan_unregister(&chan);