2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
/* Management interface protocol version/revision, reported to user space
 * by read_version() via MGMT_OP_READ_VERSION.
 */
39 #define MGMT_VERSION 1
40 #define MGMT_REVISION 9
/* Opcodes accepted on the management control channel; the list is copied
 * verbatim into the MGMT_OP_READ_COMMANDS reply by read_commands().
 * NOTE(review): some entries (and the closing brace) are elided in this
 * excerpt.
 */
42 static const u16 mgmt_commands[] = {
43 MGMT_OP_READ_INDEX_LIST,
46 MGMT_OP_SET_DISCOVERABLE,
47 MGMT_OP_SET_CONNECTABLE,
48 MGMT_OP_SET_FAST_CONNECTABLE,
50 MGMT_OP_SET_LINK_SECURITY,
54 MGMT_OP_SET_DEV_CLASS,
55 MGMT_OP_SET_LOCAL_NAME,
58 MGMT_OP_LOAD_LINK_KEYS,
59 MGMT_OP_LOAD_LONG_TERM_KEYS,
61 MGMT_OP_GET_CONNECTIONS,
62 MGMT_OP_PIN_CODE_REPLY,
63 MGMT_OP_PIN_CODE_NEG_REPLY,
64 MGMT_OP_SET_IO_CAPABILITY,
66 MGMT_OP_CANCEL_PAIR_DEVICE,
67 MGMT_OP_UNPAIR_DEVICE,
68 MGMT_OP_USER_CONFIRM_REPLY,
69 MGMT_OP_USER_CONFIRM_NEG_REPLY,
70 MGMT_OP_USER_PASSKEY_REPLY,
71 MGMT_OP_USER_PASSKEY_NEG_REPLY,
72 MGMT_OP_READ_LOCAL_OOB_DATA,
73 MGMT_OP_ADD_REMOTE_OOB_DATA,
74 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
75 MGMT_OP_START_DISCOVERY,
76 MGMT_OP_STOP_DISCOVERY,
79 MGMT_OP_UNBLOCK_DEVICE,
80 MGMT_OP_SET_DEVICE_ID,
81 MGMT_OP_SET_ADVERTISING,
83 MGMT_OP_SET_STATIC_ADDRESS,
84 MGMT_OP_SET_SCAN_PARAMS,
85 MGMT_OP_SET_SECURE_CONN,
86 MGMT_OP_SET_DEBUG_KEYS,
89 MGMT_OP_GET_CONN_INFO,
90 MGMT_OP_GET_CLOCK_INFO,
92 MGMT_OP_REMOVE_DEVICE,
93 MGMT_OP_LOAD_CONN_PARAM,
94 MGMT_OP_READ_UNCONF_INDEX_LIST,
95 MGMT_OP_READ_CONFIG_INFO,
96 MGMT_OP_SET_EXTERNAL_CONFIG,
97 MGMT_OP_SET_PUBLIC_ADDRESS,
98 MGMT_OP_START_SERVICE_DISCOVERY,
/* Events that may be emitted on the management control channel; copied
 * into the MGMT_OP_READ_COMMANDS reply after the command opcodes.
 * NOTE(review): some entries (and the closing brace) are elided in this
 * excerpt.
 */
101 static const u16 mgmt_events[] = {
102 MGMT_EV_CONTROLLER_ERROR,
104 MGMT_EV_INDEX_REMOVED,
105 MGMT_EV_NEW_SETTINGS,
106 MGMT_EV_CLASS_OF_DEV_CHANGED,
107 MGMT_EV_LOCAL_NAME_CHANGED,
108 MGMT_EV_NEW_LINK_KEY,
109 MGMT_EV_NEW_LONG_TERM_KEY,
110 MGMT_EV_DEVICE_CONNECTED,
111 MGMT_EV_DEVICE_DISCONNECTED,
112 MGMT_EV_CONNECT_FAILED,
113 MGMT_EV_PIN_CODE_REQUEST,
114 MGMT_EV_USER_CONFIRM_REQUEST,
115 MGMT_EV_USER_PASSKEY_REQUEST,
117 MGMT_EV_DEVICE_FOUND,
119 MGMT_EV_DEVICE_BLOCKED,
120 MGMT_EV_DEVICE_UNBLOCKED,
121 MGMT_EV_DEVICE_UNPAIRED,
122 MGMT_EV_PASSKEY_NOTIFY,
125 MGMT_EV_DEVICE_ADDED,
126 MGMT_EV_DEVICE_REMOVED,
127 MGMT_EV_NEW_CONN_PARAM,
128 MGMT_EV_UNCONF_INDEX_ADDED,
129 MGMT_EV_UNCONF_INDEX_REMOVED,
130 MGMT_EV_NEW_CONFIG_OPTIONS,
/* Service-cache expiry delay (2 seconds, in jiffies) and a 16-byte
 * all-zero key literal (two concatenated 8-byte halves).
 */
133 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
135 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
136 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Book-keeping record for an in-flight management command; linked into
 * hdev->mgmt_pending (see mgmt_pending_add/remove below).
 * NOTE(review): several fields (opcode, index, param, param_len, sk,
 * user_data) are elided in this excerpt but are referenced by the
 * helpers further down - confirm against the full file.
 */
138 struct mgmt_pending_cmd {
139 struct list_head list;
146 int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
149 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte in mgmt_status() below.
 * NOTE(review): the entry for HCI success (index 0) and the closing
 * brace are not visible in this excerpt - confirm the table starts
 * with MGMT_STATUS_SUCCESS.
 */
150 static u8 mgmt_status_table[] = {
152 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
153 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
154 MGMT_STATUS_FAILED, /* Hardware Failure */
155 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
156 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
157 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
158 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
159 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
160 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
161 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
162 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
163 MGMT_STATUS_BUSY, /* Command Disallowed */
164 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
165 MGMT_STATUS_REJECTED, /* Rejected Security */
166 MGMT_STATUS_REJECTED, /* Rejected Personal */
167 MGMT_STATUS_TIMEOUT, /* Host Timeout */
168 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
169 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
170 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
171 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
172 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
173 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
174 MGMT_STATUS_BUSY, /* Repeated Attempts */
175 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
176 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
177 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
178 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
179 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
180 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
181 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
182 MGMT_STATUS_FAILED, /* Unspecified Error */
183 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
184 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
185 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
186 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
187 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
188 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
189 MGMT_STATUS_FAILED, /* Unit Link Key Used */
190 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
191 MGMT_STATUS_TIMEOUT, /* Instant Passed */
192 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
193 MGMT_STATUS_FAILED, /* Transaction Collision */
194 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
195 MGMT_STATUS_REJECTED, /* QoS Rejected */
196 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
197 MGMT_STATUS_REJECTED, /* Insufficient Security */
198 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
199 MGMT_STATUS_BUSY, /* Role Switch Pending */
200 MGMT_STATUS_FAILED, /* Slot Violation */
201 MGMT_STATUS_FAILED, /* Role Switch Failed */
202 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
203 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
204 MGMT_STATUS_BUSY, /* Host Busy Pairing */
205 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
206 MGMT_STATUS_BUSY, /* Controller Busy */
207 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
208 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
209 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
210 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
211 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Translate an HCI status code into the corresponding MGMT status.
 * Codes beyond the table fall back to MGMT_STATUS_FAILED.
 */
214 static u8 mgmt_status(u8 hci_status)
216 if (hci_status < ARRAY_SIZE(mgmt_status_table))
217 return mgmt_status_table[hci_status];
219 return MGMT_STATUS_FAILED;
/* Build an mgmt event skb (header + payload) and broadcast it on the
 * given HCI channel, skipping @skip_sk. When @hdev is NULL the index
 * field is set to MGMT_INDEX_NONE. The skb is timestamped before
 * delivery so user space can order events.
 */
222 static int mgmt_send_event(u16 event, struct hci_dev *hdev,
223 unsigned short channel, void *data, u16 data_len,
224 struct sock *skip_sk)
227 struct mgmt_hdr *hdr;
229 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
233 hdr = (void *) skb_put(skb, sizeof(*hdr));
234 hdr->opcode = cpu_to_le16(event);
236 hdr->index = cpu_to_le16(hdev->id);
238 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
239 hdr->len = cpu_to_le16(data_len);
242 memcpy(skb_put(skb, data_len), data, data_len);
245 __net_timestamp(skb);
247 hci_send_to_channel(channel, skb, skip_sk);
/* Convenience wrapper: emit an event on the control channel. */
253 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
254 struct sock *skip_sk)
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Queue a MGMT_EV_CMD_STATUS reply for @cmd on the requesting socket.
 * Used for commands that fail (or complete) without response data.
 */
260 static int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
263 struct mgmt_hdr *hdr;
264 struct mgmt_ev_cmd_status *ev;
267 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
269 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
273 hdr = (void *) skb_put(skb, sizeof(*hdr));
275 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
276 hdr->index = cpu_to_le16(index);
277 hdr->len = cpu_to_le16(sizeof(*ev));
279 ev = (void *) skb_put(skb, sizeof(*ev));
281 ev->opcode = cpu_to_le16(cmd);
283 err = sock_queue_rcv_skb(sk, skb);
/* Queue a MGMT_EV_CMD_COMPLETE reply for @cmd, copying @rp_len bytes of
 * response parameters (@rp may carry the command-specific reply struct).
 */
290 static int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
291 void *rp, size_t rp_len)
294 struct mgmt_hdr *hdr;
295 struct mgmt_ev_cmd_complete *ev;
298 BT_DBG("sock %p", sk);
300 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
304 hdr = (void *) skb_put(skb, sizeof(*hdr));
306 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
307 hdr->index = cpu_to_le16(index);
308 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
310 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
311 ev->opcode = cpu_to_le16(cmd);
315 memcpy(ev->data, rp, rp_len);
317 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with MGMT_VERSION/MGMT_REVISION.
 * Controller-independent, so the index is MGMT_INDEX_NONE.
 */
324 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
327 struct mgmt_rp_read_version rp;
329 BT_DBG("sock %p", sk);
331 rp.version = MGMT_VERSION;
332 rp.revision = cpu_to_le16(MGMT_REVISION);
334 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode lists. Opcodes are written with put_unaligned_le16()
 * since rp->opcodes is a packed u16 array.
 */
338 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
341 struct mgmt_rp_read_commands *rp;
342 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
343 const u16 num_events = ARRAY_SIZE(mgmt_events);
348 BT_DBG("sock %p", sk);
350 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
352 rp = kmalloc(rp_size, GFP_KERNEL);
356 rp->num_commands = cpu_to_le16(num_commands);
357 rp->num_events = cpu_to_le16(num_events);
/* Commands first, then events, in one contiguous opcode array */
359 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
360 put_unaligned_le16(mgmt_commands[i], opcode);
362 for (i = 0; i < num_events; i++, opcode++)
363 put_unaligned_le16(mgmt_events[i], opcode);
365 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all configured
 * BR/EDR controllers. Two passes under hci_dev_list_lock: first count
 * matching devices to size the reply, then fill in the ids, skipping
 * devices still in SETUP/CONFIG, user-channel devices and raw-only
 * (HCI_QUIRK_RAW_DEVICE) devices. GFP_ATOMIC because the read lock is
 * held across the allocation.
 */
372 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
375 struct mgmt_rp_read_index_list *rp;
381 BT_DBG("sock %p", sk);
383 read_lock(&hci_dev_list_lock);
386 list_for_each_entry(d, &hci_dev_list, list) {
387 if (d->dev_type == HCI_BREDR &&
388 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
392 rp_len = sizeof(*rp) + (2 * count);
393 rp = kmalloc(rp_len, GFP_ATOMIC);
395 read_unlock(&hci_dev_list_lock);
400 list_for_each_entry(d, &hci_dev_list, list) {
401 if (hci_dev_test_flag(d, HCI_SETUP) ||
402 hci_dev_test_flag(d, HCI_CONFIG) ||
403 hci_dev_test_flag(d, HCI_USER_CHANNEL))
406 /* Devices marked as raw-only are neither configured
407 * nor unconfigured controllers.
409 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
412 if (d->dev_type == HCI_BREDR &&
413 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
414 rp->index[count++] = cpu_to_le16(d->id);
415 BT_DBG("Added hci%u", d->id);
/* Recompute rp_len: the second pass may have skipped devices */
419 rp->num_controllers = cpu_to_le16(count);
420 rp_len = sizeof(*rp) + (2 * count);
422 read_unlock(&hci_dev_list_lock);
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass structure as
 * read_index_list() above, but reports BR/EDR controllers that still
 * carry the HCI_UNCONFIGURED flag.
 */
432 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
433 void *data, u16 data_len)
435 struct mgmt_rp_read_unconf_index_list *rp;
441 BT_DBG("sock %p", sk);
443 read_lock(&hci_dev_list_lock);
446 list_for_each_entry(d, &hci_dev_list, list) {
447 if (d->dev_type == HCI_BREDR &&
448 hci_dev_test_flag(d, HCI_UNCONFIGURED))
452 rp_len = sizeof(*rp) + (2 * count);
453 rp = kmalloc(rp_len, GFP_ATOMIC);
455 read_unlock(&hci_dev_list_lock);
460 list_for_each_entry(d, &hci_dev_list, list) {
461 if (hci_dev_test_flag(d, HCI_SETUP) ||
462 hci_dev_test_flag(d, HCI_CONFIG) ||
463 hci_dev_test_flag(d, HCI_USER_CHANNEL))
466 /* Devices marked as raw-only are neither configured
467 * nor unconfigured controllers.
469 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
472 if (d->dev_type == HCI_BREDR &&
473 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
474 rp->index[count++] = cpu_to_le16(d->id);
475 BT_DBG("Added hci%u", d->id);
479 rp->num_controllers = cpu_to_le16(count);
480 rp_len = sizeof(*rp) + (2 * count);
482 read_unlock(&hci_dev_list_lock);
484 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
485 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* A controller is "configured" once any required external config has
 * been applied and, if it came up with an invalid bdaddr, a public
 * address has been set.
 */
492 static bool is_configured(struct hci_dev *hdev)
494 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
495 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
498 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
499 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Return (little-endian) the configuration options that are still
 * outstanding for @hdev - mirrors the checks in is_configured().
 */
505 static __le32 get_missing_options(struct hci_dev *hdev)
509 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
510 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
511 options |= MGMT_OPTION_EXTERNAL_CONFIG;
513 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
514 !bacmp(&hdev->public_addr, BDADDR_ANY))
515 options |= MGMT_OPTION_PUBLIC_ADDRESS;
517 return cpu_to_le32(options);
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-option
 * mask, skipping the socket that triggered the change.
 */
520 static int new_options(struct hci_dev *hdev, struct sock *skip)
522 __le32 options = get_missing_options(hdev);
524 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
525 sizeof(options), skip);
/* Reply to a config command with the current missing-option mask. */
528 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
530 __le32 options = get_missing_options(hdev);
532 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer id plus the
 * supported and still-missing configuration options. Public-address
 * configuration is only offered when the driver provides set_bdaddr.
 */
536 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
537 void *data, u16 data_len)
539 struct mgmt_rp_read_config_info rp;
542 BT_DBG("sock %p %s", sk, hdev->name);
546 memset(&rp, 0, sizeof(rp));
547 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
549 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
550 options |= MGMT_OPTION_EXTERNAL_CONFIG;
552 if (hdev->set_bdaddr)
553 options |= MGMT_OPTION_PUBLIC_ADDRESS;
555 rp.supported_options = cpu_to_le32(options);
556 rp.missing_options = get_missing_options(hdev);
558 hci_dev_unlock(hdev);
560 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the MGMT "supported settings" bitmask from controller
 * capabilities: a base set for every controller, plus BR/EDR-, SSP-,
 * SC- and LE-dependent settings.
 */
564 static u32 get_supported_settings(struct hci_dev *hdev)
568 settings |= MGMT_SETTING_POWERED;
569 settings |= MGMT_SETTING_BONDABLE;
570 settings |= MGMT_SETTING_DEBUG_KEYS;
571 settings |= MGMT_SETTING_CONNECTABLE;
572 settings |= MGMT_SETTING_DISCOVERABLE;
574 if (lmp_bredr_capable(hdev)) {
/* Fast connectable requires at least Bluetooth 1.2 */
575 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
576 settings |= MGMT_SETTING_FAST_CONNECTABLE;
577 settings |= MGMT_SETTING_BREDR;
578 settings |= MGMT_SETTING_LINK_SECURITY;
580 if (lmp_ssp_capable(hdev)) {
581 settings |= MGMT_SETTING_SSP;
582 settings |= MGMT_SETTING_HS;
585 if (lmp_sc_capable(hdev))
586 settings |= MGMT_SETTING_SECURE_CONN;
589 if (lmp_le_capable(hdev)) {
590 settings |= MGMT_SETTING_LE;
591 settings |= MGMT_SETTING_ADVERTISING;
592 settings |= MGMT_SETTING_SECURE_CONN;
593 settings |= MGMT_SETTING_PRIVACY;
594 settings |= MGMT_SETTING_STATIC_ADDRESS;
597 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
599 settings |= MGMT_SETTING_CONFIGURATION;
/* Build the MGMT "current settings" bitmask from the device flags.
 * Each setting bit simply mirrors the corresponding HCI_* dev flag.
 */
604 static u32 get_current_settings(struct hci_dev *hdev)
608 if (hdev_is_powered(hdev))
609 settings |= MGMT_SETTING_POWERED;
611 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
612 settings |= MGMT_SETTING_CONNECTABLE;
614 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
615 settings |= MGMT_SETTING_FAST_CONNECTABLE;
617 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
618 settings |= MGMT_SETTING_DISCOVERABLE;
620 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
621 settings |= MGMT_SETTING_BONDABLE;
623 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
624 settings |= MGMT_SETTING_BREDR;
626 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
627 settings |= MGMT_SETTING_LE;
629 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
630 settings |= MGMT_SETTING_LINK_SECURITY;
632 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
633 settings |= MGMT_SETTING_SSP;
635 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
636 settings |= MGMT_SETTING_HS;
638 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
639 settings |= MGMT_SETTING_ADVERTISING;
641 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
642 settings |= MGMT_SETTING_SECURE_CONN;
644 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
645 settings |= MGMT_SETTING_DEBUG_KEYS;
647 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
648 settings |= MGMT_SETTING_PRIVACY;
650 /* The current setting for static address has two purposes. The
651 * first is to indicate if the static address will be used and
652 * the second is to indicate if it is actually set.
654 * This means if the static address is not configured, this flag
655 * will never bet set. If the address is configured, then if the
656 * address is actually used decides if the flag is set or not.
658 * For single mode LE only controllers and dual-mode controllers
659 * with BR/EDR disabled, the existence of the static address will
662 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
663 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
664 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
665 if (bacmp(&hdev->static_addr, BDADDR_ANY))
666 settings |= MGMT_SETTING_STATIC_ADDRESS;
/* SDP service class id for PnP Information; filtered out of the EIR
 * UUID-16 list in create_uuid16_list() below.
 */
672 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR UUID-16 list built from hdev->uuids to @data, stopping
 * at @len bytes. The list header type is downgraded from _ALL to _SOME
 * when truncation occurs. Returns the advanced write pointer.
 */
674 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
676 u8 *ptr = data, *uuids_start = NULL;
677 struct bt_uuid *uuid;
682 list_for_each_entry(uuid, &hdev->uuids, list) {
685 if (uuid->size != 16)
/* 16-bit alias lives in bytes 12..13 of the 128-bit form */
688 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
692 if (uuid16 == PNP_INFO_SVCLASS_ID)
698 uuids_start[1] = EIR_UUID16_ALL;
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u16) > len) {
704 uuids_start[1] = EIR_UUID16_SOME;
708 *ptr++ = (uuid16 & 0x00ff);
709 *ptr++ = (uuid16 & 0xff00) >> 8;
710 uuids_start[0] += sizeof(uuid16);
/* Append an EIR UUID-32 list to @data; same truncation handling as
 * create_uuid16_list() above.
 */
716 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
718 u8 *ptr = data, *uuids_start = NULL;
719 struct bt_uuid *uuid;
724 list_for_each_entry(uuid, &hdev->uuids, list) {
725 if (uuid->size != 32)
731 uuids_start[1] = EIR_UUID32_ALL;
735 /* Stop if not enough space to put next UUID */
736 if ((ptr - data) + sizeof(u32) > len) {
737 uuids_start[1] = EIR_UUID32_SOME;
741 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
743 uuids_start[0] += sizeof(u32);
/* Append an EIR UUID-128 list to @data; same truncation handling as
 * the 16- and 32-bit variants above, copying full 16-byte UUIDs.
 */
749 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
751 u8 *ptr = data, *uuids_start = NULL;
752 struct bt_uuid *uuid;
757 list_for_each_entry(uuid, &hdev->uuids, list) {
758 if (uuid->size != 128)
764 uuids_start[1] = EIR_UUID128_ALL;
768 /* Stop if not enough space to put next UUID */
769 if ((ptr - data) + 16 > len) {
770 uuids_start[1] = EIR_UUID128_SOME;
774 memcpy(ptr, uuid->uuid, 16);
776 uuids_start[0] += 16;
/* Look up the first pending mgmt command with @opcode on @hdev, or NULL.
 * NOTE(review): caller is expected to hold the appropriate hdev lock -
 * confirm against the full file.
 */
782 static struct mgmt_pending_cmd *mgmt_pending_find(u16 opcode,
783 struct hci_dev *hdev)
785 struct mgmt_pending_cmd *cmd;
787 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
788 if (cmd->opcode == opcode)
/* As mgmt_pending_find(), but additionally match on cmd->user_data. */
795 static struct mgmt_pending_cmd *mgmt_pending_find_data(u16 opcode,
796 struct hci_dev *hdev,
799 struct mgmt_pending_cmd *cmd;
801 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
802 if (cmd->user_data != data)
804 if (cmd->opcode == opcode)
/* Fill @ptr with LE scan response data containing the local name,
 * shortened (EIR_NAME_SHORT) if it exceeds the remaining AD space.
 * Returns the number of bytes written.
 */
811 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
816 name_len = strlen(hdev->dev_name);
818 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
820 if (name_len > max_len) {
822 ptr[1] = EIR_NAME_SHORT;
824 ptr[1] = EIR_NAME_COMPLETE;
/* AD structure length = name bytes + one type byte */
826 ptr[0] = name_len + 1;
828 memcpy(ptr + 2, hdev->dev_name, name_len);
830 ad_len += (name_len + 2);
831 ptr += (name_len + 2);
/* Queue HCI_OP_LE_SET_SCAN_RSP_DATA on @req if the freshly built scan
 * response differs from what the controller already has; also caches
 * the new data in hdev. No-op when LE is disabled.
 */
837 static void update_scan_rsp_data(struct hci_request *req)
839 struct hci_dev *hdev = req->hdev;
840 struct hci_cp_le_set_scan_rsp_data cp;
843 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
846 memset(&cp, 0, sizeof(cp));
848 len = create_scan_rsp_data(hdev, cp.data);
850 if (hdev->scan_rsp_data_len == len &&
851 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
854 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
855 hdev->scan_rsp_data_len = len;
859 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (general/limited).
 * A pending SET_DISCOVERABLE command takes precedence over the device
 * flags since those have not been updated yet.
 */
862 static u8 get_adv_discov_flags(struct hci_dev *hdev)
864 struct mgmt_pending_cmd *cmd;
866 /* If there's a pending mgmt command the flags will not yet have
867 * their final values, so check for this first.
869 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
871 struct mgmt_mode *cp = cmd->param;
873 return LE_AD_GENERAL;
874 else if (cp->val == 0x02)
875 return LE_AD_LIMITED;
877 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
878 return LE_AD_LIMITED;
879 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
880 return LE_AD_GENERAL;
/* Fill @ptr with LE advertising data: the flags AD structure (discov
 * flags, LE_AD_NO_BREDR when BR/EDR is off) and, when valid, a TX power
 * entry. Returns the number of bytes written.
 */
886 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
888 u8 ad_len = 0, flags = 0;
890 flags |= get_adv_discov_flags(hdev);
892 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
893 flags |= LE_AD_NO_BREDR;
896 BT_DBG("adv flags 0x%02x", flags);
906 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
908 ptr[1] = EIR_TX_POWER;
909 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue HCI_OP_LE_SET_ADV_DATA on @req if the freshly built advertising
 * data differs from the cached copy; mirrors update_scan_rsp_data().
 */
918 static void update_adv_data(struct hci_request *req)
920 struct hci_dev *hdev = req->hdev;
921 struct hci_cp_le_set_adv_data cp;
924 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
927 memset(&cp, 0, sizeof(cp));
929 len = create_adv_data(hdev, cp.data);
931 if (hdev->adv_data_len == len &&
932 memcmp(cp.data, hdev->adv_data, len) == 0)
935 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
936 hdev->adv_data_len = len;
940 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Public entry point: build and run a one-shot HCI request that
 * refreshes the advertising data.
 */
943 int mgmt_update_adv_data(struct hci_dev *hdev)
945 struct hci_request req;
947 hci_req_init(&req, hdev);
948 update_adv_data(&req);
950 return hci_req_run(&req, NULL);
/* Build the Extended Inquiry Response payload in @data: local name
 * (short or complete), inquiry TX power, Device ID record, then the
 * UUID-16/32/128 lists up to HCI_MAX_EIR_LENGTH.
 */
953 static void create_eir(struct hci_dev *hdev, u8 *data)
958 name_len = strlen(hdev->dev_name);
964 ptr[1] = EIR_NAME_SHORT;
966 ptr[1] = EIR_NAME_COMPLETE;
968 /* EIR Data length */
969 ptr[0] = name_len + 1;
971 memcpy(ptr + 2, hdev->dev_name, name_len);
973 ptr += (name_len + 2);
976 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
978 ptr[1] = EIR_TX_POWER;
979 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID record: source, vendor, product, version (LE 16-bit each) */
984 if (hdev->devid_source > 0) {
986 ptr[1] = EIR_DEVICE_ID;
988 put_unaligned_le16(hdev->devid_source, ptr + 2);
989 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
990 put_unaligned_le16(hdev->devid_product, ptr + 6);
991 put_unaligned_le16(hdev->devid_version, ptr + 8);
996 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
997 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
998 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue HCI_OP_WRITE_EIR on @req if the rebuilt EIR differs from the
 * cached copy. Skipped when powered off, the controller lacks extended
 * inquiry, SSP is disabled, or the service cache is active.
 */
1001 static void update_eir(struct hci_request *req)
1003 struct hci_dev *hdev = req->hdev;
1004 struct hci_cp_write_eir cp;
1006 if (!hdev_is_powered(hdev))
1009 if (!lmp_ext_inq_capable(hdev))
1012 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1015 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1018 memset(&cp, 0, sizeof(cp));
1020 create_eir(hdev, cp.data);
1022 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1025 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1027 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; used as
 * the top byte of the Class of Device in update_class().
 */
1030 static u8 get_service_classes(struct hci_dev *hdev)
1032 struct bt_uuid *uuid;
1035 list_for_each_entry(uuid, &hdev->uuids, list)
1036 val |= uuid->svc_hint;
/* Queue HCI_OP_WRITE_CLASS_OF_DEV on @req when the computed Class of
 * Device (minor, major, service classes) differs from the current one.
 * Skipped when powered off, BR/EDR disabled, or service cache active.
 */
1041 static void update_class(struct hci_request *req)
1043 struct hci_dev *hdev = req->hdev;
1046 BT_DBG("%s", hdev->name);
1048 if (!hdev_is_powered(hdev))
1051 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1054 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1057 cod[0] = hdev->minor_class;
1058 cod[1] = hdev->major_class;
1059 cod[2] = get_service_classes(hdev);
1061 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1064 if (memcmp(cod, hdev->dev_class, 3) == 0)
1067 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Return the (effective) connectable state, preferring the value from
 * a pending SET_CONNECTABLE command over the not-yet-updated dev flag.
 */
1070 static bool get_connectable(struct hci_dev *hdev)
1072 struct mgmt_pending_cmd *cmd;
1074 /* If there's a pending mgmt command the flag will not yet have
1075 * it's final value, so check for this first.
1077 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1079 struct mgmt_mode *cp = cmd->param;
1083 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Queue an LE advertising disable (enable = 0x00) on @req. */
1086 static void disable_advertising(struct hci_request *req)
1090 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the full LE advertising enable sequence on @req: disable any
 * running instance, refresh the random address, set parameters (type
 * chosen by connectable state) and finally enable. Bails out when an
 * LE connection exists or the random-address update fails.
 */
1093 static void enable_advertising(struct hci_request *req)
1095 struct hci_dev *hdev = req->hdev;
1096 struct hci_cp_le_set_adv_param cp;
1097 u8 own_addr_type, enable = 0x01;
1100 if (hci_conn_num(hdev, LE_LINK) > 0)
1103 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1104 disable_advertising(req);
1106 /* Clear the HCI_LE_ADV bit temporarily so that the
1107 * hci_update_random_address knows that it's safe to go ahead
1108 * and write a new random address. The flag will be set back on
1109 * as soon as the SET_ADV_ENABLE HCI command completes.
1111 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1113 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1116 connectable = get_connectable(hdev);
1118 /* Set require_privacy to true only when non-connectable
1119 * advertising is used. In that case it is fine to use a
1120 * non-resolvable private address.
1122 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1125 memset(&cp, 0, sizeof(cp));
1126 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1127 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1128 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1129 cp.own_address_type = own_addr_type;
1130 cp.channel_map = hdev->le_adv_channel_map;
1132 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1134 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work: when the service cache expires, push the (previously
 * suppressed) EIR/class updates to the controller.
 */
1137 static void service_cache_off(struct work_struct *work)
1139 struct hci_dev *hdev = container_of(work, struct hci_dev,
1140 service_cache.work);
1141 struct hci_request req;
1143 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1146 hci_req_init(&req, hdev);
1153 hci_dev_unlock(hdev);
1155 hci_req_run(&req, NULL);
/* Delayed work: mark the resolvable private address expired and, if
 * advertising is on, restart it so a fresh RPA gets programmed.
 */
1158 static void rpa_expired(struct work_struct *work)
1160 struct hci_dev *hdev = container_of(work, struct hci_dev,
1162 struct hci_request req;
1166 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1168 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1171 /* The generation of a new RPA and programming it into the
1172 * controller happens in the enable_advertising() function.
1174 hci_req_init(&req, hdev);
1175 enable_advertising(&req);
1176 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation (guarded by the HCI_MGMT
 * bit): set up the delayed-work handlers and clear HCI_BONDABLE, which
 * mgmt requires user space to enable explicitly.
 */
1179 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1181 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1184 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1185 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1187 /* Non-mgmt controlled devices get this bit set
1188 * implicitly so that pairing works for them, however
1189 * for mgmt we require user-space to explicitly enable
1192 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: report bdaddr, HCI version, manufacturer,
 * supported/current settings, class of device and names for @hdev.
 */
1195 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1196 void *data, u16 data_len)
1198 struct mgmt_rp_read_info rp;
1200 BT_DBG("sock %p %s", sk, hdev->name);
1204 memset(&rp, 0, sizeof(rp));
1206 bacpy(&rp.bdaddr, &hdev->bdaddr);
1208 rp.version = hdev->hci_ver;
1209 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1211 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1212 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1214 memcpy(rp.dev_class, hdev->dev_class, 3);
1216 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1217 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1219 hci_dev_unlock(hdev);
1221 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command record.
 * NOTE(review): body elided in this excerpt - presumably drops the
 * socket reference and frees cmd->param and cmd; confirm against the
 * full file.
 */
1225 static void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
/* Allocate a pending-command record, duplicate the request parameters
 * and link it into hdev->mgmt_pending. Returns the new record (error
 * paths elided in this excerpt).
 */
1232 static struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1233 struct hci_dev *hdev,
1234 void *data, u16 len)
1236 struct mgmt_pending_cmd *cmd;
1238 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1242 cmd->opcode = opcode;
1243 cmd->index = hdev->id;
1245 cmd->param = kmemdup(data, len, GFP_KERNEL);
1251 cmd->param_len = len;
1256 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command matching @opcode (0 matches all).
 * Uses the _safe iterator so callbacks may remove entries.
 */
1261 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1262 void (*cb)(struct mgmt_pending_cmd *cmd,
1266 struct mgmt_pending_cmd *cmd, *tmp;
1268 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1269 if (opcode > 0 && cmd->opcode != opcode)
/* Unlink a pending command from its list and free it. */
1276 static void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
1278 list_del(&cmd->list);
1279 mgmt_pending_free(cmd);
/* Reply to a settings command with the current settings bitmask. */
1282 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1284 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1286 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion hook for clean_up_hci_state(): once no connections remain,
 * fast-track the pending power-off work.
 */
1290 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1292 BT_DBG("%s status 0x%02x", hdev->name, status);
1294 if (hci_conn_count(hdev) == 0) {
1295 cancel_delayed_work(&hdev->power_off);
1296 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to stop whatever discovery activity is
 * running (inquiry, LE scan, name resolution, passive scanning) on
 * @req. Returns whether anything was queued.
 */
1300 static bool hci_stop_discovery(struct hci_request *req)
1302 struct hci_dev *hdev = req->hdev;
1303 struct hci_cp_remote_name_req_cancel cp;
1304 struct inquiry_entry *e;
1306 switch (hdev->discovery.state) {
1307 case DISCOVERY_FINDING:
1308 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1309 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1311 cancel_delayed_work(&hdev->le_scan_disable);
1312 hci_req_add_le_scan_disable(req);
1317 case DISCOVERY_RESOLVING:
1318 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1323 bacpy(&cp.bdaddr, &e->data.bdaddr);
1324 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1330 /* Passive scanning */
1331 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1332 hci_req_add_le_scan_disable(req);
/* Power-off preparation: queue one HCI request that disables page/
 * inquiry scan, stops advertising and discovery, and tears down every
 * connection (disconnect established links, cancel pending ones,
 * reject incoming ones with reason 0x15 "Terminated due to Power Off").
 * Moves discovery to STOPPING if it was being stopped.
 */
1342 static int clean_up_hci_state(struct hci_dev *hdev)
1344 struct hci_request req;
1345 struct hci_conn *conn;
1346 bool discov_stopped;
1349 hci_req_init(&req, hdev);
1351 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1352 test_bit(HCI_PSCAN, &hdev->flags)) {
1354 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1357 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1358 disable_advertising(&req);
1360 discov_stopped = hci_stop_discovery(&req);
1362 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1363 struct hci_cp_disconnect dc;
1364 struct hci_cp_reject_conn_req rej;
1366 switch (conn->state) {
1369 dc.handle = cpu_to_le16(conn->handle);
1370 dc.reason = 0x15; /* Terminated due to Power Off */
1371 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1374 if (conn->type == LE_LINK)
1375 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1377 else if (conn->type == ACL_LINK)
1378 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1382 bacpy(&rej.bdaddr, &conn->dst);
1383 rej.reason = 0x15; /* Terminated due to Power Off */
1384 if (conn->type == ACL_LINK)
1385 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1387 else if (conn->type == SCO_LINK)
1388 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1394 err = hci_req_run(&req, clean_up_hci_complete);
1395 if (!err && discov_stopped)
1396 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler. Validates val (0x00/0x01), rejects a
 * duplicate pending request, short-circuits auto-off cancellation and
 * no-op requests, then either queues power_on work or runs
 * clean_up_hci_state() followed by delayed power-off (-ENODATA from
 * the cleanup request means nothing was queued, so power off at once).
 */
1401 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1404 struct mgmt_mode *cp = data;
1405 struct mgmt_pending_cmd *cmd;
1408 BT_DBG("request for %s", hdev->name);
1410 if (cp->val != 0x00 && cp->val != 0x01)
1411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1412 MGMT_STATUS_INVALID_PARAMS);
1416 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1422 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1423 cancel_delayed_work(&hdev->power_off);
1426 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1428 err = mgmt_powered(hdev, 1);
1433 if (!!cp->val == hdev_is_powered(hdev)) {
1434 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1438 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1445 queue_work(hdev->req_workqueue, &hdev->power_on);
1448 /* Disconnect connections, stop scans, etc */
1449 err = clean_up_hci_state(hdev);
1451 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1452 HCI_POWER_OFF_TIMEOUT);
1454 /* ENODATA means there were no HCI commands queued */
1455 if (err == -ENODATA) {
1456 cancel_delayed_work(&hdev->power_off);
1457 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1463 hci_dev_unlock(hdev);
/* Broadcast a New Settings event carrying the current settings bitmask
 * to all management sockets except @skip (typically the originator,
 * which already got a command response).
 */
1467 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1471 ev = cpu_to_le32(get_current_settings(hdev));
1473 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Public wrapper: emit New Settings to every management socket. */
1476 int mgmt_new_settings(struct hci_dev *hdev)
1478 return new_settings(hdev, NULL);
1483 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, detach it from the pending list and free it.  The
 * first socket seen is stashed (with a reference) in the cmd_lookup so
 * the caller can later skip it when broadcasting New Settings.
 */
1487 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1489 struct cmd_lookup *match = data;
1491 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1493 list_del(&cmd->list);
1495 if (match->sk == NULL) {
1496 match->sk = cmd->sk;
/* Hold the socket: the caller uses and releases it after iterating. */
1497 sock_hold(match->sk);
1500 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
1503 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1507 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1508 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete a pending command with the
 * status in @data.  Commands that registered a cmd_complete handler get
 * a full Command Complete; the rest fall back to a plain Command Status
 * via cmd_status_rsp().
 */
1511 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1513 if (cmd->cmd_complete) {
1516 cmd->cmd_complete(cmd, *status);
1517 mgmt_pending_remove(cmd);
1522 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the response payload.
 */
1525 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1527 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1528 cmd->param, cmd->param_len);
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: reply with just that address portion.
 */
1531 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1533 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1534 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED if the
 * controller lacks BR/EDR, REJECTED if BR/EDR is disabled by the host,
 * SUCCESS otherwise.
 */
1537 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1539 if (!lmp_bredr_capable(hdev))
1540 return MGMT_STATUS_NOT_SUPPORTED;
1541 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1542 return MGMT_STATUS_REJECTED;
1544 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status: NOT_SUPPORTED if the
 * controller lacks LE, REJECTED if LE is disabled by the host,
 * SUCCESS otherwise.
 */
1547 static u8 mgmt_le_support(struct hci_dev *hdev)
1549 if (!lmp_le_capable(hdev))
1550 return MGMT_STATUS_NOT_SUPPORTED;
1551 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1552 return MGMT_STATUS_REJECTED;
1554 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable.  On error the
 * pending command is failed and the limited-discoverable flag cleared;
 * on success the HCI_DISCOVERABLE flag is updated, the discoverable
 * timeout (if any) is armed, and the new settings are broadcast.
 */
1557 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1560 struct mgmt_pending_cmd *cmd;
1561 struct mgmt_mode *cp;
1562 struct hci_request req;
1565 BT_DBG("status 0x%02x", status);
1569 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* Controller reported failure: propagate the translated error. */
1574 u8 mgmt_err = mgmt_status(status);
1575 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1576 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1582 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the auto-disable timer when a timeout was requested. */
1585 if (hdev->discov_timeout > 0) {
1586 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1587 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1591 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1595 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1598 new_settings(hdev, cmd->sk);
1600 /* When the discoverable mode gets changed, make sure
1601 * that class of device has the limited discoverable
1602 * bit correctly set. Also update page scan based on whitelist
1605 hci_req_init(&req, hdev);
1606 __hci_update_page_scan(&req);
1608 hci_req_run(&req, NULL);
1611 mgmt_pending_remove(cmd);
1614 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  Mode values: 0x00 off, 0x01
 * general discoverable, 0x02 limited discoverable.  Disabling must come
 * with timeout 0; limited discoverable requires a non-zero timeout.
 * When powered off only the settings flags are updated; when powered
 * the appropriate IAC and scan-enable HCI commands (BR/EDR) or
 * advertising-data update (LE-only) are queued.
 */
1617 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1620 struct mgmt_cp_set_discoverable *cp = data;
1621 struct mgmt_pending_cmd *cmd;
1622 struct hci_request req;
1627 BT_DBG("request for %s", hdev->name);
/* Need at least one of BR/EDR or LE enabled for discoverability. */
1629 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1630 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1631 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1632 MGMT_STATUS_REJECTED);
1634 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1636 MGMT_STATUS_INVALID_PARAMS);
1638 timeout = __le16_to_cpu(cp->timeout);
1640 /* Disabling discoverable requires that no timeout is set,
1641 * and enabling limited discoverable requires a timeout.
1643 if ((cp->val == 0x00 && timeout > 0) ||
1644 (cp->val == 0x02 && timeout == 0))
1645 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1646 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while the controller is powered off. */
1650 if (!hdev_is_powered(hdev) && timeout > 0) {
1651 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1652 MGMT_STATUS_NOT_POWERED);
/* Refuse while another discoverable/connectable change is in flight. */
1656 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1657 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; reject if not connectable. */
1663 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1665 MGMT_STATUS_REJECTED);
/* Powered off: only flip the stored settings flag, no HCI traffic. */
1669 if (!hdev_is_powered(hdev)) {
1670 bool changed = false;
1672 /* Setting limited discoverable when powered off is
1673 * not a valid operation since it requires a timeout
1674 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1676 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1677 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1681 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1686 err = new_settings(hdev, sk);
1691 /* If the current mode is the same, then just update the timeout
1692 * value with the new value. And if only the timeout gets updated,
1693 * then no need for any HCI transactions.
1695 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1696 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1697 HCI_LIMITED_DISCOVERABLE)) {
1698 cancel_delayed_work(&hdev->discov_off);
1699 hdev->discov_timeout = timeout;
1701 if (cp->val && hdev->discov_timeout > 0) {
1702 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1703 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1707 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1711 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1717 /* Cancel any potential discoverable timeout that might be
1718 * still active and store new timeout value. The arming of
1719 * the timeout happens in the complete handler.
1721 cancel_delayed_work(&hdev->discov_off);
1722 hdev->discov_timeout = timeout;
1724 /* Limited discoverable mode */
1725 if (cp->val == 0x02)
1726 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1728 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1730 hci_req_init(&req, hdev);
1732 /* The procedure for LE-only controllers is much simpler - just
1733 * update the advertising data.
1735 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1741 struct hci_cp_write_current_iac_lap hci_cp;
/* The IAC LAPs below are the standard 3-byte little-endian values:
 * LIAC 0x9e8b00, GIAC 0x9e8b33.
 */
1743 if (cp->val == 0x02) {
1744 /* Limited discoverable mode */
1745 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1746 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1747 hci_cp.iac_lap[1] = 0x8b;
1748 hci_cp.iac_lap[2] = 0x9e;
1749 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1750 hci_cp.iac_lap[4] = 0x8b;
1751 hci_cp.iac_lap[5] = 0x9e;
1753 /* General discoverable mode */
1755 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1756 hci_cp.iac_lap[1] = 0x8b;
1757 hci_cp.iac_lap[2] = 0x9e;
1760 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1761 (hci_cp.num_iac * 3) + 1, &hci_cp);
1763 scan |= SCAN_INQUIRY;
1765 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1768 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1771 update_adv_data(&req);
1773 err = hci_req_run(&req, set_discoverable_complete);
1775 mgmt_pending_remove(cmd);
1778 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing "fast
 * connectable": interlaced scan with a 160 ms interval when enabled,
 * standard scan with the default 1.28 s interval when disabled.
 * No-op on non-BR/EDR setups or pre-1.2 controllers, and commands are
 * only queued when the values actually differ from the cached state.
 */
1782 static void write_fast_connectable(struct hci_request *req, bool enable)
1784 struct hci_dev *hdev = req->hdev;
1785 struct hci_cp_write_page_scan_activity acp;
1788 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* Page scan type/interval tuning needs Bluetooth 1.2 or later. */
1791 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1795 type = PAGE_SCAN_TYPE_INTERLACED;
1797 /* 160 msec page scan interval */
1798 acp.interval = cpu_to_le16(0x0100);
1800 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1802 /* default 1.28 sec page scan */
1803 acp.interval = cpu_to_le16(0x0800);
1806 acp.window = cpu_to_le16(0x0012);
/* Skip the HCI round-trips when nothing would change. */
1808 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1809 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1810 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1813 if (hdev->page_scan_type != type)
1814 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable.  On success it
 * updates HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE),
 * answers the pending command and — if anything changed — broadcasts
 * New Settings and refreshes page scan, advertising data and
 * background scanning.
 */
1817 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1820 struct mgmt_pending_cmd *cmd;
1821 struct mgmt_mode *cp;
1822 bool conn_changed, discov_changed;
1824 BT_DBG("status 0x%02x", status);
1828 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1833 u8 mgmt_err = mgmt_status(status);
1834 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1840 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1842 discov_changed = false;
/* Disabling connectable also drops discoverable. */
1844 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1846 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1850 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1852 if (conn_changed || discov_changed) {
1853 new_settings(hdev, cmd->sk);
1854 hci_update_page_scan(hdev);
1856 mgmt_update_adv_data(hdev);
1857 hci_update_background_scan(hdev);
1861 mgmt_pending_remove(cmd);
1864 hci_dev_unlock(hdev);
/* Settings-only path for Set Connectable (no HCI traffic needed, e.g.
 * while powered off): flip HCI_CONNECTABLE to @val, clear
 * HCI_DISCOVERABLE when disabling, reply to the caller and broadcast
 * New Settings if anything changed.
 */
1867 static int set_connectable_update_settings(struct hci_dev *hdev,
1868 struct sock *sk, u8 val)
1870 bool changed = false;
1873 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1877 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1879 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Connectable off implies discoverable off as well. */
1880 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1883 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1888 hci_update_page_scan(hdev);
1889 hci_update_background_scan(hdev);
1890 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  Validates the mode byte, takes the
 * settings-only shortcut when powered off, and otherwise builds an HCI
 * request: LE-only controllers just refresh advertising data, while
 * BR/EDR controllers get a Write Scan Enable that honours the
 * whitelist-driven page-scan policy.
 */
1896 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1899 struct mgmt_mode *cp = data;
1900 struct mgmt_pending_cmd *cmd;
1901 struct hci_request req;
1905 BT_DBG("request for %s", hdev->name);
1907 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1908 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1910 MGMT_STATUS_REJECTED);
1912 if (cp->val != 0x00 && cp->val != 0x01)
1913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1914 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: update stored settings only, no HCI commands. */
1918 if (!hdev_is_powered(hdev)) {
1919 err = set_connectable_update_settings(hdev, sk, cp->val);
1923 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1924 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1925 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1930 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1936 hci_req_init(&req, hdev);
1938 /* If BR/EDR is not enabled and we disable advertising as a
1939 * by-product of disabling connectable, we need to update the
1940 * advertising flags.
1942 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1944 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1945 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1947 update_adv_data(&req);
1948 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1952 /* If we don't have any whitelist entries just
1953 * disable all scanning. If there are entries
1954 * and we had both page and inquiry scanning
1955 * enabled then fall back to only page scanning.
1956 * Otherwise no changes are needed.
1958 if (list_empty(&hdev->whitelist))
1959 scan = SCAN_DISABLED;
1960 else if (test_bit(HCI_ISCAN, &hdev->flags))
1963 goto no_scan_update;
/* Dropping connectable also stops a running discoverable timer. */
1965 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1966 hdev->discov_timeout > 0)
1967 cancel_delayed_work(&hdev->discov_off);
1970 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1974 /* Update the advertising parameters if necessary */
1975 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
1976 enable_advertising(&req);
1978 err = hci_req_run(&req, set_connectable_complete);
1980 mgmt_pending_remove(cmd);
/* Nothing was queued: fall back to the settings-only path. */
1981 if (err == -ENODATA)
1982 err = set_connectable_update_settings(hdev, sk,
1988 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag, so only
 * HCI_BONDABLE is toggled — no HCI commands are involved.  New Settings
 * is broadcast when the flag actually changed.
 */
1992 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1995 struct mgmt_mode *cp = data;
1999 BT_DBG("request for %s", hdev->name);
2001 if (cp->val != 0x00 && cp->val != 0x01)
2002 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2003 MGMT_STATUS_INVALID_PARAMS);
2008 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
2010 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
2012 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2017 err = new_settings(hdev, sk);
2020 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication.
 * When powered off only the HCI_LINK_SECURITY flag is flipped; when
 * powered, a Write Auth Enable HCI command is sent and completion is
 * handled elsewhere via the pending command.
 */
2024 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2027 struct mgmt_mode *cp = data;
2028 struct mgmt_pending_cmd *cmd;
2032 BT_DBG("request for %s", hdev->name);
2034 status = mgmt_bredr_support(hdev);
2036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2039 if (cp->val != 0x00 && cp->val != 0x01)
2040 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2041 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update plus New Settings broadcast. */
2045 if (!hdev_is_powered(hdev)) {
2046 bool changed = false;
2048 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2049 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2053 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2058 err = new_settings(hdev, sk);
2063 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2064 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: nothing to do. */
2071 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2072 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2076 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2082 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2084 mgmt_pending_remove(cmd);
2089 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Requires a BR/EDR controller with SSP support.  Powered off: flag
 * updates only (disabling SSP also clears High Speed).  Powered on:
 * send Write SSP Mode, first dropping SSP debug mode if it was active.
 */
2093 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2095 struct mgmt_mode *cp = data;
2096 struct mgmt_pending_cmd *cmd;
2100 BT_DBG("request for %s", hdev->name);
2102 status = mgmt_bredr_support(hdev);
2104 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2106 if (!lmp_ssp_capable(hdev))
2107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2108 MGMT_STATUS_NOT_SUPPORTED);
2110 if (cp->val != 0x00 && cp->val != 0x01)
2111 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2112 MGMT_STATUS_INVALID_PARAMS);
2116 if (!hdev_is_powered(hdev)) {
2120 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2123 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* High Speed depends on SSP, so it is cleared alongside it. */
2126 changed = test_and_clear_bit(HCI_HS_ENABLED,
2129 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2132 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2137 err = new_settings(hdev, sk);
2142 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2143 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2148 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2149 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2153 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off while debug keys are in use: disable debug mode
 * first (cp->val is 0x00 here, which is also the "off" value).
 */
2159 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2160 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2161 sizeof(cp->val), &cp->val);
2163 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2165 mgmt_pending_remove(cmd);
2170 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggle the host-side High Speed (AMP) flag.
 * Requires BR/EDR with SSP enabled.  Note: disabling HS while powered
 * is rejected; the HCI_HS_ENABLED flag itself involves no HCI traffic.
 */
2174 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2176 struct mgmt_mode *cp = data;
2181 BT_DBG("request for %s", hdev->name);
2183 status = mgmt_bredr_support(hdev);
2185 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2187 if (!lmp_ssp_capable(hdev))
2188 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2189 MGMT_STATUS_NOT_SUPPORTED);
/* HS is meaningless without SSP being enabled first. */
2191 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2192 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2193 MGMT_STATUS_REJECTED);
2195 if (cp->val != 0x00 && cp->val != 0x01)
2196 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2197 MGMT_STATUS_INVALID_PARAMS);
/* An in-flight SET_SSP could change the SSP state underneath us. */
2201 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2202 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2208 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2210 if (hdev_is_powered(hdev)) {
2211 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2212 MGMT_STATUS_REJECTED);
2216 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2219 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2224 err = new_settings(hdev, sk);
2227 hci_dev_unlock(hdev);
/* HCI completion handler for the Set LE request.  Fails or completes
 * all pending SET_LE commands, broadcasts New Settings, and — when LE
 * ended up enabled — refreshes advertising/scan-response data and the
 * background scan in a follow-up request.
 */
2231 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2233 struct cmd_lookup match = { NULL, hdev };
2238 u8 mgmt_err = mgmt_status(status);
2240 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2245 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
/* match.sk (set by settings_rsp) is skipped in the broadcast. */
2247 new_settings(hdev, match.sk);
2252 /* Make sure the controller has a good default for
2253 * advertising data. Restrict the update to when LE
2254 * has actually been enabled. During power on, the
2255 * update in powered_update_hci will take care of it.
2257 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2258 struct hci_request req;
2260 hci_req_init(&req, hdev);
2261 update_adv_data(&req);
2262 update_scan_rsp_data(&req);
2263 __hci_update_background_scan(&req);
2264 hci_req_run(&req, NULL);
2268 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable Low Energy support.  LE-only
 * controllers cannot toggle it (it must stay on).  When powered off or
 * when the host-LE state already matches, only flags are updated;
 * otherwise a Write LE Host Supported command is queued, preceded by
 * stopping advertising if it is active.
 */
2271 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2273 struct mgmt_mode *cp = data;
2274 struct hci_cp_write_le_host_supported hci_cp;
2275 struct mgmt_pending_cmd *cmd;
2276 struct hci_request req;
2280 BT_DBG("request for %s", hdev->name);
2282 if (!lmp_le_capable(hdev))
2283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2284 MGMT_STATUS_NOT_SUPPORTED);
2286 if (cp->val != 0x00 && cp->val != 0x01)
2287 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2288 MGMT_STATUS_INVALID_PARAMS);
2290 /* LE-only devices do not allow toggling LE on/off */
2291 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2292 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2293 MGMT_STATUS_REJECTED);
2298 enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or controller already matches. */
2300 if (!hdev_is_powered(hdev) || val == enabled) {
2301 bool changed = false;
2303 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2304 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also invalidates the advertising setting. */
2308 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2309 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2313 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2318 err = new_settings(hdev, sk);
2323 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2324 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2325 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2330 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2336 hci_req_init(&req, hdev);
2338 memset(&hci_cp, 0, sizeof(hci_cp));
2342 hci_cp.simul = 0x00;
/* Advertising must be stopped before LE host support is rewritten. */
2344 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2345 disable_advertising(&req);
2348 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2351 err = hci_req_run(&req, le_enable_complete);
2353 mgmt_pending_remove(cmd);
2356 hci_dev_unlock(hdev);
2360 /* This is a helper function to test for pending mgmt commands that can
2361 * cause CoD or EIR HCI commands. We can only allow one such pending
2362 * mgmt command at a time since otherwise we cannot easily track what
2363 * the current values are, will be, and based on that calculate if a new
2364 * HCI command needs to be sent and if yes with what value.
2366 static bool pending_eir_or_class(struct hci_dev *hdev)
2368 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that touches CoD/EIR state. */
2370 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2371 switch (cmd->opcode) {
2372 case MGMT_OP_ADD_UUID:
2373 case MGMT_OP_REMOVE_UUID:
2374 case MGMT_OP_SET_DEV_CLASS:
2375 case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805f9b34fb) in
 * little-endian byte order; 16/32-bit UUIDs are aliases into it.
 */
2383 static const u8 bluetooth_base_uuid[] = {
2384 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2385 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID by size: anything not derived from the base
 * UUID is a full 128-bit value; otherwise the 32-bit field at offset 12
 * decides between a 16-bit and a 32-bit UUID (elided tail presumably
 * compares it against the 16-bit range — confirm against full source).
 */
2388 static u8 get_uuid_size(const u8 *uuid)
2392 if (memcmp(uuid, bluetooth_base_uuid, 12))
2395 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for class-of-device related operations (Add UUID,
 * Remove UUID, Set Device Class): answer the pending @mgmt_op with the
 * current 3-byte device class and drop the pending entry.
 */
2402 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2404 struct mgmt_pending_cmd *cmd;
2408 cmd = mgmt_pending_find(mgmt_op, hdev);
2412 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2413 mgmt_status(status), hdev->dev_class, 3);
2415 mgmt_pending_remove(cmd);
2418 hci_dev_unlock(hdev);
/* HCI request completion for Add UUID: delegate to the shared
 * class-complete helper.
 */
2421 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2423 BT_DBG("status 0x%02x", status);
2425 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record a new service UUID for EIR/CoD
 * generation.  Refused while another EIR/CoD-affecting command is
 * pending.  If the resulting HCI request queues nothing (-ENODATA) the
 * command completes immediately with the current device class.
 */
2428 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2430 struct mgmt_cp_add_uuid *cp = data;
2431 struct mgmt_pending_cmd *cmd;
2432 struct hci_request req;
2433 struct bt_uuid *uuid;
2436 BT_DBG("request for %s", hdev->name);
2440 if (pending_eir_or_class(hdev)) {
2441 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2446 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
/* Store the UUID together with its service hint and encoded size. */
2452 memcpy(uuid->uuid, cp->uuid, 16);
2453 uuid->svc_hint = cp->svc_hint;
2454 uuid->size = get_uuid_size(cp->uuid);
2456 list_add_tail(&uuid->list, &hdev->uuids);
2458 hci_req_init(&req, hdev);
2463 err = hci_req_run(&req, add_uuid_complete);
2465 if (err != -ENODATA)
/* No HCI traffic was needed: reply synchronously. */
2468 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2469 hdev->dev_class, 3);
2473 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2482 hci_dev_unlock(hdev);
/* Arm the service cache (deferred EIR/CoD update) if the device is
 * powered and the cache was not already active.  Returns whether the
 * caller may rely on the cache (presumably true when armed — tail of
 * the function is elided; confirm against full source).
 */
2486 static bool enable_service_cache(struct hci_dev *hdev)
2488 if (!hdev_is_powered(hdev))
2491 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2492 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for Remove UUID: delegate to the shared
 * class-complete helper.
 */
2500 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2502 BT_DBG("status 0x%02x", status);
2504 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler.  An all-zero UUID means "remove all";
 * otherwise every matching entry is removed, failing with
 * INVALID_PARAMS when nothing matched.  Like add_uuid, an -ENODATA
 * request result completes the command synchronously.
 */
2507 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2510 struct mgmt_cp_remove_uuid *cp = data;
2511 struct mgmt_pending_cmd *cmd;
2512 struct bt_uuid *match, *tmp;
2513 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2514 struct hci_request req;
2517 BT_DBG("request for %s", hdev->name);
2521 if (pending_eir_or_class(hdev)) {
2522 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: clear every stored UUID at once. */
2527 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2528 hci_uuids_clear(hdev);
/* If the service cache could be armed instead, defer the EIR/CoD
 * update and answer right away.
 */
2530 if (enable_service_cache(hdev)) {
2531 err = mgmt_cmd_complete(sk, hdev->id,
2532 MGMT_OP_REMOVE_UUID,
2533 0, hdev->dev_class, 3);
2542 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2543 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2546 list_del(&match->list);
/* Nothing matched the given UUID. */
2552 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2553 MGMT_STATUS_INVALID_PARAMS);
2558 hci_req_init(&req, hdev);
2563 err = hci_req_run(&req, remove_uuid_complete);
2565 if (err != -ENODATA)
2568 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2569 hdev->dev_class, 3);
2573 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2582 hci_dev_unlock(hdev);
/* HCI request completion for Set Device Class: delegate to the shared
 * class-complete helper.
 */
2586 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2588 BT_DBG("status 0x%02x", status);
2590 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: set major/minor class of device.
 * Minor bits 0-1 and major bits 5-7 are reserved and must be zero.
 * When powered off only the cached values change; when powered a CoD
 * update request is run (flushing the service cache first if armed).
 */
2593 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2596 struct mgmt_cp_set_dev_class *cp = data;
2597 struct mgmt_pending_cmd *cmd;
2598 struct hci_request req;
2601 BT_DBG("request for %s", hdev->name);
2603 if (!lmp_bredr_capable(hdev))
2604 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2605 MGMT_STATUS_NOT_SUPPORTED);
2609 if (pending_eir_or_class(hdev)) {
2610 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reject values with reserved bits set. */
2615 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2616 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2617 MGMT_STATUS_INVALID_PARAMS);
2621 hdev->major_class = cp->major;
2622 hdev->minor_class = cp->minor;
2624 if (!hdev_is_powered(hdev)) {
2625 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2626 hdev->dev_class, 3);
2630 hci_req_init(&req, hdev);
/* A pending service-cache flush must run first; the lock is dropped
 * because cancel_delayed_work_sync() may sleep.
 */
2632 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2633 hci_dev_unlock(hdev);
2634 cancel_delayed_work_sync(&hdev->service_cache);
2641 err = hci_req_run(&req, set_class_complete);
2643 if (err != -ENODATA)
2646 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2647 hdev->dev_class, 3);
2651 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2660 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied set.  Validates key count against both the u16
 * payload limit and the actual message length, checks every entry's
 * address type and key type, then clears and repopulates the key store.
 */
2664 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2667 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound derived from the maximum u16 payload size. */
2668 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2669 sizeof(struct mgmt_link_key_info));
2670 u16 key_count, expected_len;
2674 BT_DBG("request for %s", hdev->name);
2676 if (!lmp_bredr_capable(hdev))
2677 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2678 MGMT_STATUS_NOT_SUPPORTED);
2680 key_count = __le16_to_cpu(cp->key_count);
2681 if (key_count > max_key_count) {
2682 BT_ERR("load_link_keys: too big key_count value %u",
2684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2685 MGMT_STATUS_INVALID_PARAMS);
/* The declared key count must match the actual message length. */
2688 expected_len = sizeof(*cp) + key_count *
2689 sizeof(struct mgmt_link_key_info);
2690 if (expected_len != len) {
2691 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2693 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2694 MGMT_STATUS_INVALID_PARAMS);
2697 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2698 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2699 MGMT_STATUS_INVALID_PARAMS);
2701 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating any state. */
2704 for (i = 0; i < key_count; i++) {
2705 struct mgmt_link_key_info *key = &cp->keys[i];
2707 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2708 return mgmt_cmd_status(sk, hdev->id,
2709 MGMT_OP_LOAD_LINK_KEYS,
2710 MGMT_STATUS_INVALID_PARAMS);
2715 hci_link_keys_clear(hdev);
2718 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2721 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2725 new_settings(hdev, NULL);
2727 for (i = 0; i < key_count; i++) {
2728 struct mgmt_link_key_info *key = &cp->keys[i];
2730 /* Always ignore debug keys and require a new pairing if
2731 * the user wants to use them.
2733 if (key->type == HCI_LK_DEBUG_COMBINATION)
2736 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2737 key->type, key->pin_len, NULL);
2740 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2742 hci_dev_unlock(hdev);
/* Emit a Device Unpaired event for the given address to all management
 * sockets except @skip_sk.
 */
2747 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2748 u8 addr_type, struct sock *skip_sk)
2750 struct mgmt_ev_device_unpaired ev;
2752 bacpy(&ev.addr.bdaddr, bdaddr);
2753 ev.addr.type = addr_type;
2755 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete all keys for a device (link
 * key for BR/EDR; IRK and LTK for LE) and optionally disconnect it.
 * If a disconnect is requested and a live connection exists, the reply
 * is deferred to the disconnect completion via a pending command.
 */
2759 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2762 struct mgmt_cp_unpair_device *cp = data;
2763 struct mgmt_rp_unpair_device rp;
2764 struct hci_cp_disconnect dc;
2765 struct mgmt_pending_cmd *cmd;
2766 struct hci_conn *conn;
/* The response always echoes the requested address. */
2769 memset(&rp, 0, sizeof(rp));
2770 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2771 rp.addr.type = cp->addr.type;
2773 if (!bdaddr_type_is_valid(cp->addr.type))
2774 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2775 MGMT_STATUS_INVALID_PARAMS,
2778 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2779 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2780 MGMT_STATUS_INVALID_PARAMS,
2785 if (!hdev_is_powered(hdev)) {
2786 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2787 MGMT_STATUS_NOT_POWERED, &rp,
2792 if (cp->addr.type == BDADDR_BREDR) {
2793 /* If disconnection is requested, then look up the
2794 * connection. If the remote device is connected, it
2795 * will be later used to terminate the link.
2797 * Setting it to NULL explicitly will cause no
2798 * termination of the link.
2801 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2806 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE branch: resolve the connection and remove IRK + LTK. */
2810 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2813 /* Defer clearing up the connection parameters
2814 * until closing to give a chance of keeping
2815 * them if a repairing happens.
2817 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2819 /* If disconnection is not requested, then
2820 * clear the connection variable so that the
2821 * link is not terminated.
2823 if (!cp->disconnect)
2827 if (cp->addr.type == BDADDR_LE_PUBLIC)
2828 addr_type = ADDR_LE_DEV_PUBLIC;
2830 addr_type = ADDR_LE_DEV_RANDOM;
2832 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2834 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No keys were found for the device. */
2838 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2839 MGMT_STATUS_NOT_PAIRED, &rp,
2844 /* If the connection variable is set, then termination of the
2845 * link is requested.
2848 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2850 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2854 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2861 cmd->cmd_complete = addr_cmd_complete;
/* 0x13: Remote User Terminated Connection (HCI error code). */
2863 dc.handle = cpu_to_le16(conn->handle);
2864 dc.reason = 0x13; /* Remote User Terminated Connection */
2865 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2867 mgmt_pending_remove(cmd);
2870 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address.  Fails if the adapter is down, another disconnect
 * is pending, or no live connection exists; otherwise issues the
 * disconnect and defers the reply to the pending-command completion.
 */
2874 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2877 struct mgmt_cp_disconnect *cp = data;
2878 struct mgmt_rp_disconnect rp;
2879 struct mgmt_pending_cmd *cmd;
2880 struct hci_conn *conn;
2885 memset(&rp, 0, sizeof(rp));
2886 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2887 rp.addr.type = cp->addr.type;
2889 if (!bdaddr_type_is_valid(cp->addr.type))
2890 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2891 MGMT_STATUS_INVALID_PARAMS,
2896 if (!test_bit(HCI_UP, &hdev->flags)) {
2897 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2898 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect command may be outstanding at a time. */
2903 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2904 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2905 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2909 if (cp->addr.type == BDADDR_BREDR)
2910 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2913 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED means there is no link to tear down. */
2915 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2916 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2917 MGMT_STATUS_NOT_CONNECTED, &rp,
2922 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2928 cmd->cmd_complete = generic_cmd_complete;
2930 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2932 mgmt_pending_remove(cmd);
2935 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into the single MGMT
 * BDADDR_* address-type value used on the management interface.
 */
2939 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2941 switch (link_type) {
2943 switch (addr_type) {
2944 case ADDR_LE_DEV_PUBLIC:
2945 return BDADDR_LE_PUBLIC;
2948 /* Fallback to LE Random address type */
2949 return BDADDR_LE_RANDOM;
2953 /* Fallback to BR/EDR type */
2954 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: build and return the list of
 * addresses of all MGMT-visible connections.  SCO/eSCO links are
 * skipped from the address array (they shadow an ACL link), so the
 * length is recomputed after filling.
 */
2958 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2961 struct mgmt_rp_get_connections *rp;
2971 if (!hdev_is_powered(hdev)) {
2972 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2973 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the response buffer. */
2978 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2979 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2983 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2984 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO entries. */
2991 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2992 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2994 bacpy(&rp->addr[i].bdaddr, &c->dst);
2995 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2996 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3001 rp->conn_count = cpu_to_le16(i);
3003 /* Recalculate length in case of filtered SCO connections, etc */
3004 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3006 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3012 hci_dev_unlock(hdev);
/* Queue a pending MGMT_OP_PIN_CODE_NEG_REPLY command and send the
 * HCI PIN-code negative reply for the given address.  The pending
 * entry is removed again if hci_send_cmd() fails.
 * NOTE(review): excerpt omits interior lines; verify against full source.
 */
3016 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3017 struct mgmt_cp_pin_code_neg_reply *cp)
3019 struct mgmt_pending_cmd *cmd;
3022 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3027 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3028 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3030 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: validate power state and the ACL
 * connection, reject a short PIN when high security was requested
 * (turning it into a negative reply), then forward the PIN code to
 * the controller via HCI_OP_PIN_CODE_REPLY.
 * NOTE(review): excerpt omits interior lines (locks, goto labels,
 * error paths); verify against the full source.
 */
3035 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3038 struct hci_conn *conn;
3039 struct mgmt_cp_pin_code_reply *cp = data;
3040 struct hci_cp_pin_code_reply reply;
3041 struct mgmt_pending_cmd *cmd;
3048 if (!hdev_is_powered(hdev)) {
3049 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3050 MGMT_STATUS_NOT_POWERED);
3054 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3056 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3057 MGMT_STATUS_NOT_CONNECTED);
/* A 16-byte PIN is mandatory for BT_SECURITY_HIGH; otherwise convert
 * the request into a negative reply towards the controller.
 */
3061 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3062 struct mgmt_cp_pin_code_neg_reply ncp;
3064 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3066 BT_ERR("PIN code is not 16 bytes long");
3068 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3070 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3071 MGMT_STATUS_INVALID_PARAMS);
3076 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3082 cmd->cmd_complete = addr_cmd_complete;
3084 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3085 reply.pin_len = cp->pin_len;
3086 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3088 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3090 mgmt_pending_remove(cmd);
3093 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validate the requested IO
 * capability (must not exceed SMP_IO_KEYBOARD_DISPLAY) and store it
 * in hdev->io_capability under the device lock.
 * NOTE(review): excerpt omits interior lines; verify against full source.
 */
3097 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3100 struct mgmt_cp_set_io_capability *cp = data;
3104 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3105 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3106 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3110 hdev->io_capability = cp->io_capability;
3112 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3113 hdev->io_capability);
3115 hci_dev_unlock(hdev);
3117 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Walk hdev->mgmt_pending and return the pending MGMT_OP_PAIR_DEVICE
 * command whose user_data is this connection, if any.
 * NOTE(review): excerpt omits interior lines (continue/return paths);
 * verify against the full source.
 */
3121 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3123 struct hci_dev *hdev = conn->hdev;
3124 struct mgmt_pending_cmd *cmd;
3126 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3127 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3130 if (cmd->user_data != conn)
/* cmd_complete callback for MGMT_OP_PAIR_DEVICE: report the result to
 * userspace, detach all connection callbacks so no further events fire
 * for this pairing, drop the connection reference, and keep the stored
 * connection parameters since the device is now paired.
 * NOTE(review): excerpt omits interior lines; verify against full source.
 */
3139 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3141 struct mgmt_rp_pair_device rp;
3142 struct hci_conn *conn = cmd->user_data;
3145 bacpy(&rp.addr.bdaddr, &conn->dst);
3146 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3148 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3149 status, &rp, sizeof(rp));
3151 /* So we don't get further callbacks for this connection */
3152 conn->connect_cfm_cb = NULL;
3153 conn->security_cfm_cb = NULL;
3154 conn->disconn_cfm_cb = NULL;
3156 hci_conn_drop(conn);
3158 /* The device is paired so there is no need to remove
3159 * its connection parameters anymore.
3161 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called when SMP pairing finishes: complete any pending
 * MGMT_OP_PAIR_DEVICE command with SUCCESS or FAILED.
 * NOTE(review): excerpt omits interior lines (NULL check on cmd is
 * presumably present); verify against the full source.
 */
3168 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3170 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3171 struct mgmt_pending_cmd *cmd;
3173 cmd = find_pairing(conn);
3175 cmd->cmd_complete(cmd, status);
3176 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect confirm callback used during
 * pairing: translate the HCI status and complete the pending
 * MGMT_OP_PAIR_DEVICE command.
 * NOTE(review): excerpt omits interior lines; verify against full source.
 */
3180 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3182 struct mgmt_pending_cmd *cmd;
3184 BT_DBG("status %u", status);
3186 cmd = find_pairing(conn);
3188 BT_DBG("Unable to find a pending command");
3192 cmd->cmd_complete(cmd, mgmt_status(status));
3193 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb: complete the pending
 * MGMT_OP_PAIR_DEVICE command with the translated status.
 * NOTE(review): excerpt omits interior lines (e.g. an early-return
 * condition before find_pairing); verify against the full source.
 */
3196 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3198 struct mgmt_pending_cmd *cmd;
3200 BT_DBG("status %u", status);
3205 cmd = find_pairing(conn);
3207 BT_DBG("Unable to find a pending command");
3211 cmd->cmd_complete(cmd, mgmt_status(status));
3212 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate parameters, initiate a BR/EDR
 * (hci_connect_acl) or LE (hci_connect_le) connection, register a
 * pending command with pairing callbacks, and kick off security
 * elevation if the link is already up.
 * NOTE(review): excerpt omits interior lines (locks, goto labels,
 * error paths); verify against the full source.
 */
3215 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3218 struct mgmt_cp_pair_device *cp = data;
3219 struct mgmt_rp_pair_device rp;
3220 struct mgmt_pending_cmd *cmd;
3221 u8 sec_level, auth_type;
3222 struct hci_conn *conn;
3227 memset(&rp, 0, sizeof(rp));
3228 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3229 rp.addr.type = cp->addr.type;
3231 if (!bdaddr_type_is_valid(cp->addr.type))
3232 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3233 MGMT_STATUS_INVALID_PARAMS,
3236 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3237 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3238 MGMT_STATUS_INVALID_PARAMS,
3243 if (!hdev_is_powered(hdev)) {
3244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3245 MGMT_STATUS_NOT_POWERED, &rp,
3250 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3251 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3252 MGMT_STATUS_ALREADY_PAIRED, &rp,
3257 sec_level = BT_SECURITY_MEDIUM;
3258 auth_type = HCI_AT_DEDICATED_BONDING;
3260 if (cp->addr.type == BDADDR_BREDR) {
3261 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3266 /* Convert from L2CAP channel address type to HCI address type
3268 if (cp->addr.type == BDADDR_LE_PUBLIC)
3269 addr_type = ADDR_LE_DEV_PUBLIC;
3271 addr_type = ADDR_LE_DEV_RANDOM;
3273 /* When pairing a new device, it is expected to remember
3274 * this device for future connections. Adding the connection
3275 * parameter information ahead of time allows tracking
3276 * of the slave preferred values and will speed up any
3277 * further connection establishment.
3279 * If connection parameters already exist, then they
3280 * will be kept and this function does nothing.
3282 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3284 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3285 sec_level, HCI_LE_CONN_TIMEOUT,
/* Translate hci_connect_* errors to mgmt status codes. */
3292 if (PTR_ERR(conn) == -EBUSY)
3293 status = MGMT_STATUS_BUSY;
3294 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3295 status = MGMT_STATUS_NOT_SUPPORTED;
3296 else if (PTR_ERR(conn) == -ECONNREFUSED)
3297 status = MGMT_STATUS_REJECTED;
3299 status = MGMT_STATUS_CONNECT_FAILED;
3301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3302 status, &rp, sizeof(rp));
/* A connection that already has pairing callbacks attached means a
 * pairing is in progress on it.
 */
3306 if (conn->connect_cfm_cb) {
3307 hci_conn_drop(conn);
3308 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3309 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3313 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3316 hci_conn_drop(conn);
3320 cmd->cmd_complete = pairing_complete;
3322 /* For LE, just connecting isn't a proof that the pairing finished */
3323 if (cp->addr.type == BDADDR_BREDR) {
3324 conn->connect_cfm_cb = pairing_complete_cb;
3325 conn->security_cfm_cb = pairing_complete_cb;
3326 conn->disconn_cfm_cb = pairing_complete_cb;
3328 conn->connect_cfm_cb = le_pairing_complete_cb;
3329 conn->security_cfm_cb = le_pairing_complete_cb;
3330 conn->disconn_cfm_cb = le_pairing_complete_cb;
3333 conn->io_capability = cp->io_cap;
3334 cmd->user_data = hci_conn_get(conn);
3336 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3337 hci_conn_security(conn, sec_level, auth_type, true)) {
3338 cmd->cmd_complete(cmd, 0);
3339 mgmt_pending_remove(cmd);
3345 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: find the pending PAIR_DEVICE
 * command, verify the supplied address matches its connection, and
 * complete it with MGMT_STATUS_CANCELLED.
 * NOTE(review): excerpt omits interior lines (locks, goto labels);
 * verify against the full source.
 */
3349 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3352 struct mgmt_addr_info *addr = data;
3353 struct mgmt_pending_cmd *cmd;
3354 struct hci_conn *conn;
3361 if (!hdev_is_powered(hdev)) {
3362 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3363 MGMT_STATUS_NOT_POWERED);
3367 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3369 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3370 MGMT_STATUS_INVALID_PARAMS);
3374 conn = cmd->user_data;
3376 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3377 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3378 MGMT_STATUS_INVALID_PARAMS);
3382 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3383 mgmt_pending_remove(cmd);
3385 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3386 addr, sizeof(*addr));
3388 hci_dev_unlock(hdev);
/* Common implementation for all user pairing responses (PIN neg,
 * user confirm pos/neg, passkey pos/neg).  For LE addresses the reply
 * goes through SMP; for BR/EDR a pending command is queued and the
 * corresponding HCI command is sent (with passkey payload for
 * HCI_OP_USER_PASSKEY_REPLY, bdaddr-only otherwise).
 * NOTE(review): excerpt omits interior lines (locks, goto labels);
 * verify against the full source.
 */
3392 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3393 struct mgmt_addr_info *addr, u16 mgmt_op,
3394 u16 hci_op, __le32 passkey)
3396 struct mgmt_pending_cmd *cmd;
3397 struct hci_conn *conn;
3402 if (!hdev_is_powered(hdev)) {
3403 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3404 MGMT_STATUS_NOT_POWERED, addr,
3409 if (addr->type == BDADDR_BREDR)
3410 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3412 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3415 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3416 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP, not HCI. */
3421 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3422 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3424 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3425 MGMT_STATUS_SUCCESS, addr,
3428 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3429 MGMT_STATUS_FAILED, addr,
3435 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3441 cmd->cmd_complete = addr_cmd_complete;
3443 /* Continue with pairing via HCI */
3444 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3445 struct hci_cp_user_passkey_reply cp;
3447 bacpy(&cp.bdaddr, &addr->bdaddr);
3448 cp.passkey = passkey;
3449 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3451 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3455 mgmt_pending_remove(cmd);
3458 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with HCI_OP_PIN_CODE_NEG_REPLY and no passkey.
 */
3462 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3463 void *data, u16 len)
3465 struct mgmt_cp_pin_code_neg_reply *cp = data;
3469 return user_pairing_resp(sk, hdev, &cp->addr,
3470 MGMT_OP_PIN_CODE_NEG_REPLY,
3471 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the exact payload
 * size, then delegates to user_pairing_resp() with
 * HCI_OP_USER_CONFIRM_REPLY and no passkey.
 */
3474 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3477 struct mgmt_cp_user_confirm_reply *cp = data;
3481 if (len != sizeof(*cp))
3482 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3483 MGMT_STATUS_INVALID_PARAMS);
3485 return user_pairing_resp(sk, hdev, &cp->addr,
3486 MGMT_OP_USER_CONFIRM_REPLY,
3487 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with HCI_OP_USER_CONFIRM_NEG_REPLY.
 */
3490 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3491 void *data, u16 len)
3493 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3497 return user_pairing_resp(sk, hdev, &cp->addr,
3498 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3499 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper passing the user's
 * passkey through user_pairing_resp() to HCI_OP_USER_PASSKEY_REPLY.
 */
3502 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3505 struct mgmt_cp_user_passkey_reply *cp = data;
3509 return user_pairing_resp(sk, hdev, &cp->addr,
3510 MGMT_OP_USER_PASSKEY_REPLY,
3511 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with HCI_OP_USER_PASSKEY_NEG_REPLY.
 */
3514 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3515 void *data, u16 len)
3517 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3521 return user_pairing_resp(sk, hdev, &cp->addr,
3522 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3523 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (with the current
 * hdev->dev_name) to the given request.
 */
3526 static void update_name(struct hci_request *req)
3528 struct hci_dev *hdev = req->hdev;
3529 struct hci_cp_write_local_name cp;
3531 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3533 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME: report
 * the translated status (or success) to the socket that issued the
 * pending command, then remove it.
 * NOTE(review): excerpt omits interior lines (lock, NULL-cmd check,
 * success/failure branch structure); verify against the full source.
 */
3536 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3538 struct mgmt_cp_set_local_name *cp;
3539 struct mgmt_pending_cmd *cmd;
3541 BT_DBG("status 0x%02x", status);
3545 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3552 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3553 mgmt_status(status));
3555 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3558 mgmt_pending_remove(cmd);
3561 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: short-circuit when nothing changed;
 * when powered off just store the names and emit LOCAL_NAME_CHANGED;
 * otherwise queue an HCI request (write local name, and update scan
 * response data for LE) completed by set_name_complete().
 * NOTE(review): excerpt omits interior lines (locks, goto labels,
 * update_name() call inside the bredr branch); verify against the
 * full source.
 */
3564 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3567 struct mgmt_cp_set_local_name *cp = data;
3568 struct mgmt_pending_cmd *cmd;
3569 struct hci_request req;
3576 /* If the old values are the same as the new ones just return a
3577 * direct command complete event.
3579 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3580 !memcmp(hdev->short_name, cp->short_name,
3581 sizeof(hdev->short_name))) {
3582 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3587 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3589 if (!hdev_is_powered(hdev)) {
3590 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3597 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3603 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3609 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3611 hci_req_init(&req, hdev);
3613 if (lmp_bredr_capable(hdev)) {
3618 /* The name is stored in the scan response data and so
3619 * no need to udpate the advertising data here.
3621 if (lmp_le_capable(hdev))
3622 update_scan_rsp_data(&req);
3624 err = hci_req_run(&req, set_name_complete);
3626 mgmt_pending_remove(cmd);
3629 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: check power, SSP support and
 * that no identical request is pending, then ask the controller for
 * OOB data (extended variant when BR/EDR Secure Connections is
 * enabled).
 * NOTE(review): excerpt omits interior lines (locks, goto labels,
 * MGMT_STATUS_BUSY argument); verify against the full source.
 */
3633 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3634 void *data, u16 data_len)
3636 struct mgmt_pending_cmd *cmd;
3639 BT_DBG("%s", hdev->name);
3643 if (!hdev_is_powered(hdev)) {
3644 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3645 MGMT_STATUS_NOT_POWERED);
3649 if (!lmp_ssp_capable(hdev)) {
3650 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3651 MGMT_STATUS_NOT_SUPPORTED);
3655 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3661 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3667 if (bredr_sc_enabled(hdev))
3668 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3671 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3674 mgmt_pending_remove(cmd);
3677 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: accepts either the legacy
 * (P-192 only, BR/EDR only) payload or the extended payload with both
 * P-192 and P-256 values.  Zero-valued hash/rand pairs disable the
 * corresponding OOB data; for LE only the P-256 pair may be non-zero
 * while legacy SMP OOB is unimplemented.
 * NOTE(review): excerpt omits interior lines (locks, goto labels,
 * the NULL assignments for disabled key pairs, err<0 checks); verify
 * against the full source.
 */
3681 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3682 void *data, u16 len)
3684 struct mgmt_addr_info *addr = data;
3687 BT_DBG("%s ", hdev->name);
3689 if (!bdaddr_type_is_valid(addr->type))
3690 return mgmt_cmd_complete(sk, hdev->id,
3691 MGMT_OP_ADD_REMOTE_OOB_DATA,
3692 MGMT_STATUS_INVALID_PARAMS,
3693 addr, sizeof(*addr));
/* Legacy payload: P-192 hash/rand, valid only for BR/EDR. */
3697 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3698 struct mgmt_cp_add_remote_oob_data *cp = data;
3701 if (cp->addr.type != BDADDR_BREDR) {
3702 err = mgmt_cmd_complete(sk, hdev->id,
3703 MGMT_OP_ADD_REMOTE_OOB_DATA,
3704 MGMT_STATUS_INVALID_PARAMS,
3705 &cp->addr, sizeof(cp->addr));
3709 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3710 cp->addr.type, cp->hash,
3711 cp->rand, NULL, NULL);
3713 status = MGMT_STATUS_FAILED;
3715 status = MGMT_STATUS_SUCCESS;
3717 err = mgmt_cmd_complete(sk, hdev->id,
3718 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3719 &cp->addr, sizeof(cp->addr));
3720 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3721 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3722 u8 *rand192, *hash192, *rand256, *hash256;
3725 if (bdaddr_type_is_le(cp->addr.type)) {
3726 /* Enforce zero-valued 192-bit parameters as
3727 * long as legacy SMP OOB isn't implemented.
3729 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3730 memcmp(cp->hash192, ZERO_KEY, 16)) {
3731 err = mgmt_cmd_complete(sk, hdev->id,
3732 MGMT_OP_ADD_REMOTE_OOB_DATA,
3733 MGMT_STATUS_INVALID_PARAMS,
3734 addr, sizeof(*addr));
3741 /* In case one of the P-192 values is set to zero,
3742 * then just disable OOB data for P-192.
3744 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3745 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3749 rand192 = cp->rand192;
3750 hash192 = cp->hash192;
3754 /* In case one of the P-256 values is set to zero, then just
3755 * disable OOB data for P-256.
3757 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3758 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3762 rand256 = cp->rand256;
3763 hash256 = cp->hash256;
3766 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3767 cp->addr.type, hash192, rand192,
3770 status = MGMT_STATUS_FAILED;
3772 status = MGMT_STATUS_SUCCESS;
3774 err = mgmt_cmd_complete(sk, hdev->id,
3775 MGMT_OP_ADD_REMOTE_OOB_DATA,
3776 status, &cp->addr, sizeof(cp->addr));
3778 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3779 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3780 MGMT_STATUS_INVALID_PARAMS);
3784 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR only.  BDADDR_ANY
 * clears all stored remote OOB data; otherwise remove the entry for
 * the given address.
 * NOTE(review): excerpt omits interior lines (lock, goto to the
 * completion path after the clear-all branch); verify against the
 * full source.
 */
3788 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3789 void *data, u16 len)
3791 struct mgmt_cp_remove_remote_oob_data *cp = data;
3795 BT_DBG("%s", hdev->name);
3797 if (cp->addr.type != BDADDR_BREDR)
3798 return mgmt_cmd_complete(sk, hdev->id,
3799 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3800 MGMT_STATUS_INVALID_PARAMS,
3801 &cp->addr, sizeof(cp->addr));
3805 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3806 hci_remote_oob_data_clear(hdev);
3807 status = MGMT_STATUS_SUCCESS;
3811 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3813 status = MGMT_STATUS_INVALID_PARAMS;
3815 status = MGMT_STATUS_SUCCESS;
3818 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3819 status, &cp->addr, sizeof(cp->addr));
3821 hci_dev_unlock(hdev);
/* Append the HCI commands that start discovery of hdev->discovery.type
 * to the request: an Inquiry for BREDR, or LE scan setup (stopping
 * advertising/background scan first) for LE and INTERLEAVED.  On
 * failure, *status is set to a MGMT_STATUS_* code and false is
 * returned.
 * NOTE(review): excerpt omits interior lines (breaks, returns,
 * variable declarations such as own_addr_type); verify against the
 * full source.
 */
3825 static bool trigger_discovery(struct hci_request *req, u8 *status)
3827 struct hci_dev *hdev = req->hdev;
3828 struct hci_cp_le_set_scan_param param_cp;
3829 struct hci_cp_le_set_scan_enable enable_cp;
3830 struct hci_cp_inquiry inq_cp;
3831 /* General inquiry access code (GIAC) */
3832 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3836 switch (hdev->discovery.type) {
3837 case DISCOV_TYPE_BREDR:
3838 *status = mgmt_bredr_support(hdev);
3842 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3843 *status = MGMT_STATUS_BUSY;
3847 hci_inquiry_cache_flush(hdev);
3849 memset(&inq_cp, 0, sizeof(inq_cp));
3850 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3851 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3852 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3855 case DISCOV_TYPE_LE:
3856 case DISCOV_TYPE_INTERLEAVED:
3857 *status = mgmt_le_support(hdev);
3861 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3862 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
3863 *status = MGMT_STATUS_NOT_SUPPORTED;
3867 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
3868 /* Don't let discovery abort an outgoing
3869 * connection attempt that's using directed
3872 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3874 *status = MGMT_STATUS_REJECTED;
3878 disable_advertising(req);
3881 /* If controller is scanning, it means the background scanning
3882 * is running. Thus, we should temporarily stop it in order to
3883 * set the discovery scanning parameters.
3885 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3886 hci_req_add_le_scan_disable(req);
3888 memset(&param_cp, 0, sizeof(param_cp));
3890 /* All active scans will be done with either a resolvable
3891 * private address (when privacy feature has been enabled)
3892 * or non-resolvable private address.
3894 err = hci_update_random_address(req, true, &own_addr_type);
3896 *status = MGMT_STATUS_FAILED;
3900 param_cp.type = LE_SCAN_ACTIVE;
3901 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3902 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3903 param_cp.own_address_type = own_addr_type;
3904 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3907 memset(&enable_cp, 0, sizeof(enable_cp));
3908 enable_cp.enable = LE_SCAN_ENABLE;
3909 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3910 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3915 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback for both START_DISCOVERY and
 * START_SERVICE_DISCOVERY: report the result, set the discovery state,
 * and for LE/interleaved discovery schedule le_scan_disable with the
 * proper timeout.  Records scan start/duration for controllers with a
 * strict duplicate filter so scanning can be restarted mid-discovery.
 * NOTE(review): excerpt omits interior lines (lock, breaks, goto
 * labels); verify against the full source.
 */
3922 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3925 struct mgmt_pending_cmd *cmd;
3926 unsigned long timeout;
3928 BT_DBG("status %d", status);
3932 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3934 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3937 cmd->cmd_complete(cmd, mgmt_status(status));
3938 mgmt_pending_remove(cmd);
3942 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3946 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3948 /* If the scan involves LE scan, pick proper timeout to schedule
3949 * hdev->le_scan_disable that will stop it.
3951 switch (hdev->discovery.type) {
3952 case DISCOV_TYPE_LE:
3953 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3955 case DISCOV_TYPE_INTERLEAVED:
3956 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3958 case DISCOV_TYPE_BREDR:
3962 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3968 /* When service discovery is used and the controller has
3969 * a strict duplicate filter, it is important to remember
3970 * the start and duration of the scan. This is required
3971 * for restarting scanning during the discovery phase.
3973 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
3975 hdev->discovery.result_filtering) {
3976 hdev->discovery.scan_start = jiffies;
3977 hdev->discovery.scan_duration = timeout;
3980 queue_delayed_work(hdev->workqueue,
3981 &hdev->le_scan_disable, timeout);
3985 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validate power and discovery state,
 * reset the discovery filter, record the requested type, build the
 * discovery request via trigger_discovery() and run it; on success
 * the state moves to DISCOVERY_STARTING.
 * NOTE(review): excerpt omits interior lines (locks, goto labels,
 * status variable declaration); verify against the full source.
 */
3988 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3989 void *data, u16 len)
3991 struct mgmt_cp_start_discovery *cp = data;
3992 struct mgmt_pending_cmd *cmd;
3993 struct hci_request req;
3997 BT_DBG("%s", hdev->name);
4001 if (!hdev_is_powered(hdev)) {
4002 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4003 MGMT_STATUS_NOT_POWERED,
4004 &cp->type, sizeof(cp->type));
4008 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4009 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4010 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4011 MGMT_STATUS_BUSY, &cp->type,
4016 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4022 cmd->cmd_complete = generic_cmd_complete;
4024 /* Clear the discovery filter first to free any previously
4025 * allocated memory for the UUID list.
4027 hci_discovery_filter_clear(hdev);
4029 hdev->discovery.type = cp->type;
4030 hdev->discovery.report_invalid_rssi = false;
4032 hci_req_init(&req, hdev);
4034 if (!trigger_discovery(&req, &status)) {
4035 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4036 status, &cp->type, sizeof(cp->type));
4037 mgmt_pending_remove(cmd);
4041 err = hci_req_run(&req, start_discovery_complete);
4043 mgmt_pending_remove(cmd);
4047 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4050 hci_dev_unlock(hdev);
/* cmd_complete callback for START_SERVICE_DISCOVERY: echo the command
 * parameters back with the given status.
 * NOTE(review): excerpt omits the data/length arguments on the
 * continuation line; verify against the full source.
 */
4054 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4057 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * additionally validates the variable-length UUID list (bounded by
 * max_uuid_count to prevent length overflow), stores the RSSI
 * threshold and UUID filter, and enables result filtering.
 * NOTE(review): excerpt omits interior lines (locks, goto labels,
 * MGMT_STATUS_NO_RESOURCES argument for the kmemdup failure); verify
 * against the full source.
 */
4061 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4062 void *data, u16 len)
4064 struct mgmt_cp_start_service_discovery *cp = data;
4065 struct mgmt_pending_cmd *cmd;
4066 struct hci_request req;
4067 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4068 u16 uuid_count, expected_len;
4072 BT_DBG("%s", hdev->name);
4076 if (!hdev_is_powered(hdev)) {
4077 err = mgmt_cmd_complete(sk, hdev->id,
4078 MGMT_OP_START_SERVICE_DISCOVERY,
4079 MGMT_STATUS_NOT_POWERED,
4080 &cp->type, sizeof(cp->type));
4084 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4085 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4086 err = mgmt_cmd_complete(sk, hdev->id,
4087 MGMT_OP_START_SERVICE_DISCOVERY,
4088 MGMT_STATUS_BUSY, &cp->type,
4093 uuid_count = __le16_to_cpu(cp->uuid_count);
4094 if (uuid_count > max_uuid_count) {
4095 BT_ERR("service_discovery: too big uuid_count value %u",
4097 err = mgmt_cmd_complete(sk, hdev->id,
4098 MGMT_OP_START_SERVICE_DISCOVERY,
4099 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4104 expected_len = sizeof(*cp) + uuid_count * 16;
4105 if (expected_len != len) {
4106 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4108 err = mgmt_cmd_complete(sk, hdev->id,
4109 MGMT_OP_START_SERVICE_DISCOVERY,
4110 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4115 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4122 cmd->cmd_complete = service_discovery_cmd_complete;
4124 /* Clear the discovery filter first to free any previously
4125 * allocated memory for the UUID list.
4127 hci_discovery_filter_clear(hdev);
4129 hdev->discovery.result_filtering = true;
4130 hdev->discovery.type = cp->type;
4131 hdev->discovery.rssi = cp->rssi;
4132 hdev->discovery.uuid_count = uuid_count;
4134 if (uuid_count > 0) {
4135 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4137 if (!hdev->discovery.uuids) {
4138 err = mgmt_cmd_complete(sk, hdev->id,
4139 MGMT_OP_START_SERVICE_DISCOVERY,
4141 &cp->type, sizeof(cp->type));
4142 mgmt_pending_remove(cmd);
4147 hci_req_init(&req, hdev);
4149 if (!trigger_discovery(&req, &status)) {
4150 err = mgmt_cmd_complete(sk, hdev->id,
4151 MGMT_OP_START_SERVICE_DISCOVERY,
4152 status, &cp->type, sizeof(cp->type));
4153 mgmt_pending_remove(cmd);
4157 err = hci_req_run(&req, start_discovery_complete);
4159 mgmt_pending_remove(cmd);
4163 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4166 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_STOP_DISCOVERY: complete
 * the pending command and, on success, mark discovery stopped.
 * NOTE(review): excerpt omits interior lines (lock, NULL-cmd check);
 * verify against the full source.
 */
4170 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4172 struct mgmt_pending_cmd *cmd;
4174 BT_DBG("status %d", status);
4178 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4180 cmd->cmd_complete(cmd, mgmt_status(status));
4181 mgmt_pending_remove(cmd);
4185 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4187 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: reject when discovery is not active
 * or the type does not match, otherwise queue hci_stop_discovery();
 * if the request generated no HCI commands (-ENODATA) complete
 * immediately and mark discovery stopped.
 * NOTE(review): excerpt omits interior lines (locks, goto labels);
 * verify against the full source.
 */
4190 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4193 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4194 struct mgmt_pending_cmd *cmd;
4195 struct hci_request req;
4198 BT_DBG("%s", hdev->name);
4202 if (!hci_discovery_active(hdev)) {
4203 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4204 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4205 sizeof(mgmt_cp->type));
4209 if (hdev->discovery.type != mgmt_cp->type) {
4210 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4211 MGMT_STATUS_INVALID_PARAMS,
4212 &mgmt_cp->type, sizeof(mgmt_cp->type));
4216 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4222 cmd->cmd_complete = generic_cmd_complete;
4224 hci_req_init(&req, hdev);
4226 hci_stop_discovery(&req);
4228 err = hci_req_run(&req, stop_discovery_complete);
4230 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4234 mgmt_pending_remove(cmd);
4236 /* If no HCI commands were sent we're done */
4237 if (err == -ENODATA) {
4238 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4239 &mgmt_cp->type, sizeof(mgmt_cp->type));
4240 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4244 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during active discovery, mark an
 * inquiry-cache entry's name as known, or as needed (triggering a
 * name-resolution update) when not known.
 * NOTE(review): excerpt omits interior lines (locks, goto labels);
 * verify against the full source.
 */
4248 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4251 struct mgmt_cp_confirm_name *cp = data;
4252 struct inquiry_entry *e;
4255 BT_DBG("%s", hdev->name);
4259 if (!hci_discovery_active(hdev)) {
4260 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4261 MGMT_STATUS_FAILED, &cp->addr,
4266 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4268 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4269 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4274 if (cp->name_known) {
4275 e->name_state = NAME_KNOWN;
4278 e->name_state = NAME_NEEDED;
4279 hci_inquiry_cache_update_resolve(hdev, e);
4282 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4283 &cp->addr, sizeof(cp->addr));
4286 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to hdev->blacklist and
 * emit MGMT_EV_DEVICE_BLOCKED on success.
 * NOTE(review): excerpt omits interior lines (lock, goto to the
 * completion path on failure); verify against the full source.
 */
4290 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4293 struct mgmt_cp_block_device *cp = data;
4297 BT_DBG("%s", hdev->name);
4299 if (!bdaddr_type_is_valid(cp->addr.type))
4300 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4301 MGMT_STATUS_INVALID_PARAMS,
4302 &cp->addr, sizeof(cp->addr));
4306 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4309 status = MGMT_STATUS_FAILED;
4313 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4315 status = MGMT_STATUS_SUCCESS;
4318 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4319 &cp->addr, sizeof(cp->addr));
4321 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from
 * hdev->blacklist and emit MGMT_EV_DEVICE_UNBLOCKED on success; a
 * failed removal reports INVALID_PARAMS (address was not blocked).
 * NOTE(review): excerpt omits interior lines (lock, goto to the
 * completion path on failure); verify against the full source.
 */
4326 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4329 struct mgmt_cp_unblock_device *cp = data;
4333 BT_DBG("%s", hdev->name);
4335 if (!bdaddr_type_is_valid(cp->addr.type))
4336 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4337 MGMT_STATUS_INVALID_PARAMS,
4338 &cp->addr, sizeof(cp->addr));
4342 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4345 status = MGMT_STATUS_INVALID_PARAMS;
4349 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4351 status = MGMT_STATUS_SUCCESS;
4354 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4355 &cp->addr, sizeof(cp->addr));
4357 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: validate the DI source (0x0000 to
 * 0x0002), store source/vendor/product/version on hdev, acknowledge,
 * and run an HCI request (presumably to refresh the EIR; the call
 * between init and run is omitted in this excerpt).
 * NOTE(review): excerpt omits interior lines (lock, the request body
 * between hci_req_init and hci_req_run); verify against the full
 * source.
 */
4362 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4365 struct mgmt_cp_set_device_id *cp = data;
4366 struct hci_request req;
4370 BT_DBG("%s", hdev->name);
4372 source = __le16_to_cpu(cp->source);
4374 if (source > 0x0002)
4375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4376 MGMT_STATUS_INVALID_PARAMS);
4380 hdev->devid_source = source;
4381 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4382 hdev->devid_product = __le16_to_cpu(cp->product);
4383 hdev->devid_version = __le16_to_cpu(cp->version);
4385 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4388 hci_req_init(&req, hdev);
4390 hci_req_run(&req, NULL);
4392 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_ADVERTISING: on
 * error, fail every pending SET_ADVERTISING command; on success,
 * mirror the controller's HCI_LE_ADV state into the HCI_ADVERTISING
 * flag, answer the pending commands and broadcast new settings.
 * NOTE(review): excerpt omits interior lines (lock, goto labels);
 * verify against the full source.
 */
4397 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4400 struct cmd_lookup match = { NULL, hdev };
4405 u8 mgmt_err = mgmt_status(status);
4407 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4408 cmd_status_rsp, &mgmt_err);
4412 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4413 hci_dev_set_flag(hdev, HCI_ADVERTISING);
4415 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4417 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4420 new_settings(hdev, match.sk);
4426 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: val 0x00 disables, 0x01 enables,
 * 0x02 enables connectable advertising.  When no HCI traffic is
 * possible or needed (powered off, no change, LE connection present,
 * or active LE scan), only the flags are toggled and settings are
 * sent directly; otherwise an enable/disable request is queued and
 * finished by set_advertising_complete().
 * NOTE(review): excerpt omits interior lines (locks, goto labels,
 * val assignment, MGMT_STATUS_BUSY argument); verify against the
 * full source.
 */
4429 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4432 struct mgmt_mode *cp = data;
4433 struct mgmt_pending_cmd *cmd;
4434 struct hci_request req;
4438 BT_DBG("request for %s", hdev->name);
4440 status = mgmt_le_support(hdev);
4442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4445 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4447 MGMT_STATUS_INVALID_PARAMS);
4453 /* The following conditions are ones which mean that we should
4454 * not do any HCI communication but directly send a mgmt
4455 * response to user space (after toggling the flag if
4458 if (!hdev_is_powered(hdev) ||
4459 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4460 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4461 hci_conn_num(hdev, LE_LINK) > 0 ||
4462 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4463 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4467 changed = !test_and_set_bit(HCI_ADVERTISING,
4469 if (cp->val == 0x02)
4470 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4472 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4474 changed = test_and_clear_bit(HCI_ADVERTISING,
4476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4479 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4484 err = new_settings(hdev, sk);
4489 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4490 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4491 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4496 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4502 hci_req_init(&req, hdev);
4504 if (cp->val == 0x02)
4505 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4507 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4510 enable_advertising(&req);
4512 disable_advertising(&req);
4514 err = hci_req_run(&req, set_advertising_complete);
4516 mgmt_pending_remove(cmd);
4519 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler.
 *
 * Stores a static random address in hdev->static_addr. Rejected while
 * the controller is powered (the address only takes effect on power-on)
 * and requires LE capability. BDADDR_ANY clears the address.
 */
4523 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4524 			      void *data, u16 len)
4526 	struct mgmt_cp_set_static_address *cp = data;
4529 	BT_DBG("%s", hdev->name);
4531 	if (!lmp_le_capable(hdev))
4532 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4533 				       MGMT_STATUS_NOT_SUPPORTED);
4535 	if (hdev_is_powered(hdev))
4536 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4537 				       MGMT_STATUS_REJECTED);
4539 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4540 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4541 			return mgmt_cmd_status(sk, hdev->id,
4542 					       MGMT_OP_SET_STATIC_ADDRESS,
4543 					       MGMT_STATUS_INVALID_PARAMS);
4545 		/* Two most significant bits shall be set */
4546 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4547 			return mgmt_cmd_status(sk, hdev->id,
4548 					       MGMT_OP_SET_STATIC_ADDRESS,
4549 					       MGMT_STATUS_INVALID_PARAMS);
4554 	bacpy(&hdev->static_addr, &cp->bdaddr);
4556 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4560 	err = new_settings(hdev, sk);
4563 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler.
 *
 * Validates and stores the LE scan interval and window (both must lie in
 * 0x0004..0x4000 and window <= interval, matching the HCI LE Set Scan
 * Parameters value range). If a background (passive) scan is currently
 * running, it is restarted so the new values take effect immediately.
 */
4567 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4568 			   void *data, u16 len)
4570 	struct mgmt_cp_set_scan_params *cp = data;
4571 	__u16 interval, window;
4574 	BT_DBG("%s", hdev->name);
4576 	if (!lmp_le_capable(hdev))
4577 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4578 				       MGMT_STATUS_NOT_SUPPORTED);
4580 	interval = __le16_to_cpu(cp->interval);
4582 	if (interval < 0x0004 || interval > 0x4000)
4583 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4584 				       MGMT_STATUS_INVALID_PARAMS);
4586 	window = __le16_to_cpu(cp->window);
4588 	if (window < 0x0004 || window > 0x4000)
4589 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4590 				       MGMT_STATUS_INVALID_PARAMS);
4592 	if (window > interval)
4593 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4594 				       MGMT_STATUS_INVALID_PARAMS);
4598 	hdev->le_scan_interval = interval;
4599 	hdev->le_scan_window = window;
4601 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4604 	/* If background scan is running, restart it so new parameters are
4607 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4608 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4609 		struct hci_request req;
4611 		hci_req_init(&req, hdev);
4613 		hci_req_add_le_scan_disable(&req);
4614 		hci_req_add_le_passive_scan(&req);
4616 		hci_req_run(&req, NULL);
4619 	hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Looks up the pending mgmt command; on HCI error forwards the status to
 * user space, otherwise syncs the HCI_FAST_CONNECTABLE flag with the
 * requested mode and emits updated settings.
 */
4624 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4627 	struct mgmt_pending_cmd *cmd;
4629 	BT_DBG("status 0x%02x", status);
4633 	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4638 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4639 				mgmt_status(status));
4641 		struct mgmt_mode *cp = cmd->param;
4644 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4646 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4648 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4649 		new_settings(hdev, cmd->sk);
4652 	mgmt_pending_remove(cmd);
4655 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 *
 * Requires BR/EDR enabled and controller version >= 1.2 (interlaced page
 * scan). While unpowered only the flag is toggled; otherwise a page-scan
 * activity change is queued and completed in fast_connectable_complete().
 */
4658 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4659 				void *data, u16 len)
4661 	struct mgmt_mode *cp = data;
4662 	struct mgmt_pending_cmd *cmd;
4663 	struct hci_request req;
4666 	BT_DBG("%s", hdev->name);
4668 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4669 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
4670 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4671 				       MGMT_STATUS_NOT_SUPPORTED);
4673 	if (cp->val != 0x00 && cp->val != 0x01)
4674 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4675 				       MGMT_STATUS_INVALID_PARAMS);
4679 	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4680 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
	/* No change requested: just acknowledge current settings. */
4685 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4686 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4691 	if (!hdev_is_powered(hdev)) {
4692 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4693 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4695 		new_settings(hdev, sk);
4699 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4706 	hci_req_init(&req, hdev);
4708 	write_fast_connectable(&req, cp->val);
4710 	err = hci_req_run(&req, fast_connectable_complete);
4712 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4713 				      MGMT_STATUS_FAILED);
4714 		mgmt_pending_remove(cmd);
4718 	hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_BREDR.
 *
 * On HCI failure the HCI_BREDR_ENABLED flag (flipped optimistically in
 * set_bredr() before the request ran) is rolled back and the error is
 * forwarded; on success settings are acknowledged and broadcast.
 */
4723 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4725 	struct mgmt_pending_cmd *cmd;
4727 	BT_DBG("status 0x%02x", status);
4731 	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4736 		u8 mgmt_err = mgmt_status(status);
4738 		/* We need to restore the flag if related HCI commands
4741 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4743 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4745 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4746 		new_settings(hdev, cmd->sk);
4749 	mgmt_pending_remove(cmd);
4752 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE) controller. LE must stay enabled (BR/EDR-only is set via
 * other means), disabling while powered is rejected, and re-enabling is
 * rejected when a static identity address or secure connections is in
 * use (see the long comment below). On enable the flag is flipped before
 * the HCI request so update_adv_data() emits correct AD flags; rollback
 * on failure happens in set_bredr_complete().
 */
4755 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4757 	struct mgmt_mode *cp = data;
4758 	struct mgmt_pending_cmd *cmd;
4759 	struct hci_request req;
4762 	BT_DBG("request for %s", hdev->name);
4764 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4765 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4766 				       MGMT_STATUS_NOT_SUPPORTED);
4768 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4769 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4770 				       MGMT_STATUS_REJECTED);
4772 	if (cp->val != 0x00 && cp->val != 0x01)
4773 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4774 				       MGMT_STATUS_INVALID_PARAMS);
4778 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4779 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4783 	if (!hdev_is_powered(hdev)) {
	/* Disabling BR/EDR while unpowered clears all BR/EDR-only
	 * settings along with it.
	 */
4785 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4786 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4787 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4788 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4789 			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4792 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4794 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4798 		err = new_settings(hdev, sk);
4802 	/* Reject disabling when powered on */
4804 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4805 				      MGMT_STATUS_REJECTED);
4808 		/* When configuring a dual-mode controller to operate
4809 		 * with LE only and using a static address, then switching
4810 		 * BR/EDR back on is not allowed.
4812 		 * Dual-mode controllers shall operate with the public
4813 		 * address as its identity address for BR/EDR and LE. So
4814 		 * reject the attempt to create an invalid configuration.
4816 		 * The same restrictions applies when secure connections
4817 		 * has been enabled. For BR/EDR this is a controller feature
4818 		 * while for LE it is a host stack feature. This means that
4819 		 * switching BR/EDR back on when secure connections has been
4820 		 * enabled is not a supported transaction.
4822 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4823 		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4824 		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4825 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4826 					      MGMT_STATUS_REJECTED);
4831 	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4832 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4837 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4843 	/* We need to flip the bit already here so that update_adv_data
4844 	 * generates the correct flags.
4846 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4848 	hci_req_init(&req, hdev);
4850 	write_fast_connectable(&req, false);
4851 	__hci_update_page_scan(&req);
4853 	/* Since only the advertising data flags will change, there
4854 	 * is no need to update the scan response data.
4856 	update_adv_data(&req);
4858 	err = hci_req_run(&req, set_bredr_complete);
4860 		mgmt_pending_remove(cmd);
4863 	hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_SECURE_CONN.
 *
 * On failure clears both SC flags; on success sets HCI_SC_ENABLED and
 * sets/clears HCI_SC_ONLY depending on the requested mode (0x02 means
 * "SC only"). Elided lines presumably switch on cmd->param's value —
 * TODO confirm against full source.
 */
4867 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4869 	struct mgmt_pending_cmd *cmd;
4870 	struct mgmt_mode *cp;
4872 	BT_DBG("%s status %u", hdev->name, status);
4876 	cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4881 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4882 				mgmt_status(status));
4890 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4891 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4894 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4895 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4898 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4899 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
4903 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4904 	new_settings(hdev, cmd->sk);
4907 	mgmt_pending_remove(cmd);
4909 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler.
 *
 * cp->val: 0x00 = off, 0x01 = SC enabled, 0x02 = SC only mode. When the
 * controller is unpowered, not SC-capable, or BR/EDR is disabled, only
 * the host flags are toggled; otherwise HCI_OP_WRITE_SC_SUPPORT is sent
 * and sc_enable_complete() finishes the transaction.
 */
4912 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4913 			   void *data, u16 len)
4915 	struct mgmt_mode *cp = data;
4916 	struct mgmt_pending_cmd *cmd;
4917 	struct hci_request req;
4921 	BT_DBG("request for %s", hdev->name);
4923 	if (!lmp_sc_capable(hdev) &&
4924 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4925 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4926 				       MGMT_STATUS_NOT_SUPPORTED);
	/* BR/EDR SC requires SSP to be enabled first. */
4928 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4929 	    lmp_sc_capable(hdev) &&
4930 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4931 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4932 				       MGMT_STATUS_REJECTED);
4934 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4935 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4936 				       MGMT_STATUS_INVALID_PARAMS);
	/* Flag-only path: no HCI traffic needed. */
4940 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4941 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4945 			changed = !test_and_set_bit(HCI_SC_ENABLED,
4947 			if (cp->val == 0x02)
4948 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
4950 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4952 			changed = test_and_clear_bit(HCI_SC_ENABLED,
4954 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4957 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4962 			err = new_settings(hdev, sk);
4967 	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4968 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
	/* Already in the requested state: acknowledge without HCI traffic. */
4975 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4976 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4977 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4981 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4987 	hci_req_init(&req, hdev);
4988 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4989 	err = hci_req_run(&req, sc_enable_complete);
4991 		mgmt_pending_remove(cmd);
4996 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep debug keys
 * (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally generate debug keys
 * (HCI_USE_DEBUG_KEYS). When powered and SSP is on, the controller's
 * SSP debug mode is updated to match via HCI_OP_WRITE_SSP_DEBUG_MODE.
 */
5000 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5001 			  void *data, u16 len)
5003 	struct mgmt_mode *cp = data;
5004 	bool changed, use_changed;
5007 	BT_DBG("request for %s", hdev->name);
5009 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5010 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5011 				       MGMT_STATUS_INVALID_PARAMS);
5016 		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
5019 		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
5022 	if (cp->val == 0x02)
5023 		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
5026 		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
5029 	if (hdev_is_powered(hdev) && use_changed &&
5030 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5031 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5032 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5033 			     sizeof(mode), &mode);
5036 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5041 	err = new_settings(hdev, sk);
5044 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler.
 *
 * Enables/disables LE privacy (RPA usage). Only allowed while the
 * controller is powered off. Enabling stores the supplied IRK in
 * hdev->irk and marks the RPA as expired so a fresh one is generated on
 * power-on; disabling zeroes the IRK.
 */
5048 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5051 	struct mgmt_cp_set_privacy *cp = cp_data;
5055 	BT_DBG("request for %s", hdev->name);
5057 	if (!lmp_le_capable(hdev))
5058 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5059 				       MGMT_STATUS_NOT_SUPPORTED);
5061 	if (cp->privacy != 0x00 && cp->privacy != 0x01)
5062 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5063 				       MGMT_STATUS_INVALID_PARAMS);
5065 	if (hdev_is_powered(hdev))
5066 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5067 				       MGMT_STATUS_REJECTED);
5071 	/* If user space supports this command it is also expected to
5072 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5074 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5077 		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
5078 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5079 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5081 		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
5082 		memset(hdev->irk, 0, sizeof(hdev->irk));
5083 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5086 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5091 	err = new_settings(hdev, sk);
5094 	hci_dev_unlock(hdev);
/* Validate an IRK entry's address: public LE addresses are always
 * accepted; static random addresses must have the two most significant
 * bits set per the Core spec. (Return statements for the accept/reject
 * paths are on lines elided from this view.)
 */
5098 static bool irk_is_valid(struct mgmt_irk_info *irk)
5100 	switch (irk->addr.type) {
5101 	case BDADDR_LE_PUBLIC:
5104 	case BDADDR_LE_RANDOM:
5105 		/* Two most significant bits shall be set */
5106 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the SMP IRK store with the list supplied by user space.
 * irk_count is bounded by max_irk_count (derived from U16_MAX) and the
 * total payload length is cross-checked against it before any entry is
 * touched; every entry is validated with irk_is_valid() before the old
 * store is cleared.
 */
5114 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5117 	struct mgmt_cp_load_irks *cp = cp_data;
5118 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5119 				   sizeof(struct mgmt_irk_info));
5120 	u16 irk_count, expected_len;
5123 	BT_DBG("request for %s", hdev->name);
5125 	if (!lmp_le_capable(hdev))
5126 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5127 				       MGMT_STATUS_NOT_SUPPORTED);
5129 	irk_count = __le16_to_cpu(cp->irk_count);
5130 	if (irk_count > max_irk_count) {
5131 		BT_ERR("load_irks: too big irk_count value %u", irk_count);
5132 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5133 				       MGMT_STATUS_INVALID_PARAMS);
5136 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5137 	if (expected_len != len) {
5138 		BT_ERR("load_irks: expected %u bytes, got %u bytes",
5140 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5141 				       MGMT_STATUS_INVALID_PARAMS);
5144 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
	/* Validate every entry before clearing the existing store. */
5146 	for (i = 0; i < irk_count; i++) {
5147 		struct mgmt_irk_info *key = &cp->irks[i];
5149 		if (!irk_is_valid(key))
5150 			return mgmt_cmd_status(sk, hdev->id,
5152 					       MGMT_STATUS_INVALID_PARAMS);
5157 	hci_smp_irks_clear(hdev);
5159 	for (i = 0; i < irk_count; i++) {
5160 		struct mgmt_irk_info *irk = &cp->irks[i];
5163 		if (irk->addr.type == BDADDR_LE_PUBLIC)
5164 			addr_type = ADDR_LE_DEV_PUBLIC;
5166 			addr_type = ADDR_LE_DEV_RANDOM;
5168 		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
	/* User space that loads IRKs is expected to resolve RPAs itself. */
5172 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5174 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5176 	hci_dev_unlock(hdev);
/* Validate an LTK entry: master must be 0/1 and, for static random
 * addresses, the two most significant address bits must be set. (The
 * accept/reject return statements are on lines elided from this view.)
 */
5181 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5183 	if (key->master != 0x00 && key->master != 0x01)
5186 	switch (key->addr.type) {
5187 	case BDADDR_LE_PUBLIC:
5190 	case BDADDR_LE_RANDOM:
5191 		/* Two most significant bits shall be set */
5192 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the SMP LTK store. Same defensive pattern as load_irks():
 * key_count bounded by max_key_count, payload length cross-checked,
 * every entry validated via ltk_is_valid() before the store is cleared.
 * The mgmt LTK type is mapped onto the SMP key type and authentication
 * level for hci_add_ltk().
 */
5200 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5201 			       void *cp_data, u16 len)
5203 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
5204 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5205 				   sizeof(struct mgmt_ltk_info));
5206 	u16 key_count, expected_len;
5209 	BT_DBG("request for %s", hdev->name);
5211 	if (!lmp_le_capable(hdev))
5212 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5213 				       MGMT_STATUS_NOT_SUPPORTED);
5215 	key_count = __le16_to_cpu(cp->key_count);
5216 	if (key_count > max_key_count) {
5217 		BT_ERR("load_ltks: too big key_count value %u", key_count);
5218 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5219 				       MGMT_STATUS_INVALID_PARAMS);
5222 	expected_len = sizeof(*cp) + key_count *
5223 					sizeof(struct mgmt_ltk_info);
5224 	if (expected_len != len) {
5225 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
5227 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5228 				       MGMT_STATUS_INVALID_PARAMS);
5231 	BT_DBG("%s key_count %u", hdev->name, key_count);
	/* Validate every entry before clearing the existing store. */
5233 	for (i = 0; i < key_count; i++) {
5234 		struct mgmt_ltk_info *key = &cp->keys[i];
5236 		if (!ltk_is_valid(key))
5237 			return mgmt_cmd_status(sk, hdev->id,
5238 					       MGMT_OP_LOAD_LONG_TERM_KEYS,
5239 					       MGMT_STATUS_INVALID_PARAMS);
5244 	hci_smp_ltks_clear(hdev);
5246 	for (i = 0; i < key_count; i++) {
5247 		struct mgmt_ltk_info *key = &cp->keys[i];
5248 		u8 type, addr_type, authenticated;
5250 		if (key->addr.type == BDADDR_LE_PUBLIC)
5251 			addr_type = ADDR_LE_DEV_PUBLIC;
5253 			addr_type = ADDR_LE_DEV_RANDOM;
	/* Map the mgmt key type onto SMP key type + authenticated flag. */
5255 		switch (key->type) {
5256 		case MGMT_LTK_UNAUTHENTICATED:
5257 			authenticated = 0x00;
5258 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5260 		case MGMT_LTK_AUTHENTICATED:
5261 			authenticated = 0x01;
5262 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5264 		case MGMT_LTK_P256_UNAUTH:
5265 			authenticated = 0x00;
5266 			type = SMP_LTK_P256;
5268 		case MGMT_LTK_P256_AUTH:
5269 			authenticated = 0x01;
5270 			type = SMP_LTK_P256;
5272 		case MGMT_LTK_P256_DEBUG:
5273 			authenticated = 0x00;
5274 			type = SMP_LTK_P256_DEBUG;
5279 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5280 			    authenticated, key->val, key->enc_size, key->ediv,
5284 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5287 	hci_dev_unlock(hdev);
/* cmd_complete handler for MGMT_OP_GET_CONN_INFO pending commands.
 *
 * Builds the reply from the hci_conn cached values on success or from
 * the INVALID sentinels on failure, sends it, and drops the connection
 * reference taken in get_conn_info(). The address portion of the reply
 * is copied from the original command parameters (cmd->param).
 */
5292 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5294 	struct hci_conn *conn = cmd->user_data;
5295 	struct mgmt_rp_get_conn_info rp;
5298 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5300 	if (status == MGMT_STATUS_SUCCESS) {
5301 		rp.rssi = conn->rssi;
5302 		rp.tx_power = conn->tx_power;
5303 		rp.max_tx_power = conn->max_tx_power;
5305 		rp.rssi = HCI_RSSI_INVALID;
5306 		rp.tx_power = HCI_TX_POWER_INVALID;
5307 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5310 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5311 				status, &rp, sizeof(rp));
	/* Balances the hci_conn_hold() done when the command was queued. */
5313 	hci_conn_drop(conn);
/* HCI request completion callback for the RSSI/TX-power refresh issued
 * by get_conn_info(). Recovers the connection handle from the last sent
 * command (Read RSSI or Read TX Power share the handle as their first
 * field), finds the matching pending mgmt command and completes it.
 */
5319 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5322 	struct hci_cp_read_rssi *cp;
5323 	struct mgmt_pending_cmd *cmd;
5324 	struct hci_conn *conn;
5328 	BT_DBG("status 0x%02x", hci_status);
5332 	/* Commands sent in request are either Read RSSI or Read Transmit Power
5333 	 * Level so we check which one was last sent to retrieve connection
5334 	 * handle. Both commands have handle as first parameter so it's safe to
5335 	 * cast data on the same command struct.
5337 	 * First command sent is always Read RSSI and we fail only if it fails.
5338 	 * In other case we simply override error to indicate success as we
5339 	 * already remembered if TX power value is actually valid.
5341 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5343 		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5344 		status = MGMT_STATUS_SUCCESS;
5346 		status = mgmt_status(hci_status);
5350 		BT_ERR("invalid sent_cmd in conn_info response");
5354 	handle = __le16_to_cpu(cp->handle);
5355 	conn = hci_conn_hash_lookup_handle(hdev, handle);
5357 		BT_ERR("unknown handle (%d) in conn_info response", handle);
5361 	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5365 	cmd->cmd_complete(cmd, status);
5366 	mgmt_pending_remove(cmd);
5369 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler.
 *
 * Returns RSSI / TX power / max TX power for a connected device. Cached
 * values are returned directly when still fresh; otherwise an HCI
 * request (Read RSSI, plus Read TX Power where still unknown) is queued
 * and completed asynchronously in conn_info_refresh_complete(). A
 * per-connection pending command guards against concurrent queries.
 */
5372 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5375 	struct mgmt_cp_get_conn_info *cp = data;
5376 	struct mgmt_rp_get_conn_info rp;
5377 	struct hci_conn *conn;
5378 	unsigned long conn_info_age;
5381 	BT_DBG("%s", hdev->name);
5383 	memset(&rp, 0, sizeof(rp));
5384 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5385 	rp.addr.type = cp->addr.type;
5387 	if (!bdaddr_type_is_valid(cp->addr.type))
5388 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5389 					 MGMT_STATUS_INVALID_PARAMS,
5394 	if (!hdev_is_powered(hdev)) {
5395 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5396 					MGMT_STATUS_NOT_POWERED, &rp,
5401 	if (cp->addr.type == BDADDR_BREDR)
5402 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5405 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5407 	if (!conn || conn->state != BT_CONNECTED) {
5408 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5409 					MGMT_STATUS_NOT_CONNECTED, &rp,
	/* Only one GET_CONN_INFO may be in flight per connection. */
5414 	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5415 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5416 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
5420 	/* To avoid client trying to guess when to poll again for information we
5421 	 * calculate conn info age as random value between min/max set in hdev.
5423 	conn_info_age = hdev->conn_info_min_age +
5424 			prandom_u32_max(hdev->conn_info_max_age -
5425 					hdev->conn_info_min_age);
5427 	/* Query controller to refresh cached values if they are too old or were
5430 	if (time_after(jiffies, conn->conn_info_timestamp +
5431 		       msecs_to_jiffies(conn_info_age)) ||
5432 	    !conn->conn_info_timestamp) {
5433 		struct hci_request req;
5434 		struct hci_cp_read_tx_power req_txp_cp;
5435 		struct hci_cp_read_rssi req_rssi_cp;
5436 		struct mgmt_pending_cmd *cmd;
5438 		hci_req_init(&req, hdev);
5439 		req_rssi_cp.handle = cpu_to_le16(conn->handle);
5440 		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5443 		/* For LE links TX power does not change thus we don't need to
5444 		 * query for it once value is known.
5446 		if (!bdaddr_type_is_le(cp->addr.type) ||
5447 		    conn->tx_power == HCI_TX_POWER_INVALID) {
5448 			req_txp_cp.handle = cpu_to_le16(conn->handle);
5449 			req_txp_cp.type = 0x00;
5450 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5451 				    sizeof(req_txp_cp), &req_txp_cp);
5454 		/* Max TX power needs to be read only once per connection */
5455 		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5456 			req_txp_cp.handle = cpu_to_le16(conn->handle);
5457 			req_txp_cp.type = 0x01;
5458 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5459 				    sizeof(req_txp_cp), &req_txp_cp);
5462 		err = hci_req_run(&req, conn_info_refresh_complete);
5466 		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
	/* Hold the connection until the async completion runs; the
	 * matching drop happens in conn_info_cmd_complete().
	 */
5473 		hci_conn_hold(conn);
5474 		cmd->user_data = hci_conn_get(conn);
5475 		cmd->cmd_complete = conn_info_cmd_complete;
5477 		conn->conn_info_timestamp = jiffies;
5479 		/* Cache is valid, just reply with values cached in hci_conn */
5480 		rp.rssi = conn->rssi;
5481 		rp.tx_power = conn->tx_power;
5482 		rp.max_tx_power = conn->max_tx_power;
5484 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5485 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5489 	hci_dev_unlock(hdev);
/* cmd_complete handler for MGMT_OP_GET_CLOCK_INFO pending commands.
 *
 * Fills in the local clock from hdev and, when a connection was
 * involved, the piconet clock and accuracy from the hci_conn, then
 * sends the reply and releases the held references. (Guard conditions
 * and the hci_conn_put/hci_dev_put lines are elided from this view —
 * confirm against the full source.)
 */
5493 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5495 	struct hci_conn *conn = cmd->user_data;
5496 	struct mgmt_rp_get_clock_info rp;
5497 	struct hci_dev *hdev;
5500 	memset(&rp, 0, sizeof(rp));
5501 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5506 	hdev = hci_dev_get(cmd->index);
5508 		rp.local_clock = cpu_to_le32(hdev->clock);
5513 		rp.piconet_clock = cpu_to_le32(conn->clock);
5514 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5518 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5522 		hci_conn_drop(conn);
/* HCI request completion callback for MGMT_OP_GET_CLOCK_INFO.
 *
 * Recovers the hci_conn (if the last Read Clock targeted a piconet
 * clock, i.e. hci_cp->which != 0) and completes the matching pending
 * mgmt command keyed by that connection.
 */
5529 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5531 	struct hci_cp_read_clock *hci_cp;
5532 	struct mgmt_pending_cmd *cmd;
5533 	struct hci_conn *conn;
5535 	BT_DBG("%s status %u", hdev->name, status);
5539 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5543 	if (hci_cp->which) {
5544 		u16 handle = __le16_to_cpu(hci_cp->handle);
5545 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5550 	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5554 	cmd->cmd_complete(cmd, mgmt_status(status));
5555 	mgmt_pending_remove(cmd);
5558 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler.
 *
 * Reads the local clock (Read Clock with which = 0x00) and, when a
 * BR/EDR peer address is given and connected, the piconet clock
 * (which = 0x01). Replies asynchronously via get_clock_info_complete()
 * and clock_info_cmd_complete(). BDADDR_ANY requests local clock only.
 */
5561 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5564 	struct mgmt_cp_get_clock_info *cp = data;
5565 	struct mgmt_rp_get_clock_info rp;
5566 	struct hci_cp_read_clock hci_cp;
5567 	struct mgmt_pending_cmd *cmd;
5568 	struct hci_request req;
5569 	struct hci_conn *conn;
5572 	BT_DBG("%s", hdev->name);
5574 	memset(&rp, 0, sizeof(rp));
5575 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5576 	rp.addr.type = cp->addr.type;
	/* Piconet clock is a BR/EDR concept; LE addresses are invalid here. */
5578 	if (cp->addr.type != BDADDR_BREDR)
5579 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5580 					 MGMT_STATUS_INVALID_PARAMS,
5585 	if (!hdev_is_powered(hdev)) {
5586 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5587 					MGMT_STATUS_NOT_POWERED, &rp,
5592 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5593 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5595 		if (!conn || conn->state != BT_CONNECTED) {
5596 			err = mgmt_cmd_complete(sk, hdev->id,
5597 						MGMT_OP_GET_CLOCK_INFO,
5598 						MGMT_STATUS_NOT_CONNECTED,
5606 	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5612 	cmd->cmd_complete = clock_info_cmd_complete;
5614 	hci_req_init(&req, hdev);
	/* which = 0x00 (zeroed struct): local clock. */
5616 	memset(&hci_cp, 0, sizeof(hci_cp));
5617 	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	/* Held reference is released by clock_info_cmd_complete(). */
5620 		hci_conn_hold(conn);
5621 		cmd->user_data = hci_conn_get(conn);
5623 		hci_cp.handle = cpu_to_le16(conn->handle);
5624 		hci_cp.which = 0x01; /* Piconet clock */
5625 		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5628 	err = hci_req_run(&req, get_clock_info_complete);
5630 		mgmt_pending_remove(cmd);
5633 	hci_dev_unlock(hdev);
/* Check whether an LE connection to addr/type exists and is in
 * BT_CONNECTED state. (Return statements are on lines elided from
 * this view.)
 */
5637 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5639 	struct hci_conn *conn;
5641 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5645 	if (conn->dst_type != type)
5648 	if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Set (creating if needed) the auto-connect policy for an LE device.
 *
 * The params entry is detached from its current action list and, based
 * on the new policy, re-queued on pend_le_reports or pend_le_conns (the
 * latter only while not already connected); the background scan is
 * updated through the supplied request so the change takes effect.
 */
5655 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5656 			       u8 addr_type, u8 auto_connect)
5658 	struct hci_dev *hdev = req->hdev;
5659 	struct hci_conn_params *params;
5661 	params = hci_conn_params_add(hdev, addr, addr_type);
5665 	if (params->auto_connect == auto_connect)
	/* Remove from any pending action list before re-queueing. */
5668 	list_del_init(&params->action);
5670 	switch (auto_connect) {
5671 	case HCI_AUTO_CONN_DISABLED:
5672 	case HCI_AUTO_CONN_LINK_LOSS:
5673 		__hci_update_background_scan(req);
5675 	case HCI_AUTO_CONN_REPORT:
5676 		list_add(&params->action, &hdev->pend_le_reports);
5677 		__hci_update_background_scan(req);
5679 	case HCI_AUTO_CONN_DIRECT:
5680 	case HCI_AUTO_CONN_ALWAYS:
5681 		if (!is_connected(hdev, addr, addr_type)) {
5682 			list_add(&params->action, &hdev->pend_le_conns);
5683 			__hci_update_background_scan(req);
5688 	params->auto_connect = auto_connect;
5690 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Broadcast MGMT_EV_DEVICE_ADDED to all mgmt sockets except sk. */
5696 static void device_added(struct sock *sk, struct hci_dev *hdev,
5697 			 bdaddr_t *bdaddr, u8 type, u8 action)
5699 	struct mgmt_ev_device_added ev;
5701 	bacpy(&ev.addr.bdaddr, bdaddr);
5702 	ev.addr.type = type;
5705 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for MGMT_OP_ADD_DEVICE: finishes the
 * pending command with the translated HCI status.
 */
5708 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5710 	struct mgmt_pending_cmd *cmd;
5712 	BT_DBG("status 0x%02x", status);
5716 	cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5720 	cmd->cmd_complete(cmd, mgmt_status(status));
5721 	mgmt_pending_remove(cmd);
5724 	hci_dev_unlock(hdev);
/* MGMT_OP_ADD_DEVICE handler.
 *
 * cp->action: 0x00 = background scan + report (LE),
 * 0x01 = allow incoming connection (BR/EDR whitelist) / direct connect
 * (LE), 0x02 = auto-connect always (LE). BR/EDR entries go on the
 * whitelist and trigger a page-scan update; LE entries are stored as
 * connection parameters via hci_conn_params_set().
 */
5727 static int add_device(struct sock *sk, struct hci_dev *hdev,
5728 		      void *data, u16 len)
5730 	struct mgmt_cp_add_device *cp = data;
5731 	struct mgmt_pending_cmd *cmd;
5732 	struct hci_request req;
5733 	u8 auto_conn, addr_type;
5736 	BT_DBG("%s", hdev->name);
5738 	if (!bdaddr_type_is_valid(cp->addr.type) ||
5739 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5740 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5741 					 MGMT_STATUS_INVALID_PARAMS,
5742 					 &cp->addr, sizeof(cp->addr));
5744 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5745 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5746 					 MGMT_STATUS_INVALID_PARAMS,
5747 					 &cp->addr, sizeof(cp->addr));
5749 	hci_req_init(&req, hdev);
5753 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5759 	cmd->cmd_complete = addr_cmd_complete;
5761 	if (cp->addr.type == BDADDR_BREDR) {
5762 		/* Only incoming connections action is supported for now */
5763 		if (cp->action != 0x01) {
5764 			err = cmd->cmd_complete(cmd,
5765 						MGMT_STATUS_INVALID_PARAMS);
5766 			mgmt_pending_remove(cmd);
5770 		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5775 		__hci_update_page_scan(&req);
5780 	if (cp->addr.type == BDADDR_LE_PUBLIC)
5781 		addr_type = ADDR_LE_DEV_PUBLIC;
5783 		addr_type = ADDR_LE_DEV_RANDOM;
5785 	if (cp->action == 0x02)
5786 		auto_conn = HCI_AUTO_CONN_ALWAYS;
5787 	else if (cp->action == 0x01)
5788 		auto_conn = HCI_AUTO_CONN_DIRECT;
5790 		auto_conn = HCI_AUTO_CONN_REPORT;
5792 	/* If the connection parameters don't exist for this device,
5793 	 * they will be created and configured with defaults.
5795 	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5797 		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5798 		mgmt_pending_remove(cmd);
5803 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5805 	err = hci_req_run(&req, add_device_complete);
5807 		/* ENODATA means no HCI commands were needed (e.g. if
5808 		 * the adapter is powered off).
5810 		if (err == -ENODATA)
5811 			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5812 		mgmt_pending_remove(cmd);
5816 	hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_REMOVED to all mgmt sockets except sk. */
5820 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5821 			   bdaddr_t *bdaddr, u8 type)
5823 	struct mgmt_ev_device_removed ev;
5825 	bacpy(&ev.addr.bdaddr, bdaddr);
5826 	ev.addr.type = type;
5828 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for MGMT_OP_REMOVE_DEVICE: finishes
 * the pending command with the translated HCI status.
 */
5831 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5833 	struct mgmt_pending_cmd *cmd;
5835 	BT_DBG("status 0x%02x", status);
5839 	cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5843 	cmd->cmd_complete(cmd, mgmt_status(status));
5844 	mgmt_pending_remove(cmd);
5847 	hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * With a specific address: removes a BR/EDR whitelist entry (updating
 * page scan) or LE connection parameters (updating background scan).
 * With BDADDR_ANY (addr.type must then be 0): removes ALL whitelist
 * entries and all LE connection parameters except those explicitly
 * disabled, emitting a DEVICE_REMOVED event per entry.
 */
5850 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5851 			 void *data, u16 len)
5853 	struct mgmt_cp_remove_device *cp = data;
5854 	struct mgmt_pending_cmd *cmd;
5855 	struct hci_request req;
5858 	BT_DBG("%s", hdev->name);
5860 	hci_req_init(&req, hdev);
5864 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5870 	cmd->cmd_complete = addr_cmd_complete;
5872 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5873 		struct hci_conn_params *params;
5876 		if (!bdaddr_type_is_valid(cp->addr.type)) {
5877 			err = cmd->cmd_complete(cmd,
5878 						MGMT_STATUS_INVALID_PARAMS);
5879 			mgmt_pending_remove(cmd);
5883 		if (cp->addr.type == BDADDR_BREDR) {
5884 			err = hci_bdaddr_list_del(&hdev->whitelist,
5888 				err = cmd->cmd_complete(cmd,
5889 							MGMT_STATUS_INVALID_PARAMS);
5890 				mgmt_pending_remove(cmd);
5894 			__hci_update_page_scan(&req);
5896 			device_removed(sk, hdev, &cp->addr.bdaddr,
5901 		if (cp->addr.type == BDADDR_LE_PUBLIC)
5902 			addr_type = ADDR_LE_DEV_PUBLIC;
5904 			addr_type = ADDR_LE_DEV_RANDOM;
5906 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5909 			err = cmd->cmd_complete(cmd,
5910 						MGMT_STATUS_INVALID_PARAMS);
5911 			mgmt_pending_remove(cmd);
	/* Explicitly-disabled entries can't be "removed" by user space. */
5915 		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5916 			err = cmd->cmd_complete(cmd,
5917 						MGMT_STATUS_INVALID_PARAMS);
5918 			mgmt_pending_remove(cmd);
5922 		list_del(&params->action);
5923 		list_del(&params->list);
5925 		__hci_update_background_scan(&req);
5927 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	/* BDADDR_ANY: wipe everything. */
5929 		struct hci_conn_params *p, *tmp;
5930 		struct bdaddr_list *b, *btmp;
5932 		if (cp->addr.type) {
5933 			err = cmd->cmd_complete(cmd,
5934 						MGMT_STATUS_INVALID_PARAMS);
5935 			mgmt_pending_remove(cmd);
5939 		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5940 			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5945 		__hci_update_page_scan(&req);
5947 		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5948 			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5950 			device_removed(sk, hdev, &p->addr, p->addr_type);
5951 			list_del(&p->action);
5956 		BT_DBG("All LE connection parameters were removed");
5958 		__hci_update_background_scan(&req);
5962 	err = hci_req_run(&req, remove_device_complete);
5964 		/* ENODATA means no HCI commands were needed (e.g. if
5965 		 * the adapter is powered off).
5967 		if (err == -ENODATA)
5968 			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5969 		mgmt_pending_remove(cmd);
5973 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the batch supplied by userspace.  Rejects non-LE
 * controllers, overflowing counts and mismatched payload lengths; invalid
 * entries inside the batch are skipped (logged), not fatal.
 * NOTE(review): extract is sampled — braces/locals fall on missing lines. */
5977 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5980 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on param_count such that expected_len below cannot
 * exceed/overflow a u16. */
5981 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5982 sizeof(struct mgmt_conn_param));
5983 u16 param_count, expected_len;
5986 if (!lmp_le_capable(hdev))
5987 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5988 MGMT_STATUS_NOT_SUPPORTED);
5990 param_count = __le16_to_cpu(cp->param_count);
5991 if (param_count > max_param_count) {
5992 BT_ERR("load_conn_param: too big param_count value %u",
5994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5995 MGMT_STATUS_INVALID_PARAMS);
/* The variable-length command must be exactly header + count entries. */
5998 expected_len = sizeof(*cp) + param_count *
5999 sizeof(struct mgmt_conn_param);
6000 if (expected_len != len) {
6001 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6003 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6004 MGMT_STATUS_INVALID_PARAMS);
6007 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop disabled entries before loading the new set. */
6011 hci_conn_params_clear_disabled(hdev);
6013 for (i = 0; i < param_count; i++) {
6014 struct mgmt_conn_param *param = &cp->params[i];
6015 struct hci_conn_params *hci_param;
6016 u16 min, max, latency, timeout;
6019 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
/* Only LE public/random address types are meaningful here. */
6022 if (param->addr.type == BDADDR_LE_PUBLIC) {
6023 addr_type = ADDR_LE_DEV_PUBLIC;
6024 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6025 addr_type = ADDR_LE_DEV_RANDOM;
6027 BT_ERR("Ignoring invalid connection parameters");
6031 min = le16_to_cpu(param->min_interval);
6032 max = le16_to_cpu(param->max_interval);
6033 latency = le16_to_cpu(param->latency);
6034 timeout = le16_to_cpu(param->timeout);
6036 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6037 min, max, latency, timeout);
/* Range/consistency check per the LE spec; bad tuples are skipped. */
6039 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6040 BT_ERR("Ignoring invalid connection parameters");
6044 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6047 BT_ERR("Failed to add connection parameters");
6051 hci_param->conn_min_interval = min;
6052 hci_param->conn_max_interval = max;
6053 hci_param->conn_latency = latency;
6054 hci_param->supervision_timeout = timeout;
6057 hci_dev_unlock(hdev);
6059 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle externally-configured
 * state on an unconfigured controller.  Only valid while powered off and
 * when the controller advertises HCI_QUIRK_EXTERNAL_CONFIG.  A transition
 * across the configured/unconfigured boundary re-registers the index. */
6063 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6064 void *data, u16 len)
6066 struct mgmt_cp_set_external_config *cp = data;
6070 BT_DBG("%s", hdev->name);
6072 if (hdev_is_powered(hdev))
6073 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6074 MGMT_STATUS_REJECTED);
/* cp->config is a strict boolean on the wire. */
6076 if (cp->config != 0x00 && cp->config != 0x01)
6077 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6078 MGMT_STATUS_INVALID_PARAMS);
6080 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6081 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6082 MGMT_STATUS_NOT_SUPPORTED);
/* Track whether HCI_EXT_CONFIGURED actually changed state. */
6087 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
6090 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
6093 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6100 err = new_options(hdev, sk);
/* If configured-ness flipped, tear the index down and bring it back up
 * under the new identity (unconf index <-> regular index). */
6102 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6103 mgmt_index_removed(hdev);
6105 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6106 hci_dev_set_flag(hdev, HCI_CONFIG);
6107 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6109 queue_work(hdev->req_workqueue, &hdev->power_on);
6111 set_bit(HCI_RAW, &hdev->flags);
6112 mgmt_index_added(hdev);
6117 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: store a public BD_ADDR for
 * controllers that need one programmed (hdev->set_bdaddr callback).
 * Rejected while powered, for BDADDR_ANY, or without driver support.
 * If the address completes configuration, the controller is powered on
 * through the config/auto-off path. */
6121 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6122 void *data, u16 len)
6124 struct mgmt_cp_set_public_address *cp = data;
6128 BT_DBG("%s", hdev->name);
6130 if (hdev_is_powered(hdev))
6131 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6132 MGMT_STATUS_REJECTED);
/* All-zero address is not a valid public address. */
6134 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6135 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6136 MGMT_STATUS_INVALID_PARAMS);
6138 if (!hdev->set_bdaddr)
6139 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6140 MGMT_STATUS_NOT_SUPPORTED);
6144 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6145 bacpy(&hdev->public_addr, &cp->bdaddr);
6147 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6154 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6155 err = new_options(hdev, sk);
/* Becoming configured: re-register as a normal index and power on. */
6157 if (is_configured(hdev)) {
6158 mgmt_index_removed(hdev);
6160 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6162 hci_dev_set_flag(hdev, HCI_CONFIG);
6163 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6165 queue_work(hdev->req_workqueue, &hdev->power_on);
6169 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands: entry index == mgmt opcode, each entry
 * gives the handler, the expected (minimum, when HCI_MGMT_VAR_LEN) parameter
 * size, and handler flags.  Consumed by mgmt_control() below.
 * NOTE(review): the flags field of several entries falls on lines missing
 * from this extract (e.g. the VAR_LEN/NO_HDEV flags after load_link_keys). */
6173 static const struct hci_mgmt_handler mgmt_handlers[] = {
6174 { NULL }, /* 0x0000 (no command) */
6175 { read_version, MGMT_READ_VERSION_SIZE,
6177 { read_commands, MGMT_READ_COMMANDS_SIZE,
6179 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6181 { read_controller_info, MGMT_READ_INFO_SIZE, 0 },
6182 { set_powered, MGMT_SETTING_SIZE, 0 },
6183 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE, 0 },
6184 { set_connectable, MGMT_SETTING_SIZE, 0 },
6185 { set_fast_connectable, MGMT_SETTING_SIZE, 0 },
6186 { set_bondable, MGMT_SETTING_SIZE, 0 },
6187 { set_link_security, MGMT_SETTING_SIZE, 0 },
6188 { set_ssp, MGMT_SETTING_SIZE, 0 },
6189 { set_hs, MGMT_SETTING_SIZE, 0 },
6190 { set_le, MGMT_SETTING_SIZE, 0 },
6191 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE, 0 },
6192 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE, 0 },
6193 { add_uuid, MGMT_ADD_UUID_SIZE, 0 },
6194 { remove_uuid, MGMT_REMOVE_UUID_SIZE, 0 },
6195 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6197 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6199 { disconnect, MGMT_DISCONNECT_SIZE, 0 },
6200 { get_connections, MGMT_GET_CONNECTIONS_SIZE, 0 },
6201 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE, 0 },
6202 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE, 0 },
6203 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE, 0 },
6204 { pair_device, MGMT_PAIR_DEVICE_SIZE, 0 },
6205 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE, 0 },
6206 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE, 0 },
6207 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE, 0 },
6208 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE, 0 },
6209 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE, 0 },
6210 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE, 0 },
6211 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6212 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6214 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE, 0 },
6215 { start_discovery, MGMT_START_DISCOVERY_SIZE, 0 },
6216 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE, 0 },
6217 { confirm_name, MGMT_CONFIRM_NAME_SIZE, 0 },
6218 { block_device, MGMT_BLOCK_DEVICE_SIZE, 0 },
6219 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE, 0 },
6220 { set_device_id, MGMT_SET_DEVICE_ID_SIZE, 0 },
6221 { set_advertising, MGMT_SETTING_SIZE, 0 },
6222 { set_bredr, MGMT_SETTING_SIZE, 0 },
6223 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE, 0 },
6224 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE, 0 },
6225 { set_secure_conn, MGMT_SETTING_SIZE, 0 },
6226 { set_debug_keys, MGMT_SETTING_SIZE, 0 },
6227 { set_privacy, MGMT_SET_PRIVACY_SIZE, 0 },
6228 { load_irks, MGMT_LOAD_IRKS_SIZE,
6230 { get_conn_info, MGMT_GET_CONN_INFO_SIZE, 0 },
6231 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE, 0 },
6232 { add_device, MGMT_ADD_DEVICE_SIZE, 0 },
6233 { remove_device, MGMT_REMOVE_DEVICE_SIZE, 0 },
6234 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6236 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6238 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6239 HCI_MGMT_UNCONFIGURED },
6240 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6241 HCI_MGMT_UNCONFIGURED },
6242 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6243 HCI_MGMT_UNCONFIGURED },
6244 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
/* Entry point for mgmt messages arriving on an HCI control socket.
 * Copies the datagram, validates the header, resolves the opcode against
 * the channel's handler table, looks up and sanity-checks the target hdev
 * (index, setup/config/user-channel state, unconfigured-only commands,
 * fixed vs. variable parameter length) and finally invokes the handler. */
6248 int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
6249 struct msghdr *msg, size_t msglen)
6253 struct mgmt_hdr *hdr;
6254 u16 opcode, index, len;
6255 struct hci_dev *hdev = NULL;
6256 const struct hci_mgmt_handler *handler;
6257 bool var_len, no_hdev;
6260 BT_DBG("got %zu bytes", msglen);
6262 if (msglen < sizeof(*hdr))
6265 buf = kmalloc(msglen, GFP_KERNEL);
6269 if (memcpy_from_msg(buf, msg, msglen)) {
6275 opcode = __le16_to_cpu(hdr->opcode);
6276 index = __le16_to_cpu(hdr->index);
6277 len = __le16_to_cpu(hdr->len);
/* Header-declared payload length must match what was received. */
6279 if (len != msglen - sizeof(*hdr)) {
6284 if (opcode >= chan->handler_count ||
6285 chan->handlers[opcode].func == NULL) {
6286 BT_DBG("Unknown op %u", opcode);
6287 err = mgmt_cmd_status(sk, index, opcode,
6288 MGMT_STATUS_UNKNOWN_COMMAND);
6292 handler = &chan->handlers[opcode];
6294 if (index != MGMT_INDEX_NONE) {
6295 hdev = hci_dev_get(index);
6297 err = mgmt_cmd_status(sk, index, opcode,
6298 MGMT_STATUS_INVALID_INDEX);
/* Controllers in setup/config or claimed by a user channel are
 * hidden from the management interface. */
6302 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
6303 hci_dev_test_flag(hdev, HCI_CONFIG) ||
6304 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
6305 err = mgmt_cmd_status(sk, index, opcode,
6306 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers only accept HCI_MGMT_UNCONFIGURED commands. */
6310 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
6311 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
6312 err = mgmt_cmd_status(sk, index, opcode,
6313 MGMT_STATUS_INVALID_INDEX);
/* Commands flagged NO_HDEV must come with MGMT_INDEX_NONE and
 * vice versa. */
6318 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
6319 if (no_hdev != !hdev) {
6320 err = mgmt_cmd_status(sk, index, opcode,
6321 MGMT_STATUS_INVALID_INDEX);
6325 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
6326 if ((var_len && len < handler->data_len) ||
6327 (!var_len && len != handler->data_len)) {
6328 err = mgmt_cmd_status(sk, index, opcode,
6329 MGMT_STATUS_INVALID_PARAMS);
6334 mgmt_init_hdev(sk, hdev);
6336 cp = buf + sizeof(*hdr);
6338 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller index to mgmt listeners.
 * Raw-only and non-BR/EDR devices are not exposed; unconfigured devices
 * are announced with the UNCONF variant of the event. */
6352 void mgmt_index_added(struct hci_dev *hdev)
6354 if (hdev->dev_type != HCI_BREDR)
6357 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6360 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6361 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6363 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller index.  All still-pending mgmt commands
 * for the device are failed with INVALID_INDEX first, then the matching
 * (unconf or regular) index-removed event is emitted. */
6366 void mgmt_index_removed(struct hci_dev *hdev)
6368 u8 status = MGMT_STATUS_INVALID_INDEX;
6370 if (hdev->dev_type != HCI_BREDR)
6373 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* opcode 0 == match every pending command. */
6376 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6378 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6379 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6381 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6384 /* This function requires the caller holds hdev->lock */
/* Re-sort every known LE connection parameter entry onto the correct
 * pending action list (pend_le_conns / pend_le_reports) after power-on,
 * then kick the background scan so the lists take effect. */
6385 static void restart_le_actions(struct hci_request *req)
6387 struct hci_dev *hdev = req->hdev;
6388 struct hci_conn_params *p;
6390 list_for_each_entry(p, &hdev->le_conn_params, list) {
6391 /* Needed for AUTO_OFF case where might not "really"
6392 * have been powered off.
6394 list_del_init(&p->action);
6396 switch (p->auto_connect) {
6397 case HCI_AUTO_CONN_DIRECT:
6398 case HCI_AUTO_CONN_ALWAYS:
6399 list_add(&p->action, &hdev->pend_le_conns);
6401 case HCI_AUTO_CONN_REPORT:
6402 list_add(&p->action, &hdev->pend_le_reports);
6409 __hci_update_background_scan(req);
/* Request-completion callback for the power-on HCI request: responds to
 * all pending SET_POWERED commands and broadcasts the new settings. */
6412 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6414 struct cmd_lookup match = { NULL, hdev };
6416 BT_DBG("status 0x%02x", status);
6419 /* Register the available SMP channels (BR/EDR and LE) only
6420 * when successfully powering on the controller. This late
6421 * registration is required so that LE SMP can clearly
6422 * decide if the public address or static address is used.
6429 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6431 new_settings(hdev, match.sk);
6433 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings controller state in line with
 * the mgmt-level settings after powering on: SSP/SC host support, LE host
 * support, advertising data, auth enable, fast-connectable and page scan.
 * Returns the result of hci_req_run() (callback: powered_complete). */
6439 static int powered_update_hci(struct hci_dev *hdev)
6441 struct hci_request req;
6444 hci_req_init(&req, hdev);
/* Sync host SSP mode with the mgmt HCI_SSP_ENABLED flag. */
6446 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
6447 !lmp_host_ssp_capable(hdev)) {
6450 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6452 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6455 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6456 sizeof(support), &support);
6460 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
6461 lmp_bredr_capable(hdev)) {
6462 struct hci_cp_write_le_host_supported cp;
6467 /* Check first if we already have the right
6468 * host state (host features set)
6470 if (cp.le != lmp_host_le_capable(hdev) ||
6471 cp.simul != lmp_host_le_br_capable(hdev))
6472 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6476 if (lmp_le_capable(hdev)) {
6477 /* Make sure the controller has a good default for
6478 * advertising data. This also applies to the case
6479 * where BR/EDR was toggled during the AUTO_OFF phase.
6481 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
6482 update_adv_data(&req);
6483 update_scan_rsp_data(&req);
6486 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6487 enable_advertising(&req);
6489 restart_le_actions(&req);
/* Bring HCI_AUTH in line with the mgmt link-security setting. */
6492 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
6493 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6494 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6495 sizeof(link_sec), &link_sec);
6497 if (lmp_bredr_capable(hdev)) {
6498 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
6499 write_fast_connectable(&req, true);
6501 write_fast_connectable(&req, false);
6502 __hci_update_page_scan(&req);
6508 return hci_req_run(&req, powered_complete);
/* Notify the management layer of a power state change.  On power-on the
 * settings sync request is issued (powered_update_hci); on power-off all
 * pending commands are failed and a zero class-of-device is broadcast. */
6511 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6513 struct cmd_lookup match = { NULL, hdev };
6514 u8 status, zero_cod[] = { 0, 0, 0 };
6517 if (!hci_dev_test_flag(hdev, HCI_MGMT))
6521 if (powered_update_hci(hdev) == 0)
6524 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6529 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6531 /* If the power off is because of hdev unregistration let
6532 * use the appropriate INVALID_INDEX status. Otherwise use
6533 * NOT_POWERED. We cover both scenarios here since later in
6534 * mgmt_index_removed() any hci_conn callbacks will have already
6535 * been triggered, potentially causing misleading DISCONNECTED
6538 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
6539 status = MGMT_STATUS_INVALID_INDEX;
6541 status = MGMT_STATUS_NOT_POWERED;
6543 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Tell listeners the class of device reset to zero, if it was set. */
6545 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6546 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6547 zero_cod, sizeof(zero_cod), NULL);
6550 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command; -ERFKILL maps to the dedicated
 * RFKILLED status, anything else to the generic FAILED status. */
6558 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6560 struct mgmt_pending_cmd *cmd;
6563 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6567 if (err == -ERFKILL)
6568 status = MGMT_STATUS_RFKILLED;
6570 status = MGMT_STATUS_FAILED;
6572 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6574 mgmt_pending_remove(cmd);
/* Discoverable-timeout expiry: clear both discoverable flags, restore
 * page-scan-only on BR/EDR, refresh advertising data and broadcast the
 * resulting settings change. */
6577 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6579 struct hci_request req;
6583 /* When discoverable timeout triggers, then just make sure
6584 * the limited discoverable flag is cleared. Even in the case
6585 * of a timeout triggered from general discoverable, it is
6586 * safe to unconditionally clear the flag.
6588 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
6589 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6591 hci_req_init(&req, hdev);
6592 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6593 u8 scan = SCAN_PAGE;
6594 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6595 sizeof(scan), &scan);
6598 update_adv_data(&req);
6599 hci_req_run(&req, NULL);
6601 hdev->discov_timeout = 0;
6603 new_settings(hdev, NULL);
6605 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key;
 * store_hint tells userspace whether the key should be persisted. */
6608 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6611 struct mgmt_ev_new_link_key ev;
6613 memset(&ev, 0, sizeof(ev));
6615 ev.store_hint = persistent;
6616 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6617 ev.key.addr.type = BDADDR_BREDR;
6618 ev.key.type = key->type;
6619 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6620 ev.key.pin_len = key->pin_len;
6622 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK (type + authenticated flag) to the mgmt wire LTK type.
 * Falls back to UNAUTHENTICATED for anything unrecognised. */
6625 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6627 switch (ltk->type) {
6630 if (ltk->authenticated)
6631 return MGMT_LTK_AUTHENTICATED;
6632 return MGMT_LTK_UNAUTHENTICATED;
6634 if (ltk->authenticated)
6635 return MGMT_LTK_P256_AUTH;
6636 return MGMT_LTK_P256_UNAUTH;
6637 case SMP_LTK_P256_DEBUG:
6638 return MGMT_LTK_P256_DEBUG;
6641 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY.  The store hint is forced to zero for
 * non-identity (resolvable/non-resolvable) random addresses, since such
 * keys cannot be mapped back to a stable identity. */
6644 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6646 struct mgmt_ev_new_long_term_key ev;
6648 memset(&ev, 0, sizeof(ev));
6650 /* Devices using resolvable or non-resolvable random addresses
6651 * without providing an indentity resolving key don't require
6652 * to store long term keys. Their addresses will change the
6655 * Only when a remote device provides an identity address
6656 * make sure the long term key is stored. If the remote
6657 * identity is known, the long term keys are internally
6658 * mapped to the identity address. So allow static random
6659 * and public addresses here.
/* Top two address bits 11 == static random (an identity address);
 * anything else random is not storable. */
6661 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6662 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6663 ev.store_hint = 0x00;
6665 ev.store_hint = persistent;
6667 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6668 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6669 ev.key.type = mgmt_ltk_type(key);
6670 ev.key.enc_size = key->enc_size;
6671 ev.key.ediv = key->ediv;
6672 ev.key.rand = key->rand;
6674 if (key->type == SMP_LTK)
6677 memcpy(ev.key.val, key->val, sizeof(key->val));
6679 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key.  Only keys that
 * came with an RPA (i.e. were actually needed for resolution) get a
 * positive store hint. */
6682 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6684 struct mgmt_ev_new_irk ev;
6686 memset(&ev, 0, sizeof(ev));
6688 /* For identity resolving keys from devices that are already
6689 * using a public address or static random address, do not
6690 * ask for storing this key. The identity resolving key really
6691 * is only mandatory for devices using resovlable random
6694 * Storing all identity resolving keys has the downside that
6695 * they will be also loaded on next boot of they system. More
6696 * identity resolving keys, means more time during scanning is
6697 * needed to actually resolve these addresses.
6699 if (bacmp(&irk->rpa, BDADDR_ANY))
6700 ev.store_hint = 0x01;
6702 ev.store_hint = 0x00;
6704 bacpy(&ev.rpa, &irk->rpa);
6705 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6706 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6707 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6709 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key, using the same
 * identity-address storability rule as mgmt_new_ltk(). */
6712 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6715 struct mgmt_ev_new_csrk ev;
6717 memset(&ev, 0, sizeof(ev));
6719 /* Devices using resolvable or non-resolvable random addresses
6720 * without providing an indentity resolving key don't require
6721 * to store signature resolving keys. Their addresses will change
6722 * the next time around.
6724 * Only when a remote device provides an identity address
6725 * make sure the signature resolving key is stored. So allow
6726 * static random and public addresses here.
6728 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6729 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6730 ev.store_hint = 0x00;
6732 ev.store_hint = persistent;
6734 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6735 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6736 ev.key.type = csrk->type;
6737 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6739 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM when a peer suggests new LE connection
 * parameters.  Silently ignored for non-identity addresses, since the
 * parameters could not be re-associated with the device later. */
6742 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6743 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6744 u16 max_interval, u16 latency, u16 timeout)
6746 struct mgmt_ev_new_conn_param ev;
6748 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6751 memset(&ev, 0, sizeof(ev));
6752 bacpy(&ev.addr.bdaddr, bdaddr);
6753 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6754 ev.store_hint = store_hint;
6755 ev.min_interval = cpu_to_le16(min_interval);
6756 ev.max_interval = cpu_to_le16(max_interval);
6757 ev.latency = cpu_to_le16(latency);
6758 ev.timeout = cpu_to_le16(timeout);
6760 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) at offset
 * eir_len in the eir buffer and return the new length.  Caller must
 * guarantee the buffer has room for data_len + 2 bytes. */
6763 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6766 eir[eir_len++] = sizeof(type) + data_len;
6767 eir[eir_len++] = type;
6768 memcpy(&eir[eir_len], data, data_len);
6769 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED with an EIR blob: either the cached LE
 * advertising data, or (for BR/EDR) the remote name and class of device. */
6774 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6775 u32 flags, u8 *name, u8 name_len)
6778 struct mgmt_ev_device_connected *ev = (void *) buf;
6781 bacpy(&ev->addr.bdaddr, &conn->dst);
6782 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6784 ev->flags = __cpu_to_le32(flags);
6786 /* We must ensure that the EIR Data fields are ordered and
6787 * unique. Keep it simple for now and avoid the problem by not
6788 * adding any BR/EDR data to the LE adv.
6790 if (conn->le_adv_data_len > 0) {
6791 memcpy(&ev->eir[eir_len],
6792 conn->le_adv_data, conn->le_adv_data_len);
6793 eir_len = conn->le_adv_data_len;
6796 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include class of device when it is non-zero. */
6799 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6800 eir_len = eir_append_data(ev->eir, eir_len,
6802 conn->dev_class, 3);
6805 ev->eir_len = cpu_to_le16(eir_len);
6807 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6808 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command
 * with success and hand its socket back via *data for event filtering. */
6811 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6813 struct sock **sk = data;
6815 cmd->cmd_complete(cmd, 0);
6820 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: finish a pending UNPAIR_DEVICE command,
 * emitting the device-unpaired event before completing it. */
6823 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6825 struct hci_dev *hdev = data;
6826 struct mgmt_cp_unpair_device *cp = cmd->param;
6828 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6830 cmd->cmd_complete(cmd, 0);
6831 mgmt_pending_remove(cmd);
/* Return true if a SET_POWERED(off) command is currently pending, i.e.
 * the controller is in the middle of a mgmt-initiated power-down.
 * NOTE(review): the cp->val check falls on lines missing from this
 * extract. */
6834 bool mgmt_powering_down(struct hci_dev *hdev)
6836 struct mgmt_pending_cmd *cmd;
6837 struct mgmt_mode *cp;
6839 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Report a disconnection to the mgmt layer.  If this is the last open
 * connection during a power-down, expedite the queued power-off work.
 * Also completes any pending DISCONNECT and UNPAIR_DEVICE commands. */
6850 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6851 u8 link_type, u8 addr_type, u8 reason,
6852 bool mgmt_connected)
6854 struct mgmt_ev_device_disconnected ev;
6855 struct sock *sk = NULL;
6857 /* The connection is still in hci_conn_hash so test for 1
6858 * instead of 0 to know if this is the last one.
6860 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6861 cancel_delayed_work(&hdev->power_off);
6862 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Connections that were never reported as connected (or are not
 * ACL/LE) produce no disconnected event. */
6865 if (!mgmt_connected)
6868 if (link_type != ACL_LINK && link_type != LE_LINK)
6871 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6873 bacpy(&ev.addr.bdaddr, bdaddr);
6874 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6877 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6882 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnection failed: complete pending UNPAIR_DEVICE
 * commands and, if the pending DISCONNECT command matches this address
 * and type, complete it with the translated HCI status. */
6886 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6887 u8 link_type, u8 addr_type, u8 status)
6889 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6890 struct mgmt_cp_disconnect *cp;
6891 struct mgmt_pending_cmd *cmd;
6893 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6896 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6902 if (bacmp(bdaddr, &cp->addr.bdaddr))
6905 if (cp->addr.type != bdaddr_type)
6908 cmd->cmd_complete(cmd, mgmt_status(status));
6909 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED for a failed outgoing connection; like the
 * disconnect path, expedite power-off if this was the last connection. */
6912 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6913 u8 addr_type, u8 status)
6915 struct mgmt_ev_connect_failed ev;
6917 /* The connection is still in hci_conn_hash so test for 1
6918 * instead of 0 to know if this is the last one.
6920 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6921 cancel_delayed_work(&hdev->power_off);
6922 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6925 bacpy(&ev.addr.bdaddr, bdaddr);
6926 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6927 ev.status = mgmt_status(status);
6929 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a PIN code request from the controller to userspace
 * (BR/EDR only). */
6932 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6934 struct mgmt_ev_pin_code_request ev;
6936 bacpy(&ev.addr.bdaddr, bdaddr);
6937 ev.addr.type = BDADDR_BREDR;
6940 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the HCI status. */
6943 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6946 struct mgmt_pending_cmd *cmd;
6948 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6952 cmd->cmd_complete(cmd, mgmt_status(status));
6953 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the HCI status. */
6956 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6959 struct mgmt_pending_cmd *cmd;
6961 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6965 cmd->cmd_complete(cmd, mgmt_status(status));
6966 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a numeric-comparison pairing value via
 * MGMT_EV_USER_CONFIRM_REQUEST. */
6969 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6970 u8 link_type, u8 addr_type, u32 value,
6973 struct mgmt_ev_user_confirm_request ev;
6975 BT_DBG("%s", hdev->name);
6977 bacpy(&ev.addr.bdaddr, bdaddr);
6978 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6979 ev.confirm_hint = confirm_hint;
6980 ev.value = cpu_to_le32(value);
6982 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey via MGMT_EV_USER_PASSKEY_REQUEST. */
6986 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6987 u8 link_type, u8 addr_type)
6989 struct mgmt_ev_user_passkey_request ev;
6991 BT_DBG("%s", hdev->name);
6993 bacpy(&ev.addr.bdaddr, bdaddr);
6994 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6996 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * commands: find the pending command for the given opcode and complete
 * it with the translated HCI status. */
7000 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7001 u8 link_type, u8 addr_type, u8 status,
7004 struct mgmt_pending_cmd *cmd;
7006 cmd = mgmt_pending_find(opcode, hdev);
7010 cmd->cmd_complete(cmd, mgmt_status(status));
7011 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
7016 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7017 u8 link_type, u8 addr_type, u8 status)
7019 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7020 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
7023 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7024 u8 link_type, u8 addr_type, u8 status)
7026 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7028 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
7031 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7032 u8 link_type, u8 addr_type, u8 status)
7034 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7035 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
7038 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7039 u8 link_type, u8 addr_type, u8 status)
7041 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7043 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey and
 * how many digits the remote side has entered so far. */
7046 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7047 u8 link_type, u8 addr_type, u32 passkey,
7050 struct mgmt_ev_passkey_notify ev;
7052 BT_DBG("%s", hdev->name);
7054 bacpy(&ev.addr.bdaddr, bdaddr);
7055 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7056 ev.passkey = __cpu_to_le32(passkey);
7057 ev.entered = entered;
7059 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: emit MGMT_EV_AUTH_FAILED (skipping
 * the socket of the pairing initiator, if any) and complete that
 * initiator's pending pairing command with the translated status. */
7062 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7064 struct mgmt_ev_auth_failed ev;
7065 struct mgmt_pending_cmd *cmd;
7066 u8 status = mgmt_status(hci_status);
7068 bacpy(&ev.addr.bdaddr, &conn->dst);
7069 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7072 cmd = find_pairing(conn);
7074 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7075 cmd ? cmd->sk : NULL);
7078 cmd->cmd_complete(cmd, status);
7079 mgmt_pending_remove(cmd);
/* Completion of HCI Write_Authentication_Enable: on error, fail pending
 * SET_LINK_SECURITY commands with the HCI status; on success, sync the
 * HCI_LINK_SECURITY flag with HCI_AUTH, answer pending commands and
 * broadcast new settings if the flag changed. */
7083 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7085 struct cmd_lookup match = { NULL, hdev };
7089 u8 mgmt_err = mgmt_status(status);
7090 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7091 cmd_status_rsp, &mgmt_err);
7095 if (test_bit(HCI_AUTH, &hdev->flags))
7096 changed = !test_and_set_bit(HCI_LINK_SECURITY,
7099 changed = test_and_clear_bit(HCI_LINK_SECURITY,
7102 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7106 new_settings(hdev, match.sk);
/* Queue an HCI Write_Extended_Inquiry_Response with all-zero data and
 * clear the cached copy; no-op if the controller lacks extended inquiry
 * response support. */
7112 static void clear_eir(struct hci_request *req)
7114 struct hci_dev *hdev = req->hdev;
7115 struct hci_cp_write_eir cp;
7117 if (!lmp_ext_inq_capable(hdev))
7120 memset(hdev->eir, 0, sizeof(hdev->eir));
7122 memset(&cp, 0, sizeof(cp));
7124 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of HCI Write_Simple_Pairing_Mode: on error, roll back the
 * SSP (and dependent HS) flags and fail pending SET_SSP commands; on
 * success, sync the flags, answer pending commands, broadcast settings
 * and follow up with debug-key mode / EIR updates as needed. */
7127 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7129 struct cmd_lookup match = { NULL, hdev };
7130 struct hci_request req;
7131 bool changed = false;
7134 u8 mgmt_err = mgmt_status(status);
/* Error while enabling: undo the optimistic flag set. */
7136 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
7137 &hdev->dev_flags)) {
7138 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7139 new_settings(hdev, NULL);
7142 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7148 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
7150 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High Speed depends on SSP; disabling SSP also disables HS. */
7152 changed = test_and_clear_bit(HCI_HS_ENABLED,
7155 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7158 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7161 new_settings(hdev, match.sk);
7166 hci_req_init(&req, hdev);
7168 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7169 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7170 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7171 sizeof(enable), &enable);
7177 hci_req_run(&req, NULL);
/* mgmt_pending_foreach callback: capture (and hold a reference to) the
 * first pending command's socket in the cmd_lookup match structure. */
7180 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7182 struct cmd_lookup *match = data;
7184 if (match->sk == NULL) {
7185 match->sk = cmd->sk;
7186 sock_hold(match->sk);
/* Completion of a class-of-device update: answer all pending commands
 * that could have triggered it (SET_DEV_CLASS, ADD_UUID, REMOVE_UUID)
 * and broadcast the new 3-byte class value to everyone else. */
7190 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7193 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7195 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7196 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7197 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7200 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion of a local-name change: update hdev->dev_name and emit
 * MGMT_EV_LOCAL_NAME_CHANGED — suppressed while a power-on sequence
 * (pending SET_POWERED) is driving the name write. */
7207 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7209 struct mgmt_cp_set_local_name ev;
7210 struct mgmt_pending_cmd *cmd;
7215 memset(&ev, 0, sizeof(ev));
7216 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7217 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7219 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7221 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7223 /* If this is a HCI command related to powering on the
7224 * HCI dev don't send any mgmt signals.
7226 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7230 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7231 cmd ? cmd->sk : NULL);
/* Completion of Read_Local_OOB_(Extended_)Data: answer the pending
 * READ_LOCAL_OOB_DATA command with the 192-bit values, appending the
 * 256-bit values only when Secure Connections is enabled and the
 * controller supplied them (reply is truncated otherwise). */
7234 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7235 u8 *rand192, u8 *hash256, u8 *rand256,
7238 struct mgmt_pending_cmd *cmd;
7240 BT_DBG("%s status %u", hdev->name, status);
7242 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7247 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7248 mgmt_status(status));
7250 struct mgmt_rp_read_local_oob_data rp;
7251 size_t rp_size = sizeof(rp);
7253 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7254 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7256 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7257 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7258 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
/* No P-256 data: shrink the reply to the 192-bit part only. */
7260 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7263 mgmt_cmd_complete(cmd->sk, hdev->id,
7264 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7268 mgmt_pending_remove(cmd);
/* Return true if the 128-bit uuid appears in the uuids[uuid_count]
 * filter array (byte-wise comparison). */
7271 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7275 for (i = 0; i < uuid_count; i++) {
7276 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising-data TLV fields in @eir and return true if any
 * advertised service UUID (16-, 32- or 128-bit list) occurs in the @uuids
 * filter table.  16- and 32-bit UUIDs are promoted to 128 bit by patching
 * them into the Bluetooth base UUID before comparison.
 *
 * NOTE(review): elided view — the switch header, break statements and the
 * final return are not visible here.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
	while (parsed < eir_len) {
		u8 field_len = eir[0];

		/* Stop if this field would run past the end of the data. */
		if (eir_len - parsed < field_len + 1)

		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte little-endian UUIDs start at eir[2];
			 * they fill bytes 12..13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte little-endian UUIDs fill bytes 12..15. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))

		/* Advance to the next TLV field (length byte + payload). */
		parsed += field_len + 1;
		eir += field_len + 1;
/* Schedule a restart of an active LE scan so that controllers with strict
 * duplicate filtering report fresh (RSSI-updated) results.  The restart
 * is queued as delayed work; it is skipped when the discovery window
 * would end before the delay expires anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))

	/* Skip the restart when the remaining scan duration is shorter
	 * than the restart delay.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
/* Apply the service-discovery result filter: an RSSI threshold plus, when
 * a UUID list was supplied, at least one matching UUID in either the EIR
 * data or the scan response.  Returns true when the result should be
 * reported to userspace.
 *
 * NOTE(review): elided view — the individual return statements of the
 * filter branches are not visible here.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.  Both the EIR data and the
		 * scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
/* Forward an inquiry/advertising result to mgmt listeners as a
 * MGMT_EV_DEVICE_FOUND event.  The EIR data, an optional class-of-device
 * field and any scan response data are packed into a single event buffer.
 *
 * NOTE(review): elided view — the declaration of the local 'buf' used
 * below, plus braces and several early returns, are not visible here.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
	struct mgmt_ev_device_found *ev = (void *)buf;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery: drop results that fail
		 * the RSSI/UUID filter.
		 */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = cpu_to_le32(flags);

	/* Copy EIR or advertising data into event */
	memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only when the EIR does not already
	 * carry an EIR_CLASS_OF_DEV field.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	/* Total EIR length covers advertising data plus scan response. */
	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote device name to mgmt listeners as a
 * MGMT_EV_DEVICE_FOUND event whose EIR payload carries only a single
 * EIR_NAME_COMPLETE field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
	struct mgmt_ev_device_found *ev;
	/* Event header plus one name EIR field (2 bytes of TLV header). */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	/* Encode the name as a complete-name EIR field. */
	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast MGMT_EV_DISCOVERING, announcing the current discovery type
 * and whether discovery just started (1) or stopped (0).
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for the advertising re-enable request issued by
 * mgmt_reenable_advertising(); only logs the controller status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	BT_DBG("%s status %u", hdev->name, status);
/* Re-enable LE advertising after it was implicitly stopped, but only
 * when the HCI_ADVERTISING setting is still active in mgmt.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
	struct hci_request req;

	/* Nothing to do unless advertising is enabled in the settings. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))

	/* Build and submit an HCI request that turns advertising back on;
	 * completion is logged by adv_enable_complete().
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
/* Registration record for the mgmt control channel; the mgmt_handlers
 * table referenced here is defined earlier in this file.
 */
static struct hci_mgmt_chan chan = {
	.channel = HCI_CHANNEL_CONTROL,
	.handler_count = ARRAY_SIZE(mgmt_handlers),
	.handlers = mgmt_handlers,
7528 return hci_mgmt_chan_register(&chan);
/* Unregister the mgmt control channel; counterpart of mgmt_init(). */
void mgmt_exit(void)
	hci_mgmt_chan_unregister(&chan);