/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
39 #define MGMT_VERSION 1
40 #define MGMT_REVISION 9
42 static const u16 mgmt_commands[] = {
43 MGMT_OP_READ_INDEX_LIST,
46 MGMT_OP_SET_DISCOVERABLE,
47 MGMT_OP_SET_CONNECTABLE,
48 MGMT_OP_SET_FAST_CONNECTABLE,
50 MGMT_OP_SET_LINK_SECURITY,
54 MGMT_OP_SET_DEV_CLASS,
55 MGMT_OP_SET_LOCAL_NAME,
58 MGMT_OP_LOAD_LINK_KEYS,
59 MGMT_OP_LOAD_LONG_TERM_KEYS,
61 MGMT_OP_GET_CONNECTIONS,
62 MGMT_OP_PIN_CODE_REPLY,
63 MGMT_OP_PIN_CODE_NEG_REPLY,
64 MGMT_OP_SET_IO_CAPABILITY,
66 MGMT_OP_CANCEL_PAIR_DEVICE,
67 MGMT_OP_UNPAIR_DEVICE,
68 MGMT_OP_USER_CONFIRM_REPLY,
69 MGMT_OP_USER_CONFIRM_NEG_REPLY,
70 MGMT_OP_USER_PASSKEY_REPLY,
71 MGMT_OP_USER_PASSKEY_NEG_REPLY,
72 MGMT_OP_READ_LOCAL_OOB_DATA,
73 MGMT_OP_ADD_REMOTE_OOB_DATA,
74 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
75 MGMT_OP_START_DISCOVERY,
76 MGMT_OP_STOP_DISCOVERY,
79 MGMT_OP_UNBLOCK_DEVICE,
80 MGMT_OP_SET_DEVICE_ID,
81 MGMT_OP_SET_ADVERTISING,
83 MGMT_OP_SET_STATIC_ADDRESS,
84 MGMT_OP_SET_SCAN_PARAMS,
85 MGMT_OP_SET_SECURE_CONN,
86 MGMT_OP_SET_DEBUG_KEYS,
89 MGMT_OP_GET_CONN_INFO,
90 MGMT_OP_GET_CLOCK_INFO,
92 MGMT_OP_REMOVE_DEVICE,
93 MGMT_OP_LOAD_CONN_PARAM,
94 MGMT_OP_READ_UNCONF_INDEX_LIST,
95 MGMT_OP_READ_CONFIG_INFO,
96 MGMT_OP_SET_EXTERNAL_CONFIG,
97 MGMT_OP_SET_PUBLIC_ADDRESS,
98 MGMT_OP_START_SERVICE_DISCOVERY,
101 static const u16 mgmt_events[] = {
102 MGMT_EV_CONTROLLER_ERROR,
104 MGMT_EV_INDEX_REMOVED,
105 MGMT_EV_NEW_SETTINGS,
106 MGMT_EV_CLASS_OF_DEV_CHANGED,
107 MGMT_EV_LOCAL_NAME_CHANGED,
108 MGMT_EV_NEW_LINK_KEY,
109 MGMT_EV_NEW_LONG_TERM_KEY,
110 MGMT_EV_DEVICE_CONNECTED,
111 MGMT_EV_DEVICE_DISCONNECTED,
112 MGMT_EV_CONNECT_FAILED,
113 MGMT_EV_PIN_CODE_REQUEST,
114 MGMT_EV_USER_CONFIRM_REQUEST,
115 MGMT_EV_USER_PASSKEY_REQUEST,
117 MGMT_EV_DEVICE_FOUND,
119 MGMT_EV_DEVICE_BLOCKED,
120 MGMT_EV_DEVICE_UNBLOCKED,
121 MGMT_EV_DEVICE_UNPAIRED,
122 MGMT_EV_PASSKEY_NOTIFY,
125 MGMT_EV_DEVICE_ADDED,
126 MGMT_EV_DEVICE_REMOVED,
127 MGMT_EV_NEW_CONN_PARAM,
128 MGMT_EV_UNCONF_INDEX_ADDED,
129 MGMT_EV_UNCONF_INDEX_REMOVED,
130 MGMT_EV_NEW_CONFIG_OPTIONS,
133 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
135 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
136 "\x00\x00\x00\x00\x00\x00\x00\x00"
138 struct mgmt_pending_cmd {
139 struct list_head list;
146 int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
149 /* HCI to MGMT error code conversion table */
150 static u8 mgmt_status_table[] = {
152 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
153 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
154 MGMT_STATUS_FAILED, /* Hardware Failure */
155 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
156 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
157 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
158 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
159 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
160 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
161 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
162 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
163 MGMT_STATUS_BUSY, /* Command Disallowed */
164 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
165 MGMT_STATUS_REJECTED, /* Rejected Security */
166 MGMT_STATUS_REJECTED, /* Rejected Personal */
167 MGMT_STATUS_TIMEOUT, /* Host Timeout */
168 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
169 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
170 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
171 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
172 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
173 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
174 MGMT_STATUS_BUSY, /* Repeated Attempts */
175 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
176 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
177 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
178 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
179 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
180 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
181 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
182 MGMT_STATUS_FAILED, /* Unspecified Error */
183 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
184 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
185 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
186 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
187 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
188 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
189 MGMT_STATUS_FAILED, /* Unit Link Key Used */
190 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
191 MGMT_STATUS_TIMEOUT, /* Instant Passed */
192 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
193 MGMT_STATUS_FAILED, /* Transaction Collision */
194 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
195 MGMT_STATUS_REJECTED, /* QoS Rejected */
196 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
197 MGMT_STATUS_REJECTED, /* Insufficient Security */
198 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
199 MGMT_STATUS_BUSY, /* Role Switch Pending */
200 MGMT_STATUS_FAILED, /* Slot Violation */
201 MGMT_STATUS_FAILED, /* Role Switch Failed */
202 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
203 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
204 MGMT_STATUS_BUSY, /* Host Busy Pairing */
205 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
206 MGMT_STATUS_BUSY, /* Controller Busy */
207 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
208 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
209 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
210 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
211 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
214 static u8 mgmt_status(u8 hci_status)
216 if (hci_status < ARRAY_SIZE(mgmt_status_table))
217 return mgmt_status_table[hci_status];
219 return MGMT_STATUS_FAILED;
222 static int mgmt_send_event(u16 event, struct hci_dev *hdev,
223 unsigned short channel, void *data, u16 data_len,
224 struct sock *skip_sk)
227 struct mgmt_hdr *hdr;
229 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
233 hdr = (void *) skb_put(skb, sizeof(*hdr));
234 hdr->opcode = cpu_to_le16(event);
236 hdr->index = cpu_to_le16(hdev->id);
238 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
239 hdr->len = cpu_to_le16(data_len);
242 memcpy(skb_put(skb, data_len), data, data_len);
245 __net_timestamp(skb);
247 hci_send_to_channel(channel, skb, skip_sk);
253 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
254 struct sock *skip_sk)
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
260 static int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
263 struct mgmt_hdr *hdr;
264 struct mgmt_ev_cmd_status *ev;
267 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
269 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
273 hdr = (void *) skb_put(skb, sizeof(*hdr));
275 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
276 hdr->index = cpu_to_le16(index);
277 hdr->len = cpu_to_le16(sizeof(*ev));
279 ev = (void *) skb_put(skb, sizeof(*ev));
281 ev->opcode = cpu_to_le16(cmd);
283 err = sock_queue_rcv_skb(sk, skb);
290 static int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
291 void *rp, size_t rp_len)
294 struct mgmt_hdr *hdr;
295 struct mgmt_ev_cmd_complete *ev;
298 BT_DBG("sock %p", sk);
300 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
304 hdr = (void *) skb_put(skb, sizeof(*hdr));
306 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
307 hdr->index = cpu_to_le16(index);
308 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
310 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
311 ev->opcode = cpu_to_le16(cmd);
315 memcpy(ev->data, rp, rp_len);
317 err = sock_queue_rcv_skb(sk, skb);
324 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
327 struct mgmt_rp_read_version rp;
329 BT_DBG("sock %p", sk);
331 rp.version = MGMT_VERSION;
332 rp.revision = cpu_to_le16(MGMT_REVISION);
334 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
338 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
341 struct mgmt_rp_read_commands *rp;
342 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
343 const u16 num_events = ARRAY_SIZE(mgmt_events);
348 BT_DBG("sock %p", sk);
350 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
352 rp = kmalloc(rp_size, GFP_KERNEL);
356 rp->num_commands = cpu_to_le16(num_commands);
357 rp->num_events = cpu_to_le16(num_events);
359 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
360 put_unaligned_le16(mgmt_commands[i], opcode);
362 for (i = 0; i < num_events; i++, opcode++)
363 put_unaligned_le16(mgmt_events[i], opcode);
365 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
372 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
375 struct mgmt_rp_read_index_list *rp;
381 BT_DBG("sock %p", sk);
383 read_lock(&hci_dev_list_lock);
386 list_for_each_entry(d, &hci_dev_list, list) {
387 if (d->dev_type == HCI_BREDR &&
388 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
392 rp_len = sizeof(*rp) + (2 * count);
393 rp = kmalloc(rp_len, GFP_ATOMIC);
395 read_unlock(&hci_dev_list_lock);
400 list_for_each_entry(d, &hci_dev_list, list) {
401 if (test_bit(HCI_SETUP, &d->dev_flags) ||
402 test_bit(HCI_CONFIG, &d->dev_flags) ||
403 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
406 /* Devices marked as raw-only are neither configured
407 * nor unconfigured controllers.
409 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
412 if (d->dev_type == HCI_BREDR &&
413 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
414 rp->index[count++] = cpu_to_le16(d->id);
415 BT_DBG("Added hci%u", d->id);
419 rp->num_controllers = cpu_to_le16(count);
420 rp_len = sizeof(*rp) + (2 * count);
422 read_unlock(&hci_dev_list_lock);
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
432 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
433 void *data, u16 data_len)
435 struct mgmt_rp_read_unconf_index_list *rp;
441 BT_DBG("sock %p", sk);
443 read_lock(&hci_dev_list_lock);
446 list_for_each_entry(d, &hci_dev_list, list) {
447 if (d->dev_type == HCI_BREDR &&
448 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
452 rp_len = sizeof(*rp) + (2 * count);
453 rp = kmalloc(rp_len, GFP_ATOMIC);
455 read_unlock(&hci_dev_list_lock);
460 list_for_each_entry(d, &hci_dev_list, list) {
461 if (test_bit(HCI_SETUP, &d->dev_flags) ||
462 test_bit(HCI_CONFIG, &d->dev_flags) ||
463 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
466 /* Devices marked as raw-only are neither configured
467 * nor unconfigured controllers.
469 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
472 if (d->dev_type == HCI_BREDR &&
473 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
474 rp->index[count++] = cpu_to_le16(d->id);
475 BT_DBG("Added hci%u", d->id);
479 rp->num_controllers = cpu_to_le16(count);
480 rp_len = sizeof(*rp) + (2 * count);
482 read_unlock(&hci_dev_list_lock);
484 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
485 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
492 static bool is_configured(struct hci_dev *hdev)
494 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
495 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
499 !bacmp(&hdev->public_addr, BDADDR_ANY))
505 static __le32 get_missing_options(struct hci_dev *hdev)
509 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
510 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
511 options |= MGMT_OPTION_EXTERNAL_CONFIG;
513 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
514 !bacmp(&hdev->public_addr, BDADDR_ANY))
515 options |= MGMT_OPTION_PUBLIC_ADDRESS;
517 return cpu_to_le32(options);
520 static int new_options(struct hci_dev *hdev, struct sock *skip)
522 __le32 options = get_missing_options(hdev);
524 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
525 sizeof(options), skip);
528 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
530 __le32 options = get_missing_options(hdev);
532 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
536 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
537 void *data, u16 data_len)
539 struct mgmt_rp_read_config_info rp;
542 BT_DBG("sock %p %s", sk, hdev->name);
546 memset(&rp, 0, sizeof(rp));
547 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
549 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
550 options |= MGMT_OPTION_EXTERNAL_CONFIG;
552 if (hdev->set_bdaddr)
553 options |= MGMT_OPTION_PUBLIC_ADDRESS;
555 rp.supported_options = cpu_to_le32(options);
556 rp.missing_options = get_missing_options(hdev);
558 hci_dev_unlock(hdev);
560 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
564 static u32 get_supported_settings(struct hci_dev *hdev)
568 settings |= MGMT_SETTING_POWERED;
569 settings |= MGMT_SETTING_BONDABLE;
570 settings |= MGMT_SETTING_DEBUG_KEYS;
571 settings |= MGMT_SETTING_CONNECTABLE;
572 settings |= MGMT_SETTING_DISCOVERABLE;
574 if (lmp_bredr_capable(hdev)) {
575 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
576 settings |= MGMT_SETTING_FAST_CONNECTABLE;
577 settings |= MGMT_SETTING_BREDR;
578 settings |= MGMT_SETTING_LINK_SECURITY;
580 if (lmp_ssp_capable(hdev)) {
581 settings |= MGMT_SETTING_SSP;
582 settings |= MGMT_SETTING_HS;
585 if (lmp_sc_capable(hdev))
586 settings |= MGMT_SETTING_SECURE_CONN;
589 if (lmp_le_capable(hdev)) {
590 settings |= MGMT_SETTING_LE;
591 settings |= MGMT_SETTING_ADVERTISING;
592 settings |= MGMT_SETTING_SECURE_CONN;
593 settings |= MGMT_SETTING_PRIVACY;
594 settings |= MGMT_SETTING_STATIC_ADDRESS;
597 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
599 settings |= MGMT_SETTING_CONFIGURATION;
604 static u32 get_current_settings(struct hci_dev *hdev)
608 if (hdev_is_powered(hdev))
609 settings |= MGMT_SETTING_POWERED;
611 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
612 settings |= MGMT_SETTING_CONNECTABLE;
614 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
615 settings |= MGMT_SETTING_FAST_CONNECTABLE;
617 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
618 settings |= MGMT_SETTING_DISCOVERABLE;
620 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
621 settings |= MGMT_SETTING_BONDABLE;
623 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
624 settings |= MGMT_SETTING_BREDR;
626 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
627 settings |= MGMT_SETTING_LE;
629 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
630 settings |= MGMT_SETTING_LINK_SECURITY;
632 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
633 settings |= MGMT_SETTING_SSP;
635 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
636 settings |= MGMT_SETTING_HS;
638 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
639 settings |= MGMT_SETTING_ADVERTISING;
641 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
642 settings |= MGMT_SETTING_SECURE_CONN;
644 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
645 settings |= MGMT_SETTING_DEBUG_KEYS;
647 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
648 settings |= MGMT_SETTING_PRIVACY;
650 /* The current setting for static address has two purposes. The
651 * first is to indicate if the static address will be used and
652 * the second is to indicate if it is actually set.
654 * This means if the static address is not configured, this flag
655 * will never bet set. If the address is configured, then if the
656 * address is actually used decides if the flag is set or not.
658 * For single mode LE only controllers and dual-mode controllers
659 * with BR/EDR disabled, the existence of the static address will
662 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
663 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
664 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
665 if (bacmp(&hdev->static_addr, BDADDR_ANY))
666 settings |= MGMT_SETTING_STATIC_ADDRESS;
672 #define PNP_INFO_SVCLASS_ID 0x1200
674 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
676 u8 *ptr = data, *uuids_start = NULL;
677 struct bt_uuid *uuid;
682 list_for_each_entry(uuid, &hdev->uuids, list) {
685 if (uuid->size != 16)
688 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
692 if (uuid16 == PNP_INFO_SVCLASS_ID)
698 uuids_start[1] = EIR_UUID16_ALL;
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u16) > len) {
704 uuids_start[1] = EIR_UUID16_SOME;
708 *ptr++ = (uuid16 & 0x00ff);
709 *ptr++ = (uuid16 & 0xff00) >> 8;
710 uuids_start[0] += sizeof(uuid16);
716 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
718 u8 *ptr = data, *uuids_start = NULL;
719 struct bt_uuid *uuid;
724 list_for_each_entry(uuid, &hdev->uuids, list) {
725 if (uuid->size != 32)
731 uuids_start[1] = EIR_UUID32_ALL;
735 /* Stop if not enough space to put next UUID */
736 if ((ptr - data) + sizeof(u32) > len) {
737 uuids_start[1] = EIR_UUID32_SOME;
741 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
743 uuids_start[0] += sizeof(u32);
749 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
751 u8 *ptr = data, *uuids_start = NULL;
752 struct bt_uuid *uuid;
757 list_for_each_entry(uuid, &hdev->uuids, list) {
758 if (uuid->size != 128)
764 uuids_start[1] = EIR_UUID128_ALL;
768 /* Stop if not enough space to put next UUID */
769 if ((ptr - data) + 16 > len) {
770 uuids_start[1] = EIR_UUID128_SOME;
774 memcpy(ptr, uuid->uuid, 16);
776 uuids_start[0] += 16;
782 static struct mgmt_pending_cmd *mgmt_pending_find(u16 opcode,
783 struct hci_dev *hdev)
785 struct mgmt_pending_cmd *cmd;
787 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
788 if (cmd->opcode == opcode)
795 static struct mgmt_pending_cmd *mgmt_pending_find_data(u16 opcode,
796 struct hci_dev *hdev,
799 struct mgmt_pending_cmd *cmd;
801 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
802 if (cmd->user_data != data)
804 if (cmd->opcode == opcode)
811 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
816 name_len = strlen(hdev->dev_name);
818 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
820 if (name_len > max_len) {
822 ptr[1] = EIR_NAME_SHORT;
824 ptr[1] = EIR_NAME_COMPLETE;
826 ptr[0] = name_len + 1;
828 memcpy(ptr + 2, hdev->dev_name, name_len);
830 ad_len += (name_len + 2);
831 ptr += (name_len + 2);
837 static void update_scan_rsp_data(struct hci_request *req)
839 struct hci_dev *hdev = req->hdev;
840 struct hci_cp_le_set_scan_rsp_data cp;
843 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
846 memset(&cp, 0, sizeof(cp));
848 len = create_scan_rsp_data(hdev, cp.data);
850 if (hdev->scan_rsp_data_len == len &&
851 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
854 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
855 hdev->scan_rsp_data_len = len;
859 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
862 static u8 get_adv_discov_flags(struct hci_dev *hdev)
864 struct mgmt_pending_cmd *cmd;
866 /* If there's a pending mgmt command the flags will not yet have
867 * their final values, so check for this first.
869 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
871 struct mgmt_mode *cp = cmd->param;
873 return LE_AD_GENERAL;
874 else if (cp->val == 0x02)
875 return LE_AD_LIMITED;
877 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
878 return LE_AD_LIMITED;
879 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
880 return LE_AD_GENERAL;
886 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
888 u8 ad_len = 0, flags = 0;
890 flags |= get_adv_discov_flags(hdev);
892 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
893 flags |= LE_AD_NO_BREDR;
896 BT_DBG("adv flags 0x%02x", flags);
906 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
908 ptr[1] = EIR_TX_POWER;
909 ptr[2] = (u8) hdev->adv_tx_power;
918 static void update_adv_data(struct hci_request *req)
920 struct hci_dev *hdev = req->hdev;
921 struct hci_cp_le_set_adv_data cp;
924 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
927 memset(&cp, 0, sizeof(cp));
929 len = create_adv_data(hdev, cp.data);
931 if (hdev->adv_data_len == len &&
932 memcmp(cp.data, hdev->adv_data, len) == 0)
935 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
936 hdev->adv_data_len = len;
940 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
943 int mgmt_update_adv_data(struct hci_dev *hdev)
945 struct hci_request req;
947 hci_req_init(&req, hdev);
948 update_adv_data(&req);
950 return hci_req_run(&req, NULL);
953 static void create_eir(struct hci_dev *hdev, u8 *data)
958 name_len = strlen(hdev->dev_name);
964 ptr[1] = EIR_NAME_SHORT;
966 ptr[1] = EIR_NAME_COMPLETE;
968 /* EIR Data length */
969 ptr[0] = name_len + 1;
971 memcpy(ptr + 2, hdev->dev_name, name_len);
973 ptr += (name_len + 2);
976 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
978 ptr[1] = EIR_TX_POWER;
979 ptr[2] = (u8) hdev->inq_tx_power;
984 if (hdev->devid_source > 0) {
986 ptr[1] = EIR_DEVICE_ID;
988 put_unaligned_le16(hdev->devid_source, ptr + 2);
989 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
990 put_unaligned_le16(hdev->devid_product, ptr + 6);
991 put_unaligned_le16(hdev->devid_version, ptr + 8);
996 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
997 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
998 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1001 static void update_eir(struct hci_request *req)
1003 struct hci_dev *hdev = req->hdev;
1004 struct hci_cp_write_eir cp;
1006 if (!hdev_is_powered(hdev))
1009 if (!lmp_ext_inq_capable(hdev))
1012 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1015 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1018 memset(&cp, 0, sizeof(cp));
1020 create_eir(hdev, cp.data);
1022 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1025 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1027 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1030 static u8 get_service_classes(struct hci_dev *hdev)
1032 struct bt_uuid *uuid;
1035 list_for_each_entry(uuid, &hdev->uuids, list)
1036 val |= uuid->svc_hint;
1041 static void update_class(struct hci_request *req)
1043 struct hci_dev *hdev = req->hdev;
1046 BT_DBG("%s", hdev->name);
1048 if (!hdev_is_powered(hdev))
1051 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1054 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1057 cod[0] = hdev->minor_class;
1058 cod[1] = hdev->major_class;
1059 cod[2] = get_service_classes(hdev);
1061 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1064 if (memcmp(cod, hdev->dev_class, 3) == 0)
1067 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1070 static bool get_connectable(struct hci_dev *hdev)
1072 struct mgmt_pending_cmd *cmd;
1074 /* If there's a pending mgmt command the flag will not yet have
1075 * it's final value, so check for this first.
1077 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1079 struct mgmt_mode *cp = cmd->param;
1083 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1086 static void disable_advertising(struct hci_request *req)
1090 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1093 static void enable_advertising(struct hci_request *req)
1095 struct hci_dev *hdev = req->hdev;
1096 struct hci_cp_le_set_adv_param cp;
1097 u8 own_addr_type, enable = 0x01;
1100 if (hci_conn_num(hdev, LE_LINK) > 0)
1103 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1104 disable_advertising(req);
1106 /* Clear the HCI_LE_ADV bit temporarily so that the
1107 * hci_update_random_address knows that it's safe to go ahead
1108 * and write a new random address. The flag will be set back on
1109 * as soon as the SET_ADV_ENABLE HCI command completes.
1111 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1113 if (test_bit(HCI_ADVERTISING_CONNECTABLE, &hdev->dev_flags))
1116 connectable = get_connectable(hdev);
1118 /* Set require_privacy to true only when non-connectable
1119 * advertising is used. In that case it is fine to use a
1120 * non-resolvable private address.
1122 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1125 memset(&cp, 0, sizeof(cp));
1126 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1127 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1128 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1129 cp.own_address_type = own_addr_type;
1130 cp.channel_map = hdev->le_adv_channel_map;
1132 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1134 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1137 static void service_cache_off(struct work_struct *work)
1139 struct hci_dev *hdev = container_of(work, struct hci_dev,
1140 service_cache.work);
1141 struct hci_request req;
1143 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1146 hci_req_init(&req, hdev);
1153 hci_dev_unlock(hdev);
1155 hci_req_run(&req, NULL);
1158 static void rpa_expired(struct work_struct *work)
1160 struct hci_dev *hdev = container_of(work, struct hci_dev,
1162 struct hci_request req;
1166 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1168 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1171 /* The generation of a new RPA and programming it into the
1172 * controller happens in the enable_advertising() function.
1174 hci_req_init(&req, hdev);
1175 enable_advertising(&req);
1176 hci_req_run(&req, NULL);
1179 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1181 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1184 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1185 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1187 /* Non-mgmt controlled devices get this bit set
1188 * implicitly so that pairing works for them, however
1189 * for mgmt we require user-space to explicitly enable
1192 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1195 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1196 void *data, u16 data_len)
1198 struct mgmt_rp_read_info rp;
1200 BT_DBG("sock %p %s", sk, hdev->name);
1204 memset(&rp, 0, sizeof(rp));
1206 bacpy(&rp.bdaddr, &hdev->bdaddr);
1208 rp.version = hdev->hci_ver;
1209 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1211 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1212 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1214 memcpy(rp.dev_class, hdev->dev_class, 3);
1216 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1217 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1219 hci_dev_unlock(hdev);
1221 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1225 static void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
1232 static struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1233 struct hci_dev *hdev,
1234 void *data, u16 len)
1236 struct mgmt_pending_cmd *cmd;
1238 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1242 cmd->opcode = opcode;
1243 cmd->index = hdev->id;
1245 cmd->param = kmemdup(data, len, GFP_KERNEL);
1251 cmd->param_len = len;
1256 list_add(&cmd->list, &hdev->mgmt_pending);
1261 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1262 void (*cb)(struct mgmt_pending_cmd *cmd,
1266 struct mgmt_pending_cmd *cmd, *tmp;
1268 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1269 if (opcode > 0 && cmd->opcode != opcode)
1276 static void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
1278 list_del(&cmd->list);
1279 mgmt_pending_free(cmd);
1282 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1284 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1286 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1290 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1292 BT_DBG("%s status 0x%02x", hdev->name, status);
1294 if (hci_conn_count(hdev) == 0) {
1295 cancel_delayed_work(&hdev->power_off);
1296 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1300 static bool hci_stop_discovery(struct hci_request *req)
1302 struct hci_dev *hdev = req->hdev;
1303 struct hci_cp_remote_name_req_cancel cp;
1304 struct inquiry_entry *e;
1306 switch (hdev->discovery.state) {
1307 case DISCOVERY_FINDING:
1308 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1309 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1311 cancel_delayed_work(&hdev->le_scan_disable);
1312 hci_req_add_le_scan_disable(req);
1317 case DISCOVERY_RESOLVING:
1318 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1323 bacpy(&cp.bdaddr, &e->data.bdaddr);
1324 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1330 /* Passive scanning */
1331 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1332 hci_req_add_le_scan_disable(req);
1342 static int clean_up_hci_state(struct hci_dev *hdev)
1344 struct hci_request req;
1345 struct hci_conn *conn;
1346 bool discov_stopped;
1349 hci_req_init(&req, hdev);
1351 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1352 test_bit(HCI_PSCAN, &hdev->flags)) {
1354 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1357 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1358 disable_advertising(&req);
1360 discov_stopped = hci_stop_discovery(&req);
1362 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1363 struct hci_cp_disconnect dc;
1364 struct hci_cp_reject_conn_req rej;
1366 switch (conn->state) {
1369 dc.handle = cpu_to_le16(conn->handle);
1370 dc.reason = 0x15; /* Terminated due to Power Off */
1371 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1374 if (conn->type == LE_LINK)
1375 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1377 else if (conn->type == ACL_LINK)
1378 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1382 bacpy(&rej.bdaddr, &conn->dst);
1383 rej.reason = 0x15; /* Terminated due to Power Off */
1384 if (conn->type == ACL_LINK)
1385 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1387 else if (conn->type == SCO_LINK)
1388 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1394 err = hci_req_run(&req, clean_up_hci_complete);
1395 if (!err && discov_stopped)
1396 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the adapter on or off.
 * Validates cp->val (0x00/0x01), rejects a second concurrent request
 * with BUSY, and short-circuits when the requested state already
 * matches.  Power-on is queued to the req_workqueue; power-off first
 * runs clean_up_hci_state() and falls back to immediate power-off when
 * that produced no HCI traffic (-ENODATA).
 */
1401 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1404 struct mgmt_mode *cp = data;
1405 struct mgmt_pending_cmd *cmd;
1408 BT_DBG("request for %s", hdev->name);
1410 if (cp->val != 0x00 && cp->val != 0x01)
1411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1412 MGMT_STATUS_INVALID_PARAMS);
1416 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* An auto-off timer in flight means the device is logically "on";
 * cancel the timer and report powered state directly.
 */
1422 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1423 cancel_delayed_work(&hdev->power_off);
1426 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1428 err = mgmt_powered(hdev, 1);
1433 if (!!cp->val == hdev_is_powered(hdev)) {
1434 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1438 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1445 queue_work(hdev->req_workqueue, &hdev->power_on);
1448 /* Disconnect connections, stop scans, etc */
1449 err = clean_up_hci_state(hdev);
/* Arm a safety timeout in case the cleanup request never completes. */
1451 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1452 HCI_POWER_OFF_TIMEOUT);
1454 /* ENODATA means there were no HCI commands queued */
1455 if (err == -ENODATA) {
1456 cancel_delayed_work(&hdev->power_off);
1457 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1463 hci_dev_unlock(hdev);
/* Broadcast an MGMT New Settings event carrying the current settings
 * bitmask to all mgmt sockets except @skip.
 */
1467 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1471 ev = cpu_to_le32(get_current_settings(hdev));
1473 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Public wrapper: emit New Settings to every mgmt socket (no skip). */
1476 int mgmt_new_settings(struct hci_dev *hdev)
1478 return new_settings(hdev, NULL);
1483 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with a
 * settings response, unlink it, and remember the first socket in the
 * cmd_lookup match (with a reference held) so the caller can skip it
 * when broadcasting New Settings.
 */
1487 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1489 struct cmd_lookup *match = data;
1491 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1493 list_del(&cmd->list);
1495 if (match->sk == NULL) {
1496 match->sk = cmd->sk;
1497 sock_hold(match->sk);
/* List entry already removed above, so free (not remove) the cmd. */
1500 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and release it.
 */
1503 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1507 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1508 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: prefer the command's own
 * cmd_complete handler when set; otherwise fall back to a plain
 * command-status response via cmd_status_rsp().
 */
1511 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1513 if (cmd->cmd_complete) {
1516 cmd->cmd_complete(cmd, *status);
1517 mgmt_pending_remove(cmd);
1522 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the original request parameters
 * back as the command-complete payload.
 */
1525 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1527 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1528 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with just the
 * leading mgmt_addr_info portion of the stored parameters (assumes the
 * request parameters begin with a mgmt_addr_info).
 */
1531 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1533 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1534 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR capability/enablement to an MGMT status: NOT_SUPPORTED if
 * the controller lacks BR/EDR, REJECTED if BR/EDR is disabled,
 * otherwise SUCCESS.
 */
1537 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1539 if (!lmp_bredr_capable(hdev))
1540 return MGMT_STATUS_NOT_SUPPORTED;
1541 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1542 return MGMT_STATUS_REJECTED;
1544 return MGMT_STATUS_SUCCESS;
/* Map LE capability/enablement to an MGMT status: NOT_SUPPORTED if the
 * controller lacks LE, REJECTED if LE is disabled, otherwise SUCCESS.
 */
1547 static u8 mgmt_le_support(struct hci_dev *hdev)
1549 if (!lmp_le_capable(hdev))
1550 return MGMT_STATUS_NOT_SUPPORTED;
1551 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1552 return MGMT_STATUS_REJECTED;
1554 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable: on failure,
 * report the error and clear HCI_LIMITED_DISCOVERABLE; on success,
 * update HCI_DISCOVERABLE, (re)arm the discoverable timeout, answer
 * the pending command, broadcast New Settings on change, and refresh
 * class of device / page scan state.
 */
1557 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1560 struct mgmt_pending_cmd *cmd;
1561 struct mgmt_mode *cp;
1562 struct hci_request req;
1565 BT_DBG("status 0x%02x", status);
1569 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1574 u8 mgmt_err = mgmt_status(status);
1575 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1576 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1582 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* A non-zero timeout arms the delayed discoverable-off work. */
1585 if (hdev->discov_timeout > 0) {
1586 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1587 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1591 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1595 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1598 new_settings(hdev, cmd->sk);
1600 /* When the discoverable mode gets changed, make sure
1601 * that class of device has the limited discoverable
1602 * bit correctly set. Also update page scan based on whitelist
1605 hci_req_init(&req, hdev);
1606 __hci_update_page_scan(&req);
1608 hci_req_run(&req, NULL);
1611 mgmt_pending_remove(cmd);
1614 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  cp->val: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable.  Validates the val/timeout
 * combination (off requires timeout == 0; limited requires timeout > 0),
 * requires either BR/EDR or LE to be enabled and the adapter to be
 * connectable, and handles the powered-off case by just flipping the
 * flag.  Otherwise builds an HCI request (IAC LAP + scan enable for
 * BR/EDR, advertising data for LE-only) completed by
 * set_discoverable_complete().
 */
1617 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1620 struct mgmt_cp_set_discoverable *cp = data;
1621 struct mgmt_pending_cmd *cmd;
1622 struct hci_request req;
1627 BT_DBG("request for %s", hdev->name);
1629 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1630 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1631 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1632 MGMT_STATUS_REJECTED);
1634 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1636 MGMT_STATUS_INVALID_PARAMS);
1638 timeout = __le16_to_cpu(cp->timeout);
1640 /* Disabling discoverable requires that no timeout is set,
1641 * and enabling limited discoverable requires a timeout.
1643 if ((cp->val == 0x00 && timeout > 0) ||
1644 (cp->val == 0x02 && timeout == 0))
1645 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1646 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered adapter. */
1650 if (!hdev_is_powered(hdev) && timeout > 0) {
1651 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1652 MGMT_STATUS_NOT_POWERED);
1656 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1657 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable mode to be active. */
1663 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1665 MGMT_STATUS_REJECTED);
1669 if (!hdev_is_powered(hdev)) {
1670 bool changed = false;
1672 /* Setting limited discoverable when powered off is
1673 * not a valid operation since it requires a timeout
1674 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1676 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1677 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1681 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1686 err = new_settings(hdev, sk);
1691 /* If the current mode is the same, then just update the timeout
1692 * value with the new value. And if only the timeout gets updated,
1693 * then no need for any HCI transactions.
1695 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1696 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1697 &hdev->dev_flags)) {
1698 cancel_delayed_work(&hdev->discov_off);
1699 hdev->discov_timeout = timeout;
1701 if (cp->val && hdev->discov_timeout > 0) {
1702 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1703 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1707 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1711 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1717 /* Cancel any potential discoverable timeout that might be
1718 * still active and store new timeout value. The arming of
1719 * the timeout happens in the complete handler.
1721 cancel_delayed_work(&hdev->discov_off);
1722 hdev->discov_timeout = timeout;
1724 /* Limited discoverable mode */
1725 if (cp->val == 0x02)
1726 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1728 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1730 hci_req_init(&req, hdev);
1732 /* The procedure for LE-only controllers is much simpler - just
1733 * update the advertising data.
1735 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1741 struct hci_cp_write_current_iac_lap hci_cp;
1743 if (cp->val == 0x02) {
1744 /* Limited discoverable mode */
1745 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC (0x9e8b00) and GIAC (0x9e8b33), little-endian byte order. */
1746 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1747 hci_cp.iac_lap[1] = 0x8b;
1748 hci_cp.iac_lap[2] = 0x9e;
1749 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1750 hci_cp.iac_lap[4] = 0x8b;
1751 hci_cp.iac_lap[5] = 0x9e;
1753 /* General discoverable mode */
1755 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1756 hci_cp.iac_lap[1] = 0x8b;
1757 hci_cp.iac_lap[2] = 0x9e;
1760 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1761 (hci_cp.num_iac * 3) + 1, &hci_cp);
1763 scan |= SCAN_INQUIRY;
1765 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1768 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1771 update_adv_data(&req);
1773 err = hci_req_run(&req, set_discoverable_complete);
1775 mgmt_pending_remove(cmd);
1778 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands to toggle fast-connectable
 * mode: interlaced scanning with a 160 ms interval when enabled,
 * standard scanning with the default 1.28 s interval otherwise.
 * No-op for non-BR/EDR adapters and pre-1.2 controllers.  Commands
 * are only added when the requested values differ from the cached
 * ones, avoiding redundant HCI traffic.
 */
1782 static void write_fast_connectable(struct hci_request *req, bool enable)
1784 struct hci_dev *hdev = req->hdev;
1785 struct hci_cp_write_page_scan_activity acp;
1788 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Interlaced page scan needs controller version 1.2 or later. */
1791 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1795 type = PAGE_SCAN_TYPE_INTERLACED;
1797 /* 160 msec page scan interval */
1798 acp.interval = cpu_to_le16(0x0100);
1800 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1802 /* default 1.28 sec page scan */
1803 acp.interval = cpu_to_le16(0x0800);
1806 acp.window = cpu_to_le16(0x0012);
1808 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1809 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1810 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1813 if (hdev->page_scan_type != type)
1814 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable: on success,
 * update HCI_CONNECTABLE (and clear HCI_DISCOVERABLE when turning
 * connectable off), answer the pending command, and on any flag
 * change broadcast New Settings and refresh page scan; discoverable
 * changes additionally refresh advertising data and background scan.
 */
1817 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1820 struct mgmt_pending_cmd *cmd;
1821 struct mgmt_mode *cp;
1822 bool conn_changed, discov_changed;
1824 BT_DBG("status 0x%02x", status);
1828 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1833 u8 mgmt_err = mgmt_status(status);
1834 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1840 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1842 discov_changed = false;
1844 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
/* Dropping connectable implicitly drops discoverable too. */
1846 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1850 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1852 if (conn_changed || discov_changed) {
1853 new_settings(hdev, cmd->sk);
1854 hci_update_page_scan(hdev);
1856 mgmt_update_adv_data(hdev);
1857 hci_update_background_scan(hdev);
1861 mgmt_pending_remove(cmd);
1864 hci_dev_unlock(hdev);
/* Settings-only path for Set Connectable (used when powered off or
 * when no HCI traffic is needed): flip HCI_CONNECTABLE, clear
 * HCI_DISCOVERABLE when disabling, send the settings response, and
 * broadcast New Settings plus refresh page/background scan on change.
 */
1867 static int set_connectable_update_settings(struct hci_dev *hdev,
1868 struct sock *sk, u8 val)
1870 bool changed = false;
1873 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1877 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1879 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1880 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1883 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1888 hci_update_page_scan(hdev);
1889 hci_update_background_scan(hdev);
1890 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  Requires BR/EDR or LE enabled and
 * a boolean cp->val.  Powered-off adapters take the settings-only
 * path; otherwise a BUSY check guards against concurrent
 * discoverable/connectable changes and an HCI request is built: scan
 * enable updates for BR/EDR, advertising-data updates for LE-only,
 * completed by set_connectable_complete().
 */
1896 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1899 struct mgmt_mode *cp = data;
1900 struct mgmt_pending_cmd *cmd;
1901 struct hci_request req;
1905 BT_DBG("request for %s", hdev->name);
1907 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1908 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1910 MGMT_STATUS_REJECTED);
1912 if (cp->val != 0x00 && cp->val != 0x01)
1913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1914 MGMT_STATUS_INVALID_PARAMS);
1918 if (!hdev_is_powered(hdev)) {
1919 err = set_connectable_update_settings(hdev, sk, cp->val);
1923 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1924 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1925 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1930 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1936 hci_req_init(&req, hdev);
1938 /* If BR/EDR is not enabled and we disable advertising as a
1939 * by-product of disabling connectable, we need to update the
1940 * advertising flags.
1942 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1944 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1945 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1947 update_adv_data(&req);
1948 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1952 /* If we don't have any whitelist entries just
1953 * disable all scanning. If there are entries
1954 * and we had both page and inquiry scanning
1955 * enabled then fall back to only page scanning.
1956 * Otherwise no changes are needed.
1958 if (list_empty(&hdev->whitelist))
1959 scan = SCAN_DISABLED;
1960 else if (test_bit(HCI_ISCAN, &hdev->flags))
1963 goto no_scan_update;
/* Disabling connectable while discoverable: drop the timeout. */
1965 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1966 hdev->discov_timeout > 0)
1967 cancel_delayed_work(&hdev->discov_off);
1970 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1974 /* Update the advertising parameters if necessary */
1975 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1976 enable_advertising(&req);
1978 err = hci_req_run(&req, set_connectable_complete);
1980 mgmt_pending_remove(cmd);
/* No HCI work needed: fall back to the settings-only update. */
1981 if (err == -ENODATA)
1982 err = set_connectable_update_settings(hdev, sk,
1988 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag, so just flip
 * HCI_BONDABLE, reply with the settings, and broadcast New Settings
 * when the value actually changed.  No HCI traffic is needed.
 */
1992 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1995 struct mgmt_mode *cp = data;
1999 BT_DBG("request for %s", hdev->name);
2001 if (cp->val != 0x00 && cp->val != 0x01)
2002 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2003 MGMT_STATUS_INVALID_PARAMS);
2008 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
2010 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
2012 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2017 err = new_settings(hdev, sk);
2020 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR link-level
 * authentication.  Powered-off adapters just flip HCI_LINK_SECURITY;
 * powered ones issue HCI Write Auth Enable (skipped when the
 * controller's HCI_AUTH flag already matches).  A concurrent request
 * gets BUSY.
 */
2024 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2027 struct mgmt_mode *cp = data;
2028 struct mgmt_pending_cmd *cmd;
2032 BT_DBG("request for %s", hdev->name);
2034 status = mgmt_bredr_support(hdev);
2036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2039 if (cp->val != 0x00 && cp->val != 0x01)
2040 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2041 MGMT_STATUS_INVALID_PARAMS);
2045 if (!hdev_is_powered(hdev)) {
2046 bool changed = false;
2048 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2049 &hdev->dev_flags)) {
2050 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2054 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2059 err = new_settings(hdev, sk);
2064 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2065 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: nothing to send. */
2072 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2073 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2077 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2083 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2085 mgmt_pending_remove(cmd);
2090 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing.  Requires an
 * SSP-capable BR/EDR controller.  Powered-off adapters flip
 * HCI_SSP_ENABLED (disabling SSP also clears High Speed support);
 * powered ones issue HCI Write SSP Mode, first turning off SSP debug
 * mode when SSP is being disabled with debug keys in use.
 */
2094 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2096 struct mgmt_mode *cp = data;
2097 struct mgmt_pending_cmd *cmd;
2101 BT_DBG("request for %s", hdev->name);
2103 status = mgmt_bredr_support(hdev);
2105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2107 if (!lmp_ssp_capable(hdev))
2108 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2109 MGMT_STATUS_NOT_SUPPORTED);
2111 if (cp->val != 0x00 && cp->val != 0x01)
2112 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2113 MGMT_STATUS_INVALID_PARAMS);
2117 if (!hdev_is_powered(hdev)) {
2121 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2124 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* High Speed depends on SSP, so it goes down with it. */
2127 changed = test_and_clear_bit(HCI_HS_ENABLED,
2130 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2133 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2138 err = new_settings(hdev, sk);
2143 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2144 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2149 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2150 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2154 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP with debug keys active: turn debug mode off first
 * (cp->val is 0x00 here, which is the payload being sent).
 */
2160 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2161 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2162 sizeof(cp->val), &cp->val);
2164 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2166 mgmt_pending_remove(cmd);
2171 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggle High Speed (AMP) support.  Requires
 * BR/EDR with SSP enabled.  Host-side flag only — no HCI traffic —
 * but a pending SET_SSP is treated as BUSY, and disabling HS on a
 * powered adapter is rejected (NOTE(review): lines are elided here;
 * confirm the powered check applies to the disable path only).
 */
2175 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2177 struct mgmt_mode *cp = data;
2182 BT_DBG("request for %s", hdev->name);
2184 status = mgmt_bredr_support(hdev);
2186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2188 if (!lmp_ssp_capable(hdev))
2189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2190 MGMT_STATUS_NOT_SUPPORTED);
2192 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2193 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2194 MGMT_STATUS_REJECTED);
2196 if (cp->val != 0x00 && cp->val != 0x01)
2197 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2198 MGMT_STATUS_INVALID_PARAMS);
2202 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2203 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2209 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2211 if (hdev_is_powered(hdev)) {
2212 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2213 MGMT_STATUS_REJECTED);
2217 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2220 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2225 err = new_settings(hdev, sk);
2228 hci_dev_unlock(hdev);
/* HCI request completion handler for Set LE: on failure, fail all
 * pending SET_LE commands with the mapped status; on success, answer
 * them, broadcast New Settings, and — if LE ended up enabled —
 * refresh advertising data, scan response data and background scan.
 */
2232 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2234 struct cmd_lookup match = { NULL, hdev };
2239 u8 mgmt_err = mgmt_status(status);
2241 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2246 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2248 new_settings(hdev, match.sk);
2253 /* Make sure the controller has a good default for
2254 * advertising data. Restrict the update to when LE
2255 * has actually been enabled. During power on, the
2256 * update in powered_update_hci will take care of it.
2258 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2259 struct hci_request req;
2261 hci_req_init(&req, hdev);
2262 update_adv_data(&req);
2263 update_scan_rsp_data(&req);
2264 __hci_update_background_scan(&req);
2265 hci_req_run(&req, NULL);
2269 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: toggle LE support on a dual-mode controller
 * (LE-only devices, i.e. those without BR/EDR enabled, are rejected).
 * When powered off or when the host-LE state already matches, only
 * the HCI_LE_ENABLED flag (and HCI_ADVERTISING on disable) changes.
 * Otherwise an HCI Write LE Host Supported request is issued,
 * disabling advertising first when turning LE off, completed by
 * le_enable_complete().
 */
2272 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2274 struct mgmt_mode *cp = data;
2275 struct hci_cp_write_le_host_supported hci_cp;
2276 struct mgmt_pending_cmd *cmd;
2277 struct hci_request req;
2281 BT_DBG("request for %s", hdev->name);
2283 if (!lmp_le_capable(hdev))
2284 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2285 MGMT_STATUS_NOT_SUPPORTED);
2287 if (cp->val != 0x00 && cp->val != 0x01)
2288 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2289 MGMT_STATUS_INVALID_PARAMS);
2291 /* LE-only devices do not allow toggling LE on/off */
2292 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2293 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2294 MGMT_STATUS_REJECTED);
2299 enabled = lmp_host_le_capable(hdev);
2301 if (!hdev_is_powered(hdev) || val == enabled) {
2302 bool changed = false;
2304 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2305 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE implies advertising can no longer be on. */
2309 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2310 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2314 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2319 err = new_settings(hdev, sk);
2324 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2325 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2326 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2331 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2337 hci_req_init(&req, hdev);
2339 memset(&hci_cp, 0, sizeof(hci_cp));
2343 hci_cp.simul = 0x00;
2345 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2346 disable_advertising(&req);
2349 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2352 err = hci_req_run(&req, le_enable_complete);
2354 mgmt_pending_remove(cmd);
2357 hci_dev_unlock(hdev);
2361 /* This is a helper function to test for pending mgmt commands that can
2362 * cause CoD or EIR HCI commands. We can only allow one such pending
2363 * mgmt command at a time since otherwise we cannot easily track what
2364 * the current values are, will be, and based on that calculate if a new
2365 * HCI command needs to be sent and if yes with what value.
2367 static bool pending_eir_or_class(struct hci_dev *hdev)
2369 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that may touch EIR/CoD. */
2371 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2372 switch (cmd->opcode) {
2373 case MGMT_OP_ADD_UUID:
2374 case MGMT_OP_REMOVE_UUID:
2375 case MGMT_OP_SET_DEV_CLASS:
2376 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * little-endian; used to detect 16/32-bit shortened UUIDs.
 */
2384 static const u8 bluetooth_base_uuid[] = {
2385 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2386 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: anything not built on the
 * Bluetooth Base UUID is a full 128-bit UUID; otherwise the value in
 * bytes 12..15 decides between 16- and 32-bit shortened forms.
 */
2389 static u8 get_uuid_size(const u8 *uuid)
2393 if (memcmp(uuid, bluetooth_base_uuid, 12))
2396 val = get_unaligned_le32(&uuid[12]);
/* Common completion for the class-of-device commands (Add/Remove UUID,
 * Set Device Class): reply to the pending @mgmt_op with the mapped
 * status and the current 3-byte device class, then drop the command.
 */
2403 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2405 struct mgmt_pending_cmd *cmd;
2409 cmd = mgmt_pending_find(mgmt_op, hdev);
2413 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2414 mgmt_status(status), hdev->dev_class, 3);
2416 mgmt_pending_remove(cmd);
2419 hci_dev_unlock(hdev);
/* HCI request callback for Add UUID: forward to mgmt_class_complete. */
2422 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2424 BT_DBG("status 0x%02x", status);
2426 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append a service UUID to hdev->uuids and
 * refresh class of device / EIR via an HCI request.  BUSY while any
 * other EIR/CoD-affecting command is pending.  -ENODATA from
 * hci_req_run() means no HCI update was needed, so the command
 * completes immediately with the current device class.
 */
2429 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2431 struct mgmt_cp_add_uuid *cp = data;
2432 struct mgmt_pending_cmd *cmd;
2433 struct hci_request req;
2434 struct bt_uuid *uuid;
2437 BT_DBG("request for %s", hdev->name);
2441 if (pending_eir_or_class(hdev)) {
2442 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2447 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2453 memcpy(uuid->uuid, cp->uuid, 16);
2454 uuid->svc_hint = cp->svc_hint;
2455 uuid->size = get_uuid_size(cp->uuid);
2457 list_add_tail(&uuid->list, &hdev->uuids);
2459 hci_req_init(&req, hdev);
2464 err = hci_req_run(&req, add_uuid_complete);
2466 if (err != -ENODATA)
2469 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2470 hdev->dev_class, 3);
2474 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2483 hci_dev_unlock(hdev);
/* Arm the service-cache timer on a powered adapter; returns whether
 * caching was (or already is) active — powered-off adapters presumably
 * return false (elided lines).  While the cache is active, EIR/CoD
 * updates are deferred until the cache timeout fires.
 */
2487 static bool enable_service_cache(struct hci_dev *hdev)
2489 if (!hdev_is_powered(hdev))
2492 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2493 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for Remove UUID: forward to mgmt_class_complete. */
2501 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2503 BT_DBG("status 0x%02x", status);
2505 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID from hdev->uuids, or
 * all of them when the all-zero wildcard UUID is given (in which case
 * the service cache may defer the actual EIR/CoD update).  An unknown
 * UUID yields INVALID_PARAMS; otherwise the flow mirrors add_uuid(),
 * including the immediate-complete path on -ENODATA.
 */
2508 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2511 struct mgmt_cp_remove_uuid *cp = data;
2512 struct mgmt_pending_cmd *cmd;
2513 struct bt_uuid *match, *tmp;
2514 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2515 struct hci_request req;
2518 BT_DBG("request for %s", hdev->name);
2522 if (pending_eir_or_class(hdev)) {
2523 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: clear the whole list. */
2528 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2529 hci_uuids_clear(hdev);
2531 if (enable_service_cache(hdev)) {
2532 err = mgmt_cmd_complete(sk, hdev->id,
2533 MGMT_OP_REMOVE_UUID,
2534 0, hdev->dev_class, 3);
2543 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2544 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2547 list_del(&match->list);
2553 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2554 MGMT_STATUS_INVALID_PARAMS);
2559 hci_req_init(&req, hdev);
2564 err = hci_req_run(&req, remove_uuid_complete);
2566 if (err != -ENODATA)
2569 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2570 hdev->dev_class, 3);
2574 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2583 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class: forward to
 * mgmt_class_complete.
 */
2587 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2589 BT_DBG("status 0x%02x", status);
2591 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: set major/minor class of device.
 * Validates the reserved bits (minor low 2 bits and major high 3 bits
 * must be zero), stores the values, and on a powered adapter pushes
 * the new class via an HCI request — first flushing the service cache
 * synchronously if it was armed.  Powered-off adapters complete
 * immediately with the cached class.
 */
2594 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2597 struct mgmt_cp_set_dev_class *cp = data;
2598 struct mgmt_pending_cmd *cmd;
2599 struct hci_request req;
2602 BT_DBG("request for %s", hdev->name);
2604 if (!lmp_bredr_capable(hdev))
2605 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2606 MGMT_STATUS_NOT_SUPPORTED);
2610 if (pending_eir_or_class(hdev)) {
2611 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2616 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2617 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2618 MGMT_STATUS_INVALID_PARAMS);
2622 hdev->major_class = cp->major;
2623 hdev->minor_class = cp->minor;
2625 if (!hdev_is_powered(hdev)) {
2626 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2627 hdev->dev_class, 3);
2631 hci_req_init(&req, hdev);
/* Drop the lock while flushing the cache work — it may sleep and
 * may itself need the device lock.
 */
2633 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2634 hci_dev_unlock(hdev);
2635 cancel_delayed_work_sync(&hdev->service_cache);
2642 err = hci_req_run(&req, set_class_complete);
2644 if (err != -ENODATA)
2647 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2648 hdev->dev_class, 3);
2652 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2661 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied set.  Validates key_count against the u16 payload
 * ceiling and the exact expected length, each key's address type and
 * key type, and the debug_keys flag; then clears the existing store
 * and loads the new keys, always skipping debug-combination keys.
 */
2665 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2668 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on keys that can fit in a maximum-size mgmt payload;
 * guards the expected_len multiplication against overflow.
 */
2669 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2670 sizeof(struct mgmt_link_key_info));
2671 u16 key_count, expected_len;
2675 BT_DBG("request for %s", hdev->name);
2677 if (!lmp_bredr_capable(hdev))
2678 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2679 MGMT_STATUS_NOT_SUPPORTED);
2681 key_count = __le16_to_cpu(cp->key_count);
2682 if (key_count > max_key_count) {
2683 BT_ERR("load_link_keys: too big key_count value %u",
2685 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2686 MGMT_STATUS_INVALID_PARAMS);
2689 expected_len = sizeof(*cp) + key_count *
2690 sizeof(struct mgmt_link_key_info);
2691 if (expected_len != len) {
2692 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2694 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2695 MGMT_STATUS_INVALID_PARAMS);
2698 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2699 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2700 MGMT_STATUS_INVALID_PARAMS);
2702 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the key store. */
2705 for (i = 0; i < key_count; i++) {
2706 struct mgmt_link_key_info *key = &cp->keys[i];
2708 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2709 return mgmt_cmd_status(sk, hdev->id,
2710 MGMT_OP_LOAD_LINK_KEYS,
2711 MGMT_STATUS_INVALID_PARAMS);
2716 hci_link_keys_clear(hdev);
2719 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2722 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2726 new_settings(hdev, NULL);
2728 for (i = 0; i < key_count; i++) {
2729 struct mgmt_link_key_info *key = &cp->keys[i];
2731 /* Always ignore debug keys and require a new pairing if
2732 * the user wants to use them.
2734 if (key->type == HCI_LK_DEBUG_COMBINATION)
2737 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2738 key->type, key->pin_len, NULL);
2741 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2743 hci_dev_unlock(hdev);
/* Broadcast a Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk (the socket that initiated the unpair).
 */
2748 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2749 u8 addr_type, struct sock *skip_sk)
2751 struct mgmt_ev_device_unpaired ev;
2753 bacpy(&ev.addr.bdaddr, bdaddr);
2754 ev.addr.type = addr_type;
2756 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing data (link key for
 * BR/EDR; IRK and LTK for LE) for the given address, and optionally
 * terminate an existing connection when cp->disconnect is set.  If a
 * disconnect is needed, completion is deferred via a pending command
 * whose reply carries the address (addr_cmd_complete); otherwise the
 * command completes immediately and Device Unpaired is broadcast.
 */
2760 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2763 struct mgmt_cp_unpair_device *cp = data;
2764 struct mgmt_rp_unpair_device rp;
2765 struct hci_cp_disconnect dc;
2766 struct mgmt_pending_cmd *cmd;
2767 struct hci_conn *conn;
2770 memset(&rp, 0, sizeof(rp));
2771 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2772 rp.addr.type = cp->addr.type;
2774 if (!bdaddr_type_is_valid(cp->addr.type))
2775 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2776 MGMT_STATUS_INVALID_PARAMS,
2779 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2780 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2781 MGMT_STATUS_INVALID_PARAMS,
2786 if (!hdev_is_powered(hdev)) {
2787 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2788 MGMT_STATUS_NOT_POWERED, &rp,
2793 if (cp->addr.type == BDADDR_BREDR) {
2794 /* If disconnection is requested, then look up the
2795 * connection. If the remote device is connected, it
2796 * will be later used to terminate the link.
2798 * Setting it to NULL explicitly will cause no
2799 * termination of the link.
2802 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2807 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2811 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2814 /* Defer clearing up the connection parameters
2815 * until closing to give a chance of keeping
2816 * them if a repairing happens.
2818 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2820 /* If disconnection is not requested, then
2821 * clear the connection variable so that the
2822 * link is not terminated.
2824 if (!cp->disconnect)
2828 if (cp->addr.type == BDADDR_LE_PUBLIC)
2829 addr_type = ADDR_LE_DEV_PUBLIC;
2831 addr_type = ADDR_LE_DEV_RANDOM;
2833 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2835 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No key was found: the device was never paired. */
2839 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2840 MGMT_STATUS_NOT_PAIRED, &rp,
2845 /* If the connection variable is set, then termination of the
2846 * link is requested.
2849 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2851 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2855 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2862 cmd->cmd_complete = addr_cmd_complete;
2864 dc.handle = cpu_to_le16(conn->handle);
2865 dc.reason = 0x13; /* Remote User Terminated Connection */
2866 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2868 mgmt_pending_remove(cmd);
2871 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the ACL (BR/EDR) or LE link to
 * the given address with reason Remote User Terminated Connection.
 * Fails with NOT_POWERED when the interface is down, BUSY when another
 * disconnect is pending, and NOT_CONNECTED when no usable connection
 * exists.  The pending command is completed later with the original
 * request echoed back (generic_cmd_complete).
 */
2875 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2878 struct mgmt_cp_disconnect *cp = data;
2879 struct mgmt_rp_disconnect rp;
2880 struct mgmt_pending_cmd *cmd;
2881 struct hci_conn *conn;
2886 memset(&rp, 0, sizeof(rp));
2887 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2888 rp.addr.type = cp->addr.type;
2890 if (!bdaddr_type_is_valid(cp->addr.type))
2891 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2892 MGMT_STATUS_INVALID_PARAMS,
2897 if (!test_bit(HCI_UP, &hdev->flags)) {
2898 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2899 MGMT_STATUS_NOT_POWERED, &rp,
2904 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2905 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2906 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2910 if (cp->addr.type == BDADDR_BREDR)
2911 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2914 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections have no link to tear down. */
2916 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2917 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2918 MGMT_STATUS_NOT_CONNECTED, &rp,
2923 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2929 cmd->cmd_complete = generic_cmd_complete;
2931 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2933 mgmt_pending_remove(cmd);
2936 hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type into the mgmt BDADDR_*
 * address-type constants.  Unknown combinations fall back to
 * LE Random (for LE links) or BR/EDR.
 */
2940 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2942 switch (link_type) {
2944 switch (addr_type) {
2945 case ADDR_LE_DEV_PUBLIC:
2946 return BDADDR_LE_PUBLIC;
2949 /* Fallback to LE Random address type */
2950 return BDADDR_LE_RANDOM;
2954 /* Fallback to BR/EDR type */
2955 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections.  Counts eligible entries first to size
 * the response, fills it while skipping SCO/eSCO links, then
 * recomputes the length since filtering can make the second pass
 * produce fewer entries than the first.
 */
2959 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2962 struct mgmt_rp_get_connections *rp;
2972 if (!hdev_is_powered(hdev)) {
2973 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2974 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected. */
2979 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2980 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2984 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2985 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address list. */
2992 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2993 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2995 bacpy(&rp->addr[i].bdaddr, &c->dst);
2996 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2997 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3002 rp->conn_count = cpu_to_le16(i);
3004 /* Recalculate length in case of filtered SCO connections, etc */
3005 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3007 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3013 hci_dev_unlock(hdev);
/* Queue a pending MGMT_OP_PIN_CODE_NEG_REPLY command and send the
 * corresponding HCI PIN Code Negative Reply; the pending entry is
 * removed again if the HCI send fails.
 */
3017 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3018 struct mgmt_cp_pin_code_neg_reply *cp)
3020 struct mgmt_pending_cmd *cmd;
3023 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3028 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3029 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3031 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a userspace-supplied PIN code
 * to the controller for the ACL connection with the given address.
 * A high-security pairing requires a full 16-byte PIN; anything shorter
 * is converted into a negative reply toward the remote device.
 */
3036 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3039 struct hci_conn *conn;
3040 struct mgmt_cp_pin_code_reply *cp = data;
3041 struct hci_cp_pin_code_reply reply;
3042 struct mgmt_pending_cmd *cmd;
3049 if (!hdev_is_powered(hdev)) {
3050 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3051 MGMT_STATUS_NOT_POWERED);
3055 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3057 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3058 MGMT_STATUS_NOT_CONNECTED);
/* BT_SECURITY_HIGH mandates a 16-byte PIN; reject shorter ones. */
3062 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3063 struct mgmt_cp_pin_code_neg_reply ncp;
3065 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3067 BT_ERR("PIN code is not 16 bytes long");
3069 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3071 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3072 MGMT_STATUS_INVALID_PARAMS);
3077 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3083 cmd->cmd_complete = addr_cmd_complete;
3085 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3086 reply.pin_len = cp->pin_len;
3087 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3089 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3091 mgmt_pending_remove(cmd);
3094 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairings in hdev->io_capability.  Values above
 * SMP_IO_KEYBOARD_DISPLAY are rejected as invalid parameters.
 */
3098 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3101 struct mgmt_cp_set_io_capability *cp = data;
3105 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3106 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3107 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3111 hdev->io_capability = cp->io_capability;
3113 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3114 hdev->io_capability);
3116 hci_dev_unlock(hdev);
3118 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at the given connection, or NULL if no pairing is in progress for it.
 */
3122 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3124 struct hci_dev *hdev = conn->hdev;
3125 struct mgmt_pending_cmd *cmd;
3127 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3128 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3131 if (cmd->user_data != conn)
/* Completion handler for a Pair Device command: send the final
 * MGMT_OP_PAIR_DEVICE reply, detach all connection callbacks so no
 * further events fire for this pairing, and drop the connection
 * reference taken when pairing started.
 */
3140 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3142 struct mgmt_rp_pair_device rp;
3143 struct hci_conn *conn = cmd->user_data;
3146 bacpy(&rp.addr.bdaddr, &conn->dst);
3147 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3149 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3150 status, &rp, sizeof(rp));
3152 /* So we don't get further callbacks for this connection */
3153 conn->connect_cfm_cb = NULL;
3154 conn->security_cfm_cb = NULL;
3155 conn->disconn_cfm_cb = NULL;
3157 hci_conn_drop(conn);
3159 /* The device is paired so there is no need to remove
3160 * its connection parameters anymore.
3162 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over SMP finishes; complete any
 * pending Pair Device command with success or failure accordingly.
 */
3169 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3171 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3172 struct mgmt_pending_cmd *cmd;
3174 cmd = find_pairing(conn);
3176 cmd->cmd_complete(cmd, status);
3177 mgmt_pending_remove(cmd);
/* BR/EDR connection callback used during pairing: resolve the pending
 * Pair Device command with the (HCI->mgmt translated) status.
 */
3181 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3183 struct mgmt_pending_cmd *cmd;
3185 BT_DBG("status %u", status);
3187 cmd = find_pairing(conn);
3189 BT_DBG("Unable to find a pending command");
3193 cmd->cmd_complete(cmd, mgmt_status(status));
3194 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb.  NOTE(review): the elided lines
 * (original 3200-3205) presumably contain an LE-specific early return -
 * confirm against the full source.
 */
3197 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3199 struct mgmt_pending_cmd *cmd;
3201 BT_DBG("status %u", status);
3206 cmd = find_pairing(conn);
3208 BT_DBG("Unable to find a pending command");
3212 cmd->cmd_complete(cmd, mgmt_status(status));
3213 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the request, initiate an ACL or
 * LE connection to the target, register pairing callbacks and a pending
 * command, then let connection/security events drive completion.
 */
3216 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3219 struct mgmt_cp_pair_device *cp = data;
3220 struct mgmt_rp_pair_device rp;
3221 struct mgmt_pending_cmd *cmd;
3222 u8 sec_level, auth_type;
3223 struct hci_conn *conn;
3228 memset(&rp, 0, sizeof(rp));
3229 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3230 rp.addr.type = cp->addr.type;
/* Parameter validation: address type and IO capability range. */
3232 if (!bdaddr_type_is_valid(cp->addr.type))
3233 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3234 MGMT_STATUS_INVALID_PARAMS,
3237 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3238 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3239 MGMT_STATUS_INVALID_PARAMS,
3244 if (!hdev_is_powered(hdev)) {
3245 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3246 MGMT_STATUS_NOT_POWERED, &rp,
3251 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3252 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3253 MGMT_STATUS_ALREADY_PAIRED, &rp,
3258 sec_level = BT_SECURITY_MEDIUM;
3259 auth_type = HCI_AT_DEDICATED_BONDING;
3261 if (cp->addr.type == BDADDR_BREDR) {
3262 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3267 /* Convert from L2CAP channel address type to HCI address type
3269 if (cp->addr.type == BDADDR_LE_PUBLIC)
3270 addr_type = ADDR_LE_DEV_PUBLIC;
3272 addr_type = ADDR_LE_DEV_RANDOM;
3274 /* When pairing a new device, it is expected to remember
3275 * this device for future connections. Adding the connection
3276 * parameter information ahead of time allows tracking
3277 * of the slave preferred values and will speed up any
3278 * further connection establishment.
3280 * If connection parameters already exist, then they
3281 * will be kept and this function does nothing.
3283 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3285 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3286 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map connection-attempt errno values onto mgmt status codes. */
3293 if (PTR_ERR(conn) == -EBUSY)
3294 status = MGMT_STATUS_BUSY;
3295 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3296 status = MGMT_STATUS_NOT_SUPPORTED;
3297 else if (PTR_ERR(conn) == -ECONNREFUSED)
3298 status = MGMT_STATUS_REJECTED;
3300 status = MGMT_STATUS_CONNECT_FAILED;
3302 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3303 status, &rp, sizeof(rp));
/* A connect_cfm_cb already set means another pairing owns this conn. */
3307 if (conn->connect_cfm_cb) {
3308 hci_conn_drop(conn);
3309 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3310 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3314 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3317 hci_conn_drop(conn);
3321 cmd->cmd_complete = pairing_complete;
3323 /* For LE, just connecting isn't a proof that the pairing finished */
3324 if (cp->addr.type == BDADDR_BREDR) {
3325 conn->connect_cfm_cb = pairing_complete_cb;
3326 conn->security_cfm_cb = pairing_complete_cb;
3327 conn->disconn_cfm_cb = pairing_complete_cb;
3329 conn->connect_cfm_cb = le_pairing_complete_cb;
3330 conn->security_cfm_cb = le_pairing_complete_cb;
3331 conn->disconn_cfm_cb = le_pairing_complete_cb;
3334 conn->io_capability = cp->io_cap;
3335 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: complete immediately. */
3337 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3338 hci_conn_security(conn, sec_level, auth_type, true)) {
3339 cmd->cmd_complete(cmd, 0);
3340 mgmt_pending_remove(cmd);
3346 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command, but only if the supplied address matches the connection that
 * the pending pairing is actually for.
 */
3350 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3353 struct mgmt_addr_info *addr = data;
3354 struct mgmt_pending_cmd *cmd;
3355 struct hci_conn *conn;
3362 if (!hdev_is_powered(hdev)) {
3363 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3364 MGMT_STATUS_NOT_POWERED);
3368 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3370 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3371 MGMT_STATUS_INVALID_PARAMS);
3375 conn = cmd->user_data;
3377 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3378 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3379 MGMT_STATUS_INVALID_PARAMS);
/* Complete the pairing with CANCELLED, then ack the cancel itself. */
3383 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3384 mgmt_pending_remove(cmd);
3386 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3387 addr, sizeof(*addr));
3389 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negatives).  For LE connections the
 * response is routed through SMP; for BR/EDR it is sent as the given
 * HCI command, with the passkey appended for HCI_OP_USER_PASSKEY_REPLY.
 */
3393 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3394 struct mgmt_addr_info *addr, u16 mgmt_op,
3395 u16 hci_op, __le32 passkey)
3397 struct mgmt_pending_cmd *cmd;
3398 struct hci_conn *conn;
3403 if (!hdev_is_powered(hdev)) {
3404 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3405 MGMT_STATUS_NOT_POWERED, addr,
3410 if (addr->type == BDADDR_BREDR)
3411 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3413 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3416 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3417 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled by the SMP layer, not raw HCI. */
3422 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3423 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3425 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3426 MGMT_STATUS_SUCCESS, addr,
3429 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3430 MGMT_STATUS_FAILED, addr,
3436 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3442 cmd->cmd_complete = addr_cmd_complete;
3444 /* Continue with pairing via HCI */
3445 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3446 struct hci_cp_user_passkey_reply cp;
3448 bacpy(&cp.bdaddr, &addr->bdaddr);
3449 cp.passkey = passkey;
3450 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3452 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3456 mgmt_pending_remove(cmd);
3459 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the PIN negative-reply opcodes.
 */
3463 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3464 void *data, u16 len)
3466 struct mgmt_cp_pin_code_neg_reply *cp = data;
3470 return user_pairing_resp(sk, hdev, &cp->addr,
3471 MGMT_OP_PIN_CODE_NEG_REPLY,
3472 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the exact payload size
 * then delegate to user_pairing_resp() (no passkey for confirm).
 */
3475 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3478 struct mgmt_cp_user_confirm_reply *cp = data;
3482 if (len != sizeof(*cp))
3483 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3484 MGMT_STATUS_INVALID_PARAMS);
3486 return user_pairing_resp(sk, hdev, &cp->addr,
3487 MGMT_OP_USER_CONFIRM_REPLY,
3488 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative-confirm opcodes.
 */
3491 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3492 void *data, u16 len)
3494 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3498 return user_pairing_resp(sk, hdev, &cp->addr,
3499 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3500 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: delegate to user_pairing_resp(),
 * forwarding the userspace-supplied passkey.
 */
3503 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3506 struct mgmt_cp_user_passkey_reply *cp = data;
3510 return user_pairing_resp(sk, hdev, &cp->addr,
3511 MGMT_OP_USER_PASSKEY_REPLY,
3512 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative-passkey opcodes.
 */
3515 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3516 void *data, u16 len)
3518 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3522 return user_pairing_resp(sk, hdev, &cp->addr,
3523 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3524 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request.
 */
3527 static void update_name(struct hci_request *req)
3529 struct hci_dev *hdev = req->hdev;
3530 struct hci_cp_write_local_name cp;
3532 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3534 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: resolve the
 * pending mgmt command with a status or success reply.
 */
3537 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3539 struct mgmt_cp_set_local_name *cp;
3540 struct mgmt_pending_cmd *cmd;
3542 BT_DBG("status 0x%02x", status);
3546 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3553 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3554 mgmt_status(status));
3556 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3559 mgmt_pending_remove(cmd);
3562 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short
 * name.  Unchanged names and the unpowered case are answered directly;
 * otherwise the name is written to the controller via an HCI request.
 */
3565 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3568 struct mgmt_cp_set_local_name *cp = data;
3569 struct mgmt_pending_cmd *cmd;
3570 struct hci_request req;
3577 /* If the old values are the same as the new ones just return a
3578 * direct command complete event.
3580 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3581 !memcmp(hdev->short_name, cp->short_name,
3582 sizeof(hdev->short_name))) {
3583 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3588 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Unpowered: store the name locally and notify; no HCI traffic. */
3590 if (!hdev_is_powered(hdev)) {
3591 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3598 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3604 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3610 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3612 hci_req_init(&req, hdev);
3614 if (lmp_bredr_capable(hdev)) {
3619 /* The name is stored in the scan response data and so
3620 * no need to update the advertising data here.
3622 if (lmp_le_capable(hdev))
3623 update_scan_rsp_data(&req);
3625 err = hci_req_run(&req, set_name_complete);
3627 mgmt_pending_remove(cmd);
3630 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its local
 * OOB pairing data, using the extended variant when BR/EDR Secure
 * Connections is enabled.  Only one such request may be pending.
 */
3634 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3635 void *data, u16 data_len)
3637 struct mgmt_pending_cmd *cmd;
3640 BT_DBG("%s", hdev->name);
3644 if (!hdev_is_powered(hdev)) {
3645 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3646 MGMT_STATUS_NOT_POWERED);
3650 if (!lmp_ssp_capable(hdev)) {
3651 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3652 MGMT_STATUS_NOT_SUPPORTED);
3656 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3657 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3662 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3668 if (bredr_sc_enabled(hdev))
3669 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3672 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3675 mgmt_pending_remove(cmd);
3678 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing data.
 * Accepts two payload sizes - the legacy P-192-only format (BR/EDR
 * only) and the extended format carrying both P-192 and P-256 values,
 * where an all-zero hash or randomizer disables that key set.
 */
3682 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3683 void *data, u16 len)
3685 struct mgmt_addr_info *addr = data;
3688 BT_DBG("%s ", hdev->name);
3690 if (!bdaddr_type_is_valid(addr->type))
3691 return mgmt_cmd_complete(sk, hdev->id,
3692 MGMT_OP_ADD_REMOTE_OOB_DATA,
3693 MGMT_STATUS_INVALID_PARAMS,
3694 addr, sizeof(*addr));
/* Legacy format: P-192 hash/randomizer only, BR/EDR addresses only. */
3698 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3699 struct mgmt_cp_add_remote_oob_data *cp = data;
3702 if (cp->addr.type != BDADDR_BREDR) {
3703 err = mgmt_cmd_complete(sk, hdev->id,
3704 MGMT_OP_ADD_REMOTE_OOB_DATA,
3705 MGMT_STATUS_INVALID_PARAMS,
3706 &cp->addr, sizeof(cp->addr));
3710 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3711 cp->addr.type, cp->hash,
3712 cp->rand, NULL, NULL);
3714 status = MGMT_STATUS_FAILED;
3716 status = MGMT_STATUS_SUCCESS;
3718 err = mgmt_cmd_complete(sk, hdev->id,
3719 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3720 &cp->addr, sizeof(cp->addr));
3721 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3722 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3723 u8 *rand192, *hash192, *rand256, *hash256;
3726 if (bdaddr_type_is_le(cp->addr.type)) {
3727 /* Enforce zero-valued 192-bit parameters as
3728 * long as legacy SMP OOB isn't implemented.
3730 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3731 memcmp(cp->hash192, ZERO_KEY, 16)) {
3732 err = mgmt_cmd_complete(sk, hdev->id,
3733 MGMT_OP_ADD_REMOTE_OOB_DATA,
3734 MGMT_STATUS_INVALID_PARAMS,
3735 addr, sizeof(*addr));
3742 /* In case one of the P-192 values is set to zero,
3743 * then just disable OOB data for P-192.
3745 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3746 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3750 rand192 = cp->rand192;
3751 hash192 = cp->hash192;
3755 /* In case one of the P-256 values is set to zero, then just
3756 * disable OOB data for P-256.
3758 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3759 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3763 rand256 = cp->rand256;
3764 hash256 = cp->hash256;
3767 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3768 cp->addr.type, hash192, rand192,
3771 status = MGMT_STATUS_FAILED;
3773 status = MGMT_STATUS_SUCCESS;
3775 err = mgmt_cmd_complete(sk, hdev->id,
3776 MGMT_OP_ADD_REMOTE_OOB_DATA,
3777 status, &cp->addr, sizeof(cp->addr));
3779 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3781 MGMT_STATUS_INVALID_PARAMS);
3785 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB data
 * for one BR/EDR address, or clear all entries when BDADDR_ANY is given.
 */
3789 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3790 void *data, u16 len)
3792 struct mgmt_cp_remove_remote_oob_data *cp = data;
3796 BT_DBG("%s", hdev->name);
3798 if (cp->addr.type != BDADDR_BREDR)
3799 return mgmt_cmd_complete(sk, hdev->id,
3800 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3801 MGMT_STATUS_INVALID_PARAMS,
3802 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: wipe the whole OOB data list. */
3806 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3807 hci_remote_oob_data_clear(hdev);
3808 status = MGMT_STATUS_SUCCESS;
3812 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3814 status = MGMT_STATUS_INVALID_PARAMS;
3816 status = MGMT_STATUS_SUCCESS;
3819 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3820 status, &cp->addr, sizeof(cp->addr));
3822 hci_dev_unlock(hdev);
/* Queue the HCI commands that start discovery for the current
 * hdev->discovery.type: a BR/EDR inquiry, an LE active scan, or both
 * (interleaved).  On failure, *status receives a MGMT_STATUS_* code and
 * false is returned so the caller can fail the mgmt command directly.
 */
3826 static bool trigger_discovery(struct hci_request *req, u8 *status)
3828 struct hci_dev *hdev = req->hdev;
3829 struct hci_cp_le_set_scan_param param_cp;
3830 struct hci_cp_le_set_scan_enable enable_cp;
3831 struct hci_cp_inquiry inq_cp;
3832 /* General inquiry access code (GIAC) */
3833 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3837 switch (hdev->discovery.type) {
3838 case DISCOV_TYPE_BREDR:
3839 *status = mgmt_bredr_support(hdev);
3843 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3844 *status = MGMT_STATUS_BUSY;
3848 hci_inquiry_cache_flush(hdev);
3850 memset(&inq_cp, 0, sizeof(inq_cp));
3851 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3852 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3853 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3856 case DISCOV_TYPE_LE:
3857 case DISCOV_TYPE_INTERLEAVED:
3858 *status = mgmt_le_support(hdev);
/* Interleaved discovery additionally requires BR/EDR to be enabled. */
3862 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3863 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3864 *status = MGMT_STATUS_NOT_SUPPORTED;
3868 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3869 /* Don't let discovery abort an outgoing
3870 * connection attempt that's using directed
3873 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3875 *status = MGMT_STATUS_REJECTED;
3879 disable_advertising(req);
3882 /* If controller is scanning, it means the background scanning
3883 * is running. Thus, we should temporarily stop it in order to
3884 * set the discovery scanning parameters.
3886 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3887 hci_req_add_le_scan_disable(req);
3889 memset(&param_cp, 0, sizeof(param_cp));
3891 /* All active scans will be done with either a resolvable
3892 * private address (when privacy feature has been enabled)
3893 * or non-resolvable private address.
3895 err = hci_update_random_address(req, true, &own_addr_type);
3897 *status = MGMT_STATUS_FAILED;
3901 param_cp.type = LE_SCAN_ACTIVE;
3902 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3903 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3904 param_cp.own_address_type = own_addr_type;
3905 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3908 memset(&enable_cp, 0, sizeof(enable_cp));
3909 enable_cp.enable = LE_SCAN_ENABLE;
3910 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3911 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3916 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery: resolve the pending mgmt command, move the
 * discovery state machine forward, and schedule le_scan_disable with a
 * timeout matching the discovery type.
 */
3923 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3926 struct mgmt_pending_cmd *cmd;
3927 unsigned long timeout;
3929 BT_DBG("status %d", status);
3933 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3935 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3938 cmd->cmd_complete(cmd, mgmt_status(status));
3939 mgmt_pending_remove(cmd);
3943 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3947 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3949 /* If the scan involves LE scan, pick proper timeout to schedule
3950 * hdev->le_scan_disable that will stop it.
3952 switch (hdev->discovery.type) {
3953 case DISCOV_TYPE_LE:
3954 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3956 case DISCOV_TYPE_INTERLEAVED:
3957 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3959 case DISCOV_TYPE_BREDR:
3963 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3969 /* When service discovery is used and the controller has
3970 * a strict duplicate filter, it is important to remember
3971 * the start and duration of the scan. This is required
3972 * for restarting scanning during the discovery phase.
3974 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
3976 hdev->discovery.result_filtering) {
3977 hdev->discovery.scan_start = jiffies;
3978 hdev->discovery.scan_duration = timeout;
3981 queue_delayed_work(hdev->workqueue,
3982 &hdev->le_scan_disable, timeout);
3986 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validate state (powered, no
 * discovery or periodic inquiry running), reset any previous discovery
 * filter, and run the HCI commands built by trigger_discovery().
 */
3989 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3990 void *data, u16 len)
3992 struct mgmt_cp_start_discovery *cp = data;
3993 struct mgmt_pending_cmd *cmd;
3994 struct hci_request req;
3998 BT_DBG("%s", hdev->name);
4002 if (!hdev_is_powered(hdev)) {
4003 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4004 MGMT_STATUS_NOT_POWERED,
4005 &cp->type, sizeof(cp->type));
4009 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4010 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
4011 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4012 MGMT_STATUS_BUSY, &cp->type,
4017 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4023 cmd->cmd_complete = generic_cmd_complete;
4025 /* Clear the discovery filter first to free any previously
4026 * allocated memory for the UUID list.
4028 hci_discovery_filter_clear(hdev);
4030 hdev->discovery.type = cp->type;
4031 hdev->discovery.report_invalid_rssi = false;
4033 hci_req_init(&req, hdev);
4035 if (!trigger_discovery(&req, &status)) {
4036 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4037 status, &cp->type, sizeof(cp->type));
4038 mgmt_pending_remove(cmd);
4042 err = hci_req_run(&req, start_discovery_complete);
4044 mgmt_pending_remove(cmd);
4048 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4051 hci_dev_unlock(hdev);
/* Completion helper for Start Service Discovery: echo the pending
 * command's stored parameters back in the mgmt reply.
 */
4055 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4058 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with result filtering by RSSI threshold and a variable-length UUID
 * list.  The UUID count is bounded so that sizeof(*cp) + count * 16
 * cannot overflow u16, and the total payload length must match exactly.
 */
4062 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4063 void *data, u16 len)
4065 struct mgmt_cp_start_service_discovery *cp = data;
4066 struct mgmt_pending_cmd *cmd;
4067 struct hci_request req;
4068 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4069 u16 uuid_count, expected_len;
4073 BT_DBG("%s", hdev->name);
4077 if (!hdev_is_powered(hdev)) {
4078 err = mgmt_cmd_complete(sk, hdev->id,
4079 MGMT_OP_START_SERVICE_DISCOVERY,
4080 MGMT_STATUS_NOT_POWERED,
4081 &cp->type, sizeof(cp->type));
4085 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4086 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
4087 err = mgmt_cmd_complete(sk, hdev->id,
4088 MGMT_OP_START_SERVICE_DISCOVERY,
4089 MGMT_STATUS_BUSY, &cp->type,
/* Validate the UUID list length before trusting it for allocation. */
4094 uuid_count = __le16_to_cpu(cp->uuid_count);
4095 if (uuid_count > max_uuid_count) {
4096 BT_ERR("service_discovery: too big uuid_count value %u",
4098 err = mgmt_cmd_complete(sk, hdev->id,
4099 MGMT_OP_START_SERVICE_DISCOVERY,
4100 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4105 expected_len = sizeof(*cp) + uuid_count * 16;
4106 if (expected_len != len) {
4107 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4109 err = mgmt_cmd_complete(sk, hdev->id,
4110 MGMT_OP_START_SERVICE_DISCOVERY,
4111 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4116 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4123 cmd->cmd_complete = service_discovery_cmd_complete;
4125 /* Clear the discovery filter first to free any previously
4126 * allocated memory for the UUID list.
4128 hci_discovery_filter_clear(hdev);
4130 hdev->discovery.result_filtering = true;
4131 hdev->discovery.type = cp->type;
4132 hdev->discovery.rssi = cp->rssi;
4133 hdev->discovery.uuid_count = uuid_count;
4135 if (uuid_count > 0) {
4136 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4138 if (!hdev->discovery.uuids) {
4139 err = mgmt_cmd_complete(sk, hdev->id,
4140 MGMT_OP_START_SERVICE_DISCOVERY,
4142 &cp->type, sizeof(cp->type));
4143 mgmt_pending_remove(cmd);
4148 hci_req_init(&req, hdev);
4150 if (!trigger_discovery(&req, &status)) {
4151 err = mgmt_cmd_complete(sk, hdev->id,
4152 MGMT_OP_START_SERVICE_DISCOVERY,
4153 status, &cp->type, sizeof(cp->type));
4154 mgmt_pending_remove(cmd);
4158 err = hci_req_run(&req, start_discovery_complete);
4160 mgmt_pending_remove(cmd);
4164 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4167 hci_dev_unlock(hdev);
/* HCI request completion callback for Stop Discovery: resolve the
 * pending mgmt command and mark discovery as stopped.
 */
4171 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4173 struct mgmt_pending_cmd *cmd;
4175 BT_DBG("status %d", status);
4179 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4181 cmd->cmd_complete(cmd, mgmt_status(status));
4182 mgmt_pending_remove(cmd);
4186 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4188 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: abort an active discovery of the
 * given type via an HCI request; if the request queues no commands
 * (-ENODATA) the stop is acknowledged immediately.
 */
4191 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4194 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4195 struct mgmt_pending_cmd *cmd;
4196 struct hci_request req;
4199 BT_DBG("%s", hdev->name);
4203 if (!hci_discovery_active(hdev)) {
4204 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4205 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4206 sizeof(mgmt_cp->type));
/* The requested type must match the discovery actually running. */
4210 if (hdev->discovery.type != mgmt_cp->type) {
4211 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4212 MGMT_STATUS_INVALID_PARAMS,
4213 &mgmt_cp->type, sizeof(mgmt_cp->type));
4217 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4223 cmd->cmd_complete = generic_cmd_complete;
4225 hci_req_init(&req, hdev);
4227 hci_stop_discovery(&req);
4229 err = hci_req_run(&req, stop_discovery_complete);
4231 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4235 mgmt_pending_remove(cmd);
4237 /* If no HCI commands were sent we're done */
4238 if (err == -ENODATA) {
4239 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4240 &mgmt_cp->type, sizeof(mgmt_cp->type));
4241 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4245 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of
 * a discovered device is already known; if not, the inquiry cache entry
 * is marked NAME_NEEDED so a remote name request will be issued.
 */
4249 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4252 struct mgmt_cp_confirm_name *cp = data;
4253 struct inquiry_entry *e;
4256 BT_DBG("%s", hdev->name);
4260 if (!hci_discovery_active(hdev)) {
4261 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4262 MGMT_STATUS_FAILED, &cp->addr,
4267 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4269 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4270 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4275 if (cp->name_known) {
4276 e->name_state = NAME_KNOWN;
4279 e->name_state = NAME_NEEDED;
4280 hci_inquiry_cache_update_resolve(hdev, e);
4283 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4284 &cp->addr, sizeof(cp->addr));
4287 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the blacklist and
 * emit MGMT_EV_DEVICE_BLOCKED on success.
 */
4291 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4294 struct mgmt_cp_block_device *cp = data;
4298 BT_DBG("%s", hdev->name);
4300 if (!bdaddr_type_is_valid(cp->addr.type))
4301 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4302 MGMT_STATUS_INVALID_PARAMS,
4303 &cp->addr, sizeof(cp->addr));
4307 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4310 status = MGMT_STATUS_FAILED;
4314 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4316 status = MGMT_STATUS_SUCCESS;
4319 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4320 &cp->addr, sizeof(cp->addr));
4322 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the blacklist
 * and emit MGMT_EV_DEVICE_UNBLOCKED; a failed delete (address not
 * listed) is reported as invalid parameters.
 */
4327 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4330 struct mgmt_cp_unblock_device *cp = data;
4334 BT_DBG("%s", hdev->name);
4336 if (!bdaddr_type_is_valid(cp->addr.type))
4337 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4338 MGMT_STATUS_INVALID_PARAMS,
4339 &cp->addr, sizeof(cp->addr));
4343 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4346 status = MGMT_STATUS_INVALID_PARAMS;
4350 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4352 status = MGMT_STATUS_SUCCESS;
4355 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4356 &cp->addr, sizeof(cp->addr));
4358 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (source, vendor,
 * product, version) on the hdev.  Source values above 0x0002 are
 * rejected; an HCI request is run afterwards, presumably to refresh
 * EIR/advertising data - confirm against the elided lines.
 */
4363 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4366 struct mgmt_cp_set_device_id *cp = data;
4367 struct hci_request req;
4371 BT_DBG("%s", hdev->name);
4373 source = __le16_to_cpu(cp->source);
4375 if (source > 0x0002)
4376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4377 MGMT_STATUS_INVALID_PARAMS);
4381 hdev->devid_source = source;
4382 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4383 hdev->devid_product = __le16_to_cpu(cp->product);
4384 hdev->devid_version = __le16_to_cpu(cp->version);
4386 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4389 hci_req_init(&req, hdev);
4391 hci_req_run(&req, NULL);
4393 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Advertising: on failure send
 * a status to all pending SET_ADVERTISING commands; on success sync the
 * HCI_ADVERTISING flag with the controller state and emit new settings.
 */
4398 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4401 struct cmd_lookup match = { NULL, hdev };
4406 u8 mgmt_err = mgmt_status(status);
4408 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4409 cmd_status_rsp, &mgmt_err);
4413 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4414 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4416 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4418 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4421 new_settings(hdev, match.sk);
4427 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: val 0x00 disables, 0x01 enables,
 * 0x02 enables connectable advertising.  When the controller state
 * cannot change (powered off, LE links active, active LE scan, or no
 * effective flag change) only the dev_flags are toggled and a direct
 * response is sent; otherwise an HCI request (re)programs advertising.
 */
4430 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4433 struct mgmt_mode *cp = data;
4434 struct mgmt_pending_cmd *cmd;
4435 struct hci_request req;
4439 BT_DBG("request for %s", hdev->name);
4441 status = mgmt_le_support(hdev);
4443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4448 MGMT_STATUS_INVALID_PARAMS);
4454 /* The following conditions are ones which mean that we should
4455 * not do any HCI communication but directly send a mgmt
4456 * response to user space (after toggling the flag if
4459 if (!hdev_is_powered(hdev) ||
4460 (val == test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
4461 (cp->val == 0x02) == test_bit(HCI_ADVERTISING_CONNECTABLE,
4462 &hdev->dev_flags)) ||
4463 hci_conn_num(hdev, LE_LINK) > 0 ||
4464 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4465 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4469 changed = !test_and_set_bit(HCI_ADVERTISING,
4471 if (cp->val == 0x02)
4472 set_bit(HCI_ADVERTISING_CONNECTABLE,
4475 clear_bit(HCI_ADVERTISING_CONNECTABLE,
4478 changed = test_and_clear_bit(HCI_ADVERTISING,
4480 clear_bit(HCI_ADVERTISING_CONNECTABLE,
4484 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4489 err = new_settings(hdev, sk);
/* Only one advertising/LE toggle may be in flight at a time. */
4494 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4495 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4496 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4501 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4507 hci_req_init(&req, hdev);
4509 if (cp->val == 0x02)
4510 set_bit(HCI_ADVERTISING_CONNECTABLE, &hdev->dev_flags);
4512 clear_bit(HCI_ADVERTISING_CONNECTABLE, &hdev->dev_flags);
4515 enable_advertising(&req);
4517 disable_advertising(&req);
4519 err = hci_req_run(&req, set_advertising_complete);
4521 mgmt_pending_remove(cmd);
4524 hci_dev_unlock(hdev);
/* Handler for the Set Static Address mgmt command.
 *
 * Only allowed on LE capable controllers and only while powered off.
 * BDADDR_ANY clears the static address; any other value must not be
 * BDADDR_NONE and must be a valid static random address (two most
 * significant bits set, per the Bluetooth Core Specification).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
	struct mgmt_cp_set_static_address *cp = data;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The static address may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);

	err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
/* Handler for the Set Scan Parameters mgmt command.
 *
 * Validates that interval and window are within the HCI allowed range
 * 0x0004-0x4000 and that window <= interval, then stores them as the
 * default LE scan parameters.  If a background (passive) scan is
 * currently running it is restarted so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	/* Spec-mandated range for LE scan interval */
	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	/* Spec-mandated range for LE scan window */
	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window may never exceed the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,

	/* If background scan is running, restart it so new parameters
	 * are loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		/* Fire and forget: no completion callback needed */
		hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable.
 *
 * On failure, reports the translated HCI status to the command's
 * originator; on success, updates the HCI_FAST_CONNECTABLE flag from
 * the stored command parameter and notifies about new settings.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);

		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
		/* cmd->param holds the original mgmt_mode request */
		struct mgmt_mode *cp = cmd->param;

			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for the Set Fast Connectable mgmt command (BR/EDR page scan
 * tuning).
 *
 * Requires BR/EDR to be enabled and a controller of at least
 * Bluetooth 1.2.  When powered off, only the HCI_FAST_CONNECTABLE
 * flag is toggled; when powered on, the page scan settings are
 * written via an HCI request completed by fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,

	/* No change requested: just reply with the current settings */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,

	if (!hdev_is_powered(hdev)) {
		/* Powered off: flip the flag only, applied on power on */
		change_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
		new_settings(hdev, sk);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR.
 *
 * On failure, rolls back the HCI_BREDR_ENABLED flag (which set_bredr()
 * flipped optimistically before running the request) and reports the
 * error; on success, sends the settings response and notification.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);

		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for the Set BR/EDR mgmt command on dual-mode controllers.
 *
 * Requires both BR/EDR and LE support, and LE must currently be
 * enabled.  Disabling BR/EDR while powered on is rejected, as is
 * re-enabling it when a static address or secure connections is in
 * use (see the detailed comment below).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE stays enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: just reply with the current settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
		clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);

		err = new_settings(hdev, sk);

	/* Reject disabling when powered on */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);

	/* When configuring a dual-mode controller to operate
	 * with LE only and using a static address, then switching
	 * BR/EDR back on is not allowed.
	 *
	 * Dual-mode controllers shall operate with the public
	 * address as its identity address for BR/EDR and LE. So
	 * reject the attempt to create an invalid configuration.
	 *
	 * The same restrictions applies when secure connections
	 * has been enabled. For BR/EDR this is a controller feature
	 * while for LE it is a host stack feature. This means that
	 * switching BR/EDR back on when secure connections has been
	 * enabled is not a supported transaction.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
	     test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	/* set_bredr_complete() rolls the flag back on failure */
	err = hci_req_run(&req, set_bredr_complete);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* HCI request completion callback for Set Secure Connections.
 *
 * On failure, reports the error; otherwise updates HCI_SC_ENABLED and
 * HCI_SC_ONLY according to the requested mode (0x00 off, 0x01 SC
 * enabled, 0x02 SC-only mode) and notifies about new settings.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));

		/* 0x00: secure connections fully disabled */
		clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		/* 0x01: secure connections enabled, mixed mode */
		set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		/* 0x02: secure connections only mode */
		set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for the Set Secure Connections mgmt command.
 *
 * cp->val: 0x00 = off, 0x01 = enabled, 0x02 = SC-only mode.  When the
 * controller is powered off, not SC capable, or BR/EDR is disabled,
 * only the host flags are updated; otherwise HCI_OP_WRITE_SC_SUPPORT
 * is sent and sc_enable_complete() finishes the command.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC capable BR/EDR controller, SSP must be on first */
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    lmp_sc_capable(hdev) &&
	    !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Flag-only path: no HCI communication possible or needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {

		changed = !test_and_set_bit(HCI_SC_ENABLED,
		if (cp->val == 0x02)
			set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_SC_ENABLED,
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);

		err = new_settings(hdev, sk);

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,

	/* No change in SC state: just reply with the current settings */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for the Set Debug Keys mgmt command.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep debug keys
 * (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally use SSP debug mode
 * (HCI_USE_DEBUG_KEYS).  If the controller is powered and SSP is
 * enabled, the SSP debug mode change is pushed to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
	struct mgmt_mode *cp = data;
	bool changed, use_changed;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,

	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,

	/* Push the SSP debug mode change to the controller right away
	 * when it is powered and SSP is active.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);

	err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
/* Handler for the Set Privacy mgmt command.
 *
 * Only allowed while powered off on LE capable controllers.  Enabling
 * privacy stores the provided IRK and marks the RPA as expired so a
 * fresh resolvable private address is generated; disabling clears the
 * IRK.  Support for this command implies user space handles IRKs, so
 * HCI_RPA_RESOLVING is set unconditionally.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
	struct mgmt_cp_set_privacy *cp = cp_data;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a new RPA on next use */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);

	err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
/* Validate the address of an IRK supplied via Load IRKs.
 *
 * Public LE addresses are always acceptable; random LE addresses must
 * be static random ones (two most significant bits set).
 */
static bool irk_is_valid(struct mgmt_irk_info *irk)
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for the Load IRKs mgmt command.
 *
 * Validates the advertised irk_count against both the maximum that
 * fits in a u16-sized message and the actual payload length, checks
 * every entry with irk_is_valid(), then replaces the whole IRK store
 * with the supplied list and enables RPA resolving.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound so that expected_len below cannot overflow u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The payload length must match the advertised count exactly */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Reject the whole load if any single entry is invalid */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_STATUS_INVALID_PARAMS);

	/* Replace, not merge: clear all previously stored IRKs */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);
/* Validate a single LTK entry supplied via Load Long Term Keys.
 *
 * The master field must be 0x00 or 0x01; public LE addresses are
 * always acceptable, random LE addresses must be static random ones
 * (two most significant bits set).
 */
static bool ltk_is_valid(struct mgmt_ltk_info *key)
	if (key->master != 0x00 && key->master != 0x01)

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for the Load Long Term Keys mgmt command.
 *
 * Validates key_count against the u16 message size limit and the
 * actual payload length, rejects the load if any entry fails
 * ltk_is_valid(), then replaces the entire LTK store.  The mgmt key
 * type is translated to the SMP key type and authentication level.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound so that expected_len below cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The payload length must match the advertised count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Reject the whole load if any single entry is invalid */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);

	/* Replace, not merge: clear all previously stored LTKs */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
			addr_type = ADDR_LE_DEV_RANDOM;

		/* Map mgmt LTK type to SMP key type + auth level */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,

	hci_dev_unlock(hdev);
/* Completion helper for a pending Get Connection Information command.
 *
 * Fills the reply from the hci_conn cache on success or with the
 * "invalid" sentinels on failure, sends it to the originating socket
 * and drops the connection reference taken when the command was
 * queued.  Returns the mgmt_cmd_complete() result.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;

	/* cmd->param starts with the mgmt_addr_info of the request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Release the hold taken in get_conn_info() */
	hci_conn_drop(conn);
/* HCI request completion callback for the RSSI / TX power refresh
 * issued by get_conn_info().  Recovers the connection handle from the
 * last sent command and finishes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("status 0x%02x", hci_status);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
		status = mgmt_status(hci_status);

		/* Neither command was the last sent one */
		BT_ERR("invalid sent_cmd in conn_info response");

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
		BT_ERR("unknown handle (%d) in conn_info response", handle);

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for the Get Connection Information mgmt command.
 *
 * Returns cached RSSI / TX power values for an existing connection.
 * If the cache is older than a randomized age (between
 * hdev->conn_info_min_age and hdev->conn_info_max_age) or was never
 * filled, an HCI request is issued to refresh it and the command is
 * completed asynchronously by conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,

	/* BR/EDR uses the ACL link, anything else is treated as LE */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,

	/* Only one Get Conn Info per connection at a time */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old
	 * or were never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always sent first; the completion
		 * handler relies on this ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);

		err = hci_req_run(&req, conn_info_refresh_complete);

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,

		/* Keep the connection alive until the command completes;
		 * dropped in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	hci_dev_unlock(hdev);
/* Completion helper for a pending Get Clock Information command.
 *
 * Fills the reply with the local clock and, when a connection was
 * involved, the piconet clock and accuracy, then sends it and drops
 * the connection reference taken when the command was queued.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	hdev = hci_dev_get(cmd->index);
		rp.local_clock = cpu_to_le32(hdev->clock);

		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,

	/* Release the hold taken in get_clock_info() */
	hci_conn_drop(conn);
/* HCI request completion callback for Get Clock Information.
 *
 * Uses the last sent HCI_OP_READ_CLOCK command to find the connection
 * (which == 0x01 means a piconet clock read with a handle) and
 * finishes the matching pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);

	/* which != 0 means a piconet clock read tied to a connection */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for the Get Clock Information mgmt command (BR/EDR only).
 *
 * Always reads the local clock; if a peer address is given, the
 * corresponding ACL connection must exist and its piconet clock is
 * read as well.  Completed asynchronously by
 * get_clock_info_complete() / clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,

	/* A non-ANY address requires a live ACL connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: local clock (which = 0x00 from the memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

		/* Keep the connection alive until completion; dropped
		 * in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	err = hci_req_run(&req, get_clock_info_complete);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists
 * and is in the BT_CONNECTED state.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);

	if (conn->dst_type != type)

	if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Create (or look up) connection parameters for addr/addr_type and set
 * their auto_connect policy, moving the entry onto the matching action
 * list (pend_le_reports or pend_le_conns) and updating the background
 * scan as needed.
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)

	/* Detach from whichever action list the entry was on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for connect if not already connected */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the Device Added mgmt event to all listeners except sk. */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Add Device: finish the pending
 * mgmt command with the translated HCI status.
 */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for the Add Device mgmt command.
 *
 * cp->action: 0x00 = background scan and report (LE),
 * 0x01 = allow incoming connection (BR/EDR whitelist) or direct
 * connect (LE), 0x02 = auto-connect always (LE).  BR/EDR entries go
 * onto hdev->whitelist; LE entries are stored as connection
 * parameters via hci_conn_params_set().
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,

		/* Whitelist change may require page scan adjustment */
		__hci_update_page_scan(&req);

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map the mgmt action to the LE auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);

	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Emit the Device Removed mgmt event to all listeners except sk. */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Remove Device: finish the
 * pending mgmt command with the translated HCI status.
 */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for the Remove Device mgmt command.
 *
 * A specific address removes one BR/EDR whitelist entry or one set of
 * LE connection parameters (only entries with an auto-connect policy
 * other than HCI_AUTO_CONN_DISABLED may be removed).  BDADDR_ANY with
 * address type 0 removes all whitelist entries and all removable LE
 * connection parameters at once.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove a single device */
		struct hci_conn_params *params;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
				/* Not on the whitelist: nothing to remove */
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);

		/* Disabled entries were not added via Add Device and
		 * therefore cannot be removed through it either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);

		list_del(&params->action);
		list_del(&params->list);

		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Only address type 0 is valid for the wildcard form */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);

	err = hci_req_run(&req, remove_device_complete);
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
5982 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5985 struct mgmt_cp_load_conn_param *cp = data;
5986 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5987 sizeof(struct mgmt_conn_param));
5988 u16 param_count, expected_len;
5991 if (!lmp_le_capable(hdev))
5992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5993 MGMT_STATUS_NOT_SUPPORTED);
5995 param_count = __le16_to_cpu(cp->param_count);
5996 if (param_count > max_param_count) {
5997 BT_ERR("load_conn_param: too big param_count value %u",
5999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6000 MGMT_STATUS_INVALID_PARAMS);
6003 expected_len = sizeof(*cp) + param_count *
6004 sizeof(struct mgmt_conn_param);
6005 if (expected_len != len) {
6006 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6008 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6009 MGMT_STATUS_INVALID_PARAMS);
6012 BT_DBG("%s param_count %u", hdev->name, param_count);
6016 hci_conn_params_clear_disabled(hdev);
6018 for (i = 0; i < param_count; i++) {
6019 struct mgmt_conn_param *param = &cp->params[i];
6020 struct hci_conn_params *hci_param;
6021 u16 min, max, latency, timeout;
6024 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
6027 if (param->addr.type == BDADDR_LE_PUBLIC) {
6028 addr_type = ADDR_LE_DEV_PUBLIC;
6029 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6030 addr_type = ADDR_LE_DEV_RANDOM;
6032 BT_ERR("Ignoring invalid connection parameters");
6036 min = le16_to_cpu(param->min_interval);
6037 max = le16_to_cpu(param->max_interval);
6038 latency = le16_to_cpu(param->latency);
6039 timeout = le16_to_cpu(param->timeout);
6041 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6042 min, max, latency, timeout);
6044 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6045 BT_ERR("Ignoring invalid connection parameters");
6049 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6052 BT_ERR("Failed to add connection parameters");
6056 hci_param->conn_min_interval = min;
6057 hci_param->conn_max_interval = max;
6058 hci_param->conn_latency = latency;
6059 hci_param->supervision_timeout = timeout;
6062 hci_dev_unlock(hdev);
6064 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
6068 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6069 void *data, u16 len)
6071 struct mgmt_cp_set_external_config *cp = data;
6075 BT_DBG("%s", hdev->name);
6077 if (hdev_is_powered(hdev))
6078 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6079 MGMT_STATUS_REJECTED);
6081 if (cp->config != 0x00 && cp->config != 0x01)
6082 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6083 MGMT_STATUS_INVALID_PARAMS);
6085 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6086 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6087 MGMT_STATUS_NOT_SUPPORTED);
6092 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
6095 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
6098 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6105 err = new_options(hdev, sk);
6107 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
6108 mgmt_index_removed(hdev);
6110 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6111 set_bit(HCI_CONFIG, &hdev->dev_flags);
6112 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6114 queue_work(hdev->req_workqueue, &hdev->power_on);
6116 set_bit(HCI_RAW, &hdev->flags);
6117 mgmt_index_added(hdev);
6122 hci_dev_unlock(hdev);
6126 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6127 void *data, u16 len)
6129 struct mgmt_cp_set_public_address *cp = data;
6133 BT_DBG("%s", hdev->name);
6135 if (hdev_is_powered(hdev))
6136 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6137 MGMT_STATUS_REJECTED);
6139 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6140 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6141 MGMT_STATUS_INVALID_PARAMS);
6143 if (!hdev->set_bdaddr)
6144 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6145 MGMT_STATUS_NOT_SUPPORTED);
6149 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6150 bacpy(&hdev->public_addr, &cp->bdaddr);
6152 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6159 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6160 err = new_options(hdev, sk);
6162 if (is_configured(hdev)) {
6163 mgmt_index_removed(hdev);
6165 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
6167 set_bit(HCI_CONFIG, &hdev->dev_flags);
6168 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6170 queue_work(hdev->req_workqueue, &hdev->power_on);
6174 hci_dev_unlock(hdev);
6178 static const struct hci_mgmt_handler mgmt_handlers[] = {
6179 { NULL }, /* 0x0000 (no command) */
6180 { read_version, MGMT_READ_VERSION_SIZE,
6182 { read_commands, MGMT_READ_COMMANDS_SIZE,
6184 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6186 { read_controller_info, MGMT_READ_INFO_SIZE, 0 },
6187 { set_powered, MGMT_SETTING_SIZE, 0 },
6188 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE, 0 },
6189 { set_connectable, MGMT_SETTING_SIZE, 0 },
6190 { set_fast_connectable, MGMT_SETTING_SIZE, 0 },
6191 { set_bondable, MGMT_SETTING_SIZE, 0 },
6192 { set_link_security, MGMT_SETTING_SIZE, 0 },
6193 { set_ssp, MGMT_SETTING_SIZE, 0 },
6194 { set_hs, MGMT_SETTING_SIZE, 0 },
6195 { set_le, MGMT_SETTING_SIZE, 0 },
6196 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE, 0 },
6197 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE, 0 },
6198 { add_uuid, MGMT_ADD_UUID_SIZE, 0 },
6199 { remove_uuid, MGMT_REMOVE_UUID_SIZE, 0 },
6200 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6202 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6204 { disconnect, MGMT_DISCONNECT_SIZE, 0 },
6205 { get_connections, MGMT_GET_CONNECTIONS_SIZE, 0 },
6206 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE, 0 },
6207 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE, 0 },
6208 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE, 0 },
6209 { pair_device, MGMT_PAIR_DEVICE_SIZE, 0 },
6210 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE, 0 },
6211 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE, 0 },
6212 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE, 0 },
6213 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE, 0 },
6214 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE, 0 },
6215 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE, 0 },
6216 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6217 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6219 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE, 0 },
6220 { start_discovery, MGMT_START_DISCOVERY_SIZE, 0 },
6221 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE, 0 },
6222 { confirm_name, MGMT_CONFIRM_NAME_SIZE, 0 },
6223 { block_device, MGMT_BLOCK_DEVICE_SIZE, 0 },
6224 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE, 0 },
6225 { set_device_id, MGMT_SET_DEVICE_ID_SIZE, 0 },
6226 { set_advertising, MGMT_SETTING_SIZE, 0 },
6227 { set_bredr, MGMT_SETTING_SIZE, 0 },
6228 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE, 0 },
6229 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE, 0 },
6230 { set_secure_conn, MGMT_SETTING_SIZE, 0 },
6231 { set_debug_keys, MGMT_SETTING_SIZE, 0 },
6232 { set_privacy, MGMT_SET_PRIVACY_SIZE, 0 },
6233 { load_irks, MGMT_LOAD_IRKS_SIZE,
6235 { get_conn_info, MGMT_GET_CONN_INFO_SIZE, 0 },
6236 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE, 0 },
6237 { add_device, MGMT_ADD_DEVICE_SIZE, 0 },
6238 { remove_device, MGMT_REMOVE_DEVICE_SIZE, 0 },
6239 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6241 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6243 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6244 HCI_MGMT_UNCONFIGURED },
6245 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6246 HCI_MGMT_UNCONFIGURED },
6247 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6248 HCI_MGMT_UNCONFIGURED },
6249 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6253 int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
6254 struct msghdr *msg, size_t msglen)
6258 struct mgmt_hdr *hdr;
6259 u16 opcode, index, len;
6260 struct hci_dev *hdev = NULL;
6261 const struct hci_mgmt_handler *handler;
6262 bool var_len, no_hdev;
6265 BT_DBG("got %zu bytes", msglen);
6267 if (msglen < sizeof(*hdr))
6270 buf = kmalloc(msglen, GFP_KERNEL);
6274 if (memcpy_from_msg(buf, msg, msglen)) {
6280 opcode = __le16_to_cpu(hdr->opcode);
6281 index = __le16_to_cpu(hdr->index);
6282 len = __le16_to_cpu(hdr->len);
6284 if (len != msglen - sizeof(*hdr)) {
6289 if (opcode >= chan->handler_count ||
6290 chan->handlers[opcode].func == NULL) {
6291 BT_DBG("Unknown op %u", opcode);
6292 err = mgmt_cmd_status(sk, index, opcode,
6293 MGMT_STATUS_UNKNOWN_COMMAND);
6297 handler = &chan->handlers[opcode];
6299 if (index != MGMT_INDEX_NONE) {
6300 hdev = hci_dev_get(index);
6302 err = mgmt_cmd_status(sk, index, opcode,
6303 MGMT_STATUS_INVALID_INDEX);
6307 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6308 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6309 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6310 err = mgmt_cmd_status(sk, index, opcode,
6311 MGMT_STATUS_INVALID_INDEX);
6315 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6316 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
6317 err = mgmt_cmd_status(sk, index, opcode,
6318 MGMT_STATUS_INVALID_INDEX);
6323 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
6324 if (no_hdev != !hdev) {
6325 err = mgmt_cmd_status(sk, index, opcode,
6326 MGMT_STATUS_INVALID_INDEX);
6330 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
6331 if ((var_len && len < handler->data_len) ||
6332 (!var_len && len != handler->data_len)) {
6333 err = mgmt_cmd_status(sk, index, opcode,
6334 MGMT_STATUS_INVALID_PARAMS);
6339 mgmt_init_hdev(sk, hdev);
6341 cp = buf + sizeof(*hdr);
6343 err = handler->func(sk, hdev, cp, len);
6357 void mgmt_index_added(struct hci_dev *hdev)
6359 if (hdev->dev_type != HCI_BREDR)
6362 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6365 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6366 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6368 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6371 void mgmt_index_removed(struct hci_dev *hdev)
6373 u8 status = MGMT_STATUS_INVALID_INDEX;
6375 if (hdev->dev_type != HCI_BREDR)
6378 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6381 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6383 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6384 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6386 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6389 /* This function requires the caller holds hdev->lock */
6390 static void restart_le_actions(struct hci_request *req)
6392 struct hci_dev *hdev = req->hdev;
6393 struct hci_conn_params *p;
6395 list_for_each_entry(p, &hdev->le_conn_params, list) {
6396 /* Needed for AUTO_OFF case where might not "really"
6397 * have been powered off.
6399 list_del_init(&p->action);
6401 switch (p->auto_connect) {
6402 case HCI_AUTO_CONN_DIRECT:
6403 case HCI_AUTO_CONN_ALWAYS:
6404 list_add(&p->action, &hdev->pend_le_conns);
6406 case HCI_AUTO_CONN_REPORT:
6407 list_add(&p->action, &hdev->pend_le_reports);
6414 __hci_update_background_scan(req);
6417 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6419 struct cmd_lookup match = { NULL, hdev };
6421 BT_DBG("status 0x%02x", status);
6424 /* Register the available SMP channels (BR/EDR and LE) only
6425 * when successfully powering on the controller. This late
6426 * registration is required so that LE SMP can clearly
6427 * decide if the public address or static address is used.
6434 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6436 new_settings(hdev, match.sk);
6438 hci_dev_unlock(hdev);
6444 static int powered_update_hci(struct hci_dev *hdev)
6446 struct hci_request req;
6449 hci_req_init(&req, hdev);
6451 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6452 !lmp_host_ssp_capable(hdev)) {
6455 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6457 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6460 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6461 sizeof(support), &support);
6465 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6466 lmp_bredr_capable(hdev)) {
6467 struct hci_cp_write_le_host_supported cp;
6472 /* Check first if we already have the right
6473 * host state (host features set)
6475 if (cp.le != lmp_host_le_capable(hdev) ||
6476 cp.simul != lmp_host_le_br_capable(hdev))
6477 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6481 if (lmp_le_capable(hdev)) {
6482 /* Make sure the controller has a good default for
6483 * advertising data. This also applies to the case
6484 * where BR/EDR was toggled during the AUTO_OFF phase.
6486 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6487 update_adv_data(&req);
6488 update_scan_rsp_data(&req);
6491 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6492 enable_advertising(&req);
6494 restart_le_actions(&req);
6497 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6498 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6499 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6500 sizeof(link_sec), &link_sec);
6502 if (lmp_bredr_capable(hdev)) {
6503 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
6504 write_fast_connectable(&req, true);
6506 write_fast_connectable(&req, false);
6507 __hci_update_page_scan(&req);
6513 return hci_req_run(&req, powered_complete);
6516 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6518 struct cmd_lookup match = { NULL, hdev };
6519 u8 status, zero_cod[] = { 0, 0, 0 };
6522 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6526 if (powered_update_hci(hdev) == 0)
6529 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6534 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6536 /* If the power off is because of hdev unregistration let
6537 * use the appropriate INVALID_INDEX status. Otherwise use
6538 * NOT_POWERED. We cover both scenarios here since later in
6539 * mgmt_index_removed() any hci_conn callbacks will have already
6540 * been triggered, potentially causing misleading DISCONNECTED
6543 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6544 status = MGMT_STATUS_INVALID_INDEX;
6546 status = MGMT_STATUS_NOT_POWERED;
6548 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6550 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6551 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6552 zero_cod, sizeof(zero_cod), NULL);
6555 err = new_settings(hdev, match.sk);
6563 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6565 struct mgmt_pending_cmd *cmd;
6568 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6572 if (err == -ERFKILL)
6573 status = MGMT_STATUS_RFKILLED;
6575 status = MGMT_STATUS_FAILED;
6577 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6579 mgmt_pending_remove(cmd);
6582 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6584 struct hci_request req;
6588 /* When discoverable timeout triggers, then just make sure
6589 * the limited discoverable flag is cleared. Even in the case
6590 * of a timeout triggered from general discoverable, it is
6591 * safe to unconditionally clear the flag.
6593 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6594 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6596 hci_req_init(&req, hdev);
6597 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
6598 u8 scan = SCAN_PAGE;
6599 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6600 sizeof(scan), &scan);
6603 update_adv_data(&req);
6604 hci_req_run(&req, NULL);
6606 hdev->discov_timeout = 0;
6608 new_settings(hdev, NULL);
6610 hci_dev_unlock(hdev);
6613 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6616 struct mgmt_ev_new_link_key ev;
6618 memset(&ev, 0, sizeof(ev));
6620 ev.store_hint = persistent;
6621 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6622 ev.key.addr.type = BDADDR_BREDR;
6623 ev.key.type = key->type;
6624 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6625 ev.key.pin_len = key->pin_len;
6627 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6630 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6632 switch (ltk->type) {
6635 if (ltk->authenticated)
6636 return MGMT_LTK_AUTHENTICATED;
6637 return MGMT_LTK_UNAUTHENTICATED;
6639 if (ltk->authenticated)
6640 return MGMT_LTK_P256_AUTH;
6641 return MGMT_LTK_P256_UNAUTH;
6642 case SMP_LTK_P256_DEBUG:
6643 return MGMT_LTK_P256_DEBUG;
6646 return MGMT_LTK_UNAUTHENTICATED;
6649 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6651 struct mgmt_ev_new_long_term_key ev;
6653 memset(&ev, 0, sizeof(ev));
6655 /* Devices using resolvable or non-resolvable random addresses
6656 * without providing an indentity resolving key don't require
6657 * to store long term keys. Their addresses will change the
6660 * Only when a remote device provides an identity address
6661 * make sure the long term key is stored. If the remote
6662 * identity is known, the long term keys are internally
6663 * mapped to the identity address. So allow static random
6664 * and public addresses here.
6666 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6667 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6668 ev.store_hint = 0x00;
6670 ev.store_hint = persistent;
6672 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6673 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6674 ev.key.type = mgmt_ltk_type(key);
6675 ev.key.enc_size = key->enc_size;
6676 ev.key.ediv = key->ediv;
6677 ev.key.rand = key->rand;
6679 if (key->type == SMP_LTK)
6682 memcpy(ev.key.val, key->val, sizeof(key->val));
6684 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6687 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6689 struct mgmt_ev_new_irk ev;
6691 memset(&ev, 0, sizeof(ev));
6693 /* For identity resolving keys from devices that are already
6694 * using a public address or static random address, do not
6695 * ask for storing this key. The identity resolving key really
6696 * is only mandatory for devices using resovlable random
6699 * Storing all identity resolving keys has the downside that
6700 * they will be also loaded on next boot of they system. More
6701 * identity resolving keys, means more time during scanning is
6702 * needed to actually resolve these addresses.
6704 if (bacmp(&irk->rpa, BDADDR_ANY))
6705 ev.store_hint = 0x01;
6707 ev.store_hint = 0x00;
6709 bacpy(&ev.rpa, &irk->rpa);
6710 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6711 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6712 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6714 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6717 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6720 struct mgmt_ev_new_csrk ev;
6722 memset(&ev, 0, sizeof(ev));
6724 /* Devices using resolvable or non-resolvable random addresses
6725 * without providing an indentity resolving key don't require
6726 * to store signature resolving keys. Their addresses will change
6727 * the next time around.
6729 * Only when a remote device provides an identity address
6730 * make sure the signature resolving key is stored. So allow
6731 * static random and public addresses here.
6733 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6734 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6735 ev.store_hint = 0x00;
6737 ev.store_hint = persistent;
6739 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6740 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6741 ev.key.type = csrk->type;
6742 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6744 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6747 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6748 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6749 u16 max_interval, u16 latency, u16 timeout)
6751 struct mgmt_ev_new_conn_param ev;
6753 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6756 memset(&ev, 0, sizeof(ev));
6757 bacpy(&ev.addr.bdaddr, bdaddr);
6758 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6759 ev.store_hint = store_hint;
6760 ev.min_interval = cpu_to_le16(min_interval);
6761 ev.max_interval = cpu_to_le16(max_interval);
6762 ev.latency = cpu_to_le16(latency);
6763 ev.timeout = cpu_to_le16(timeout);
6765 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6768 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6771 eir[eir_len++] = sizeof(type) + data_len;
6772 eir[eir_len++] = type;
6773 memcpy(&eir[eir_len], data, data_len);
6774 eir_len += data_len;
6779 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6780 u32 flags, u8 *name, u8 name_len)
6783 struct mgmt_ev_device_connected *ev = (void *) buf;
6786 bacpy(&ev->addr.bdaddr, &conn->dst);
6787 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6789 ev->flags = __cpu_to_le32(flags);
6791 /* We must ensure that the EIR Data fields are ordered and
6792 * unique. Keep it simple for now and avoid the problem by not
6793 * adding any BR/EDR data to the LE adv.
6795 if (conn->le_adv_data_len > 0) {
6796 memcpy(&ev->eir[eir_len],
6797 conn->le_adv_data, conn->le_adv_data_len);
6798 eir_len = conn->le_adv_data_len;
6801 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6804 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6805 eir_len = eir_append_data(ev->eir, eir_len,
6807 conn->dev_class, 3);
6810 ev->eir_len = cpu_to_le16(eir_len);
6812 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6813 sizeof(*ev) + eir_len, NULL);
6816 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6818 struct sock **sk = data;
6820 cmd->cmd_complete(cmd, 0);
6825 mgmt_pending_remove(cmd);
6828 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6830 struct hci_dev *hdev = data;
6831 struct mgmt_cp_unpair_device *cp = cmd->param;
6833 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6835 cmd->cmd_complete(cmd, 0);
6836 mgmt_pending_remove(cmd);
6839 bool mgmt_powering_down(struct hci_dev *hdev)
6841 struct mgmt_pending_cmd *cmd;
6842 struct mgmt_mode *cp;
6844 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6855 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6856 u8 link_type, u8 addr_type, u8 reason,
6857 bool mgmt_connected)
6859 struct mgmt_ev_device_disconnected ev;
6860 struct sock *sk = NULL;
6862 /* The connection is still in hci_conn_hash so test for 1
6863 * instead of 0 to know if this is the last one.
6865 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6866 cancel_delayed_work(&hdev->power_off);
6867 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6870 if (!mgmt_connected)
6873 if (link_type != ACL_LINK && link_type != LE_LINK)
6876 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6878 bacpy(&ev.addr.bdaddr, bdaddr);
6879 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6882 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6887 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6891 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6892 u8 link_type, u8 addr_type, u8 status)
6894 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6895 struct mgmt_cp_disconnect *cp;
6896 struct mgmt_pending_cmd *cmd;
6898 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6901 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6907 if (bacmp(bdaddr, &cp->addr.bdaddr))
6910 if (cp->addr.type != bdaddr_type)
6913 cmd->cmd_complete(cmd, mgmt_status(status));
6914 mgmt_pending_remove(cmd);
6917 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6918 u8 addr_type, u8 status)
6920 struct mgmt_ev_connect_failed ev;
6922 /* The connection is still in hci_conn_hash so test for 1
6923 * instead of 0 to know if this is the last one.
6925 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6926 cancel_delayed_work(&hdev->power_off);
6927 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6930 bacpy(&ev.addr.bdaddr, bdaddr);
6931 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6932 ev.status = mgmt_status(status);
6934 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6937 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6939 struct mgmt_ev_pin_code_request ev;
6941 bacpy(&ev.addr.bdaddr, bdaddr);
6942 ev.addr.type = BDADDR_BREDR;
6945 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6948 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6951 struct mgmt_pending_cmd *cmd;
6953 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6957 cmd->cmd_complete(cmd, mgmt_status(status));
6958 mgmt_pending_remove(cmd);
6961 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6964 struct mgmt_pending_cmd *cmd;
6966 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6970 cmd->cmd_complete(cmd, mgmt_status(status));
6971 mgmt_pending_remove(cmd);
6974 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6975 u8 link_type, u8 addr_type, u32 value,
6978 struct mgmt_ev_user_confirm_request ev;
6980 BT_DBG("%s", hdev->name);
6982 bacpy(&ev.addr.bdaddr, bdaddr);
6983 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6984 ev.confirm_hint = confirm_hint;
6985 ev.value = cpu_to_le32(value);
6987 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6991 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6992 u8 link_type, u8 addr_type)
6994 struct mgmt_ev_user_passkey_request ev;
6996 BT_DBG("%s", hdev->name);
6998 bacpy(&ev.addr.bdaddr, bdaddr);
6999 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7001 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7005 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7006 u8 link_type, u8 addr_type, u8 status,
7009 struct mgmt_pending_cmd *cmd;
7011 cmd = mgmt_pending_find(opcode, hdev);
7015 cmd->cmd_complete(cmd, mgmt_status(status));
7016 mgmt_pending_remove(cmd);
7021 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7022 u8 link_type, u8 addr_type, u8 status)
7024 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7025 status, MGMT_OP_USER_CONFIRM_REPLY);
7028 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7029 u8 link_type, u8 addr_type, u8 status)
7031 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7033 MGMT_OP_USER_CONFIRM_NEG_REPLY);
7036 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7037 u8 link_type, u8 addr_type, u8 status)
7039 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7040 status, MGMT_OP_USER_PASSKEY_REPLY);
7043 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7044 u8 link_type, u8 addr_type, u8 status)
7046 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7048 MGMT_OP_USER_PASSKEY_NEG_REPLY);
7051 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7052 u8 link_type, u8 addr_type, u32 passkey,
7055 struct mgmt_ev_passkey_notify ev;
7057 BT_DBG("%s", hdev->name);
7059 bacpy(&ev.addr.bdaddr, bdaddr);
7060 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7061 ev.passkey = __cpu_to_le32(passkey);
7062 ev.entered = entered;
7064 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7067 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7069 struct mgmt_ev_auth_failed ev;
7070 struct mgmt_pending_cmd *cmd;
7071 u8 status = mgmt_status(hci_status);
7073 bacpy(&ev.addr.bdaddr, &conn->dst);
7074 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7077 cmd = find_pairing(conn);
7079 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7080 cmd ? cmd->sk : NULL);
7083 cmd->cmd_complete(cmd, status);
7084 mgmt_pending_remove(cmd);
7088 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7090 struct cmd_lookup match = { NULL, hdev };
7094 u8 mgmt_err = mgmt_status(status);
7095 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7096 cmd_status_rsp, &mgmt_err);
7100 if (test_bit(HCI_AUTH, &hdev->flags))
7101 changed = !test_and_set_bit(HCI_LINK_SECURITY,
7104 changed = test_and_clear_bit(HCI_LINK_SECURITY,
7107 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7111 new_settings(hdev, match.sk);
7117 static void clear_eir(struct hci_request *req)
7119 struct hci_dev *hdev = req->hdev;
7120 struct hci_cp_write_eir cp;
7122 if (!lmp_ext_inq_capable(hdev))
7125 memset(hdev->eir, 0, sizeof(hdev->eir));
7127 memset(&cp, 0, sizeof(cp));
7129 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7132 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7134 struct cmd_lookup match = { NULL, hdev };
7135 struct hci_request req;
7136 bool changed = false;
7139 u8 mgmt_err = mgmt_status(status);
7141 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
7142 &hdev->dev_flags)) {
7143 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
7144 new_settings(hdev, NULL);
7147 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7153 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
7155 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
7157 changed = test_and_clear_bit(HCI_HS_ENABLED,
7160 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
7163 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7166 new_settings(hdev, match.sk);
7171 hci_req_init(&req, hdev);
7173 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
7174 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
7175 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7176 sizeof(enable), &enable);
7182 hci_req_run(&req, NULL);
7185 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7187 struct cmd_lookup *match = data;
7189 if (match->sk == NULL) {
7190 match->sk = cmd->sk;
7191 sock_hold(match->sk);
7195 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7198 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7200 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7201 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7202 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7205 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
7212 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7214 struct mgmt_cp_set_local_name ev;
7215 struct mgmt_pending_cmd *cmd;
7220 memset(&ev, 0, sizeof(ev));
7221 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7222 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7224 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7226 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7228 /* If this is a HCI command related to powering on the
7229 * HCI dev don't send any mgmt signals.
7231 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7235 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7236 cmd ? cmd->sk : NULL);
7239 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7240 u8 *rand192, u8 *hash256, u8 *rand256,
7243 struct mgmt_pending_cmd *cmd;
7245 BT_DBG("%s status %u", hdev->name, status);
7247 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7252 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7253 mgmt_status(status));
7255 struct mgmt_rp_read_local_oob_data rp;
7256 size_t rp_size = sizeof(rp);
7258 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7259 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7261 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7262 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7263 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7265 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7268 mgmt_cmd_complete(cmd->sk, hdev->id,
7269 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7273 mgmt_pending_remove(cmd);
7276 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7280 for (i = 0; i < uuid_count; i++) {
7281 if (!memcmp(uuid, uuids[i], 16))
7288 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7292 while (parsed < eir_len) {
7293 u8 field_len = eir[0];
7300 if (eir_len - parsed < field_len + 1)
7304 case EIR_UUID16_ALL:
7305 case EIR_UUID16_SOME:
7306 for (i = 0; i + 3 <= field_len; i += 2) {
7307 memcpy(uuid, bluetooth_base_uuid, 16);
7308 uuid[13] = eir[i + 3];
7309 uuid[12] = eir[i + 2];
7310 if (has_uuid(uuid, uuid_count, uuids))
7314 case EIR_UUID32_ALL:
7315 case EIR_UUID32_SOME:
7316 for (i = 0; i + 5 <= field_len; i += 4) {
7317 memcpy(uuid, bluetooth_base_uuid, 16);
7318 uuid[15] = eir[i + 5];
7319 uuid[14] = eir[i + 4];
7320 uuid[13] = eir[i + 3];
7321 uuid[12] = eir[i + 2];
7322 if (has_uuid(uuid, uuid_count, uuids))
7326 case EIR_UUID128_ALL:
7327 case EIR_UUID128_SOME:
7328 for (i = 0; i + 17 <= field_len; i += 16) {
7329 memcpy(uuid, eir + i + 2, 16);
7330 if (has_uuid(uuid, uuid_count, uuids))
7336 parsed += field_len + 1;
7337 eir += field_len + 1;
7343 static void restart_le_scan(struct hci_dev *hdev)
7345 /* If controller is not scanning we are done. */
7346 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
7349 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7350 hdev->discovery.scan_start +
7351 hdev->discovery.scan_duration))
7354 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7355 DISCOV_LE_RESTART_DELAY);
7358 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7359 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7361 /* If a RSSI threshold has been specified, and
7362 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7363 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7364 * is set, let it through for further processing, as we might need to
7367 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7368 * the results are also dropped.
7370 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7371 (rssi == HCI_RSSI_INVALID ||
7372 (rssi < hdev->discovery.rssi &&
7373 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7376 if (hdev->discovery.uuid_count != 0) {
7377 /* If a list of UUIDs is provided in filter, results with no
7378 * matching UUID should be dropped.
7380 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7381 hdev->discovery.uuids) &&
7382 !eir_has_uuids(scan_rsp, scan_rsp_len,
7383 hdev->discovery.uuid_count,
7384 hdev->discovery.uuids))
7388 /* If duplicate filtering does not report RSSI changes, then restart
7389 * scanning to ensure updated result with updated RSSI values.
7391 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7392 restart_le_scan(hdev);
7394 /* Validate RSSI value against the RSSI threshold once more. */
7395 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7396 rssi < hdev->discovery.rssi)
7403 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7404 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7405 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7408 struct mgmt_ev_device_found *ev = (void *)buf;
7411 /* Don't send events for a non-kernel initiated discovery. With
7412 * LE one exception is if we have pend_le_reports > 0 in which
7413 * case we're doing passive scanning and want these events.
7415 if (!hci_discovery_active(hdev)) {
7416 if (link_type == ACL_LINK)
7418 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7422 if (hdev->discovery.result_filtering) {
7423 /* We are using service discovery */
7424 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7429 /* Make sure that the buffer is big enough. The 5 extra bytes
7430 * are for the potential CoD field.
7432 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7435 memset(buf, 0, sizeof(buf));
7437 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7438 * RSSI value was reported as 0 when not available. This behavior
7439 * is kept when using device discovery. This is required for full
7440 * backwards compatibility with the API.
7442 * However when using service discovery, the value 127 will be
7443 * returned when the RSSI is not available.
7445 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7446 link_type == ACL_LINK)
7449 bacpy(&ev->addr.bdaddr, bdaddr);
7450 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7452 ev->flags = cpu_to_le32(flags);
7455 /* Copy EIR or advertising data into event */
7456 memcpy(ev->eir, eir, eir_len);
7458 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7459 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7462 if (scan_rsp_len > 0)
7463 /* Append scan response data to event */
7464 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7466 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7467 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7469 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7472 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7473 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7475 struct mgmt_ev_device_found *ev;
7476 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7479 ev = (struct mgmt_ev_device_found *) buf;
7481 memset(buf, 0, sizeof(buf));
7483 bacpy(&ev->addr.bdaddr, bdaddr);
7484 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7487 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7490 ev->eir_len = cpu_to_le16(eir_len);
7492 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7495 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7497 struct mgmt_ev_discovering ev;
7499 BT_DBG("%s discovering %u", hdev->name, discovering);
7501 memset(&ev, 0, sizeof(ev));
7502 ev.type = hdev->discovery.type;
7503 ev.discovering = discovering;
7505 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7508 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7510 BT_DBG("%s status %u", hdev->name, status);
7513 void mgmt_reenable_advertising(struct hci_dev *hdev)
7515 struct hci_request req;
7517 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7520 hci_req_init(&req, hdev);
7521 enable_advertising(&req);
7522 hci_req_run(&req, adv_enable_complete);
7525 static struct hci_mgmt_chan chan = {
7526 .channel = HCI_CHANNEL_CONTROL,
7527 .handler_count = ARRAY_SIZE(mgmt_handlers),
7528 .handlers = mgmt_handlers,
7533 return hci_mgmt_chan_register(&chan);
7536 void mgmt_exit(void)
7538 hci_mgmt_chan_unregister(&chan);