2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
39 #define MGMT_VERSION 1
40 #define MGMT_REVISION 8
/* Management opcodes this kernel implements; reported to user space via
 * MGMT_OP_READ_COMMANDS.
 * NOTE(review): this extract appears truncated (several entries and the
 * closing brace are missing) — verify against the full source.
 */
42 static const u16 mgmt_commands[] = {
43 MGMT_OP_READ_INDEX_LIST,
46 MGMT_OP_SET_DISCOVERABLE,
47 MGMT_OP_SET_CONNECTABLE,
48 MGMT_OP_SET_FAST_CONNECTABLE,
50 MGMT_OP_SET_LINK_SECURITY,
54 MGMT_OP_SET_DEV_CLASS,
55 MGMT_OP_SET_LOCAL_NAME,
58 MGMT_OP_LOAD_LINK_KEYS,
59 MGMT_OP_LOAD_LONG_TERM_KEYS,
61 MGMT_OP_GET_CONNECTIONS,
62 MGMT_OP_PIN_CODE_REPLY,
63 MGMT_OP_PIN_CODE_NEG_REPLY,
64 MGMT_OP_SET_IO_CAPABILITY,
66 MGMT_OP_CANCEL_PAIR_DEVICE,
67 MGMT_OP_UNPAIR_DEVICE,
68 MGMT_OP_USER_CONFIRM_REPLY,
69 MGMT_OP_USER_CONFIRM_NEG_REPLY,
70 MGMT_OP_USER_PASSKEY_REPLY,
71 MGMT_OP_USER_PASSKEY_NEG_REPLY,
72 MGMT_OP_READ_LOCAL_OOB_DATA,
73 MGMT_OP_ADD_REMOTE_OOB_DATA,
74 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
75 MGMT_OP_START_DISCOVERY,
76 MGMT_OP_STOP_DISCOVERY,
79 MGMT_OP_UNBLOCK_DEVICE,
80 MGMT_OP_SET_DEVICE_ID,
81 MGMT_OP_SET_ADVERTISING,
83 MGMT_OP_SET_STATIC_ADDRESS,
84 MGMT_OP_SET_SCAN_PARAMS,
85 MGMT_OP_SET_SECURE_CONN,
86 MGMT_OP_SET_DEBUG_KEYS,
89 MGMT_OP_GET_CONN_INFO,
90 MGMT_OP_GET_CLOCK_INFO,
92 MGMT_OP_REMOVE_DEVICE,
93 MGMT_OP_LOAD_CONN_PARAM,
94 MGMT_OP_READ_UNCONF_INDEX_LIST,
95 MGMT_OP_READ_CONFIG_INFO,
96 MGMT_OP_SET_EXTERNAL_CONFIG,
97 MGMT_OP_SET_PUBLIC_ADDRESS,
98 MGMT_OP_START_SERVICE_DISCOVERY,
/* Management events this kernel can emit; reported alongside the command
 * table via MGMT_OP_READ_COMMANDS.
 * NOTE(review): extract appears truncated (some entries and the closing
 * brace are missing) — verify against the full source.
 */
101 static const u16 mgmt_events[] = {
102 MGMT_EV_CONTROLLER_ERROR,
104 MGMT_EV_INDEX_REMOVED,
105 MGMT_EV_NEW_SETTINGS,
106 MGMT_EV_CLASS_OF_DEV_CHANGED,
107 MGMT_EV_LOCAL_NAME_CHANGED,
108 MGMT_EV_NEW_LINK_KEY,
109 MGMT_EV_NEW_LONG_TERM_KEY,
110 MGMT_EV_DEVICE_CONNECTED,
111 MGMT_EV_DEVICE_DISCONNECTED,
112 MGMT_EV_CONNECT_FAILED,
113 MGMT_EV_PIN_CODE_REQUEST,
114 MGMT_EV_USER_CONFIRM_REQUEST,
115 MGMT_EV_USER_PASSKEY_REQUEST,
117 MGMT_EV_DEVICE_FOUND,
119 MGMT_EV_DEVICE_BLOCKED,
120 MGMT_EV_DEVICE_UNBLOCKED,
121 MGMT_EV_DEVICE_UNPAIRED,
122 MGMT_EV_PASSKEY_NOTIFY,
125 MGMT_EV_DEVICE_ADDED,
126 MGMT_EV_DEVICE_REMOVED,
127 MGMT_EV_NEW_CONN_PARAM,
128 MGMT_EV_UNCONF_INDEX_ADDED,
129 MGMT_EV_UNCONF_INDEX_REMOVED,
130 MGMT_EV_NEW_CONFIG_OPTIONS,
133 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
135 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
136 "\x00\x00\x00\x00\x00\x00\x00\x00"
139 struct list_head list;
146 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
149 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; each entry's trailing comment
 * names the HCI error it translates. mgmt_status() falls back to
 * MGMT_STATUS_FAILED for codes beyond the end of this table.
 */
150 static u8 mgmt_status_table[] = {
152 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
153 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
154 MGMT_STATUS_FAILED, /* Hardware Failure */
155 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
156 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
157 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
158 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
159 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
160 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
161 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
162 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
163 MGMT_STATUS_BUSY, /* Command Disallowed */
164 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
165 MGMT_STATUS_REJECTED, /* Rejected Security */
166 MGMT_STATUS_REJECTED, /* Rejected Personal */
167 MGMT_STATUS_TIMEOUT, /* Host Timeout */
168 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
169 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
170 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
171 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
172 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
173 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
174 MGMT_STATUS_BUSY, /* Repeated Attempts */
175 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
176 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
177 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
178 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
179 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
180 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
181 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
182 MGMT_STATUS_FAILED, /* Unspecified Error */
183 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
184 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
185 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
186 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
187 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
188 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
189 MGMT_STATUS_FAILED, /* Unit Link Key Used */
190 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
191 MGMT_STATUS_TIMEOUT, /* Instant Passed */
192 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
193 MGMT_STATUS_FAILED, /* Transaction Collision */
194 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
195 MGMT_STATUS_REJECTED, /* QoS Rejected */
196 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
197 MGMT_STATUS_REJECTED, /* Insufficient Security */
198 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
199 MGMT_STATUS_BUSY, /* Role Switch Pending */
200 MGMT_STATUS_FAILED, /* Slot Violation */
201 MGMT_STATUS_FAILED, /* Role Switch Failed */
202 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
203 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
204 MGMT_STATUS_BUSY, /* Host Busy Pairing */
205 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
206 MGMT_STATUS_BUSY, /* Controller Busy */
207 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
208 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
209 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
210 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
211 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
214 static u8 mgmt_status(u8 hci_status)
216 if (hci_status < ARRAY_SIZE(mgmt_status_table))
217 return mgmt_status_table[hci_status];
219 return MGMT_STATUS_FAILED;
/* Broadcast a management event on the HCI control channel, optionally
 * skipping @skip_sk (typically the socket that triggered the event).
 * The event header carries hdev->id when a device is given, otherwise
 * MGMT_INDEX_NONE.
 * NOTE(review): allocation-failure handling and braces appear missing
 * from this extract — verify against the full source.
 */
222 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
223 struct sock *skip_sk)
226 struct mgmt_hdr *hdr;
228 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
232 hdr = (void *) skb_put(skb, sizeof(*hdr));
233 hdr->opcode = cpu_to_le16(event);
235 hdr->index = cpu_to_le16(hdev->id);
237 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
238 hdr->len = cpu_to_le16(data_len);
241 memcpy(skb_put(skb, data_len), data, data_len);
244 __net_timestamp(skb);
246 hci_send_to_channel(HCI_CHANNEL_CONTROL, skb, skip_sk);
/* Queue an MGMT_EV_CMD_STATUS event to @sk for command @cmd with the
 * given MGMT status code.
 * NOTE(review): the skb NULL check, ev->status assignment and error
 * cleanup appear missing from this extract — verify against the full
 * source.
 */
252 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
255 struct mgmt_hdr *hdr;
256 struct mgmt_ev_cmd_status *ev;
259 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
261 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
265 hdr = (void *) skb_put(skb, sizeof(*hdr));
267 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
268 hdr->index = cpu_to_le16(index);
269 hdr->len = cpu_to_le16(sizeof(*ev));
271 ev = (void *) skb_put(skb, sizeof(*ev));
273 ev->opcode = cpu_to_le16(cmd);
275 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE event to @sk with an optional response
 * payload (@rp / @rp_len) appended after the event header.
 * NOTE(review): the skb NULL check, ev->status assignment and error
 * cleanup appear missing from this extract — verify against the full
 * source.
 */
282 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
283 void *rp, size_t rp_len)
286 struct mgmt_hdr *hdr;
287 struct mgmt_ev_cmd_complete *ev;
290 BT_DBG("sock %p", sk);
292 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
296 hdr = (void *) skb_put(skb, sizeof(*hdr));
298 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
299 hdr->index = cpu_to_le16(index);
300 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
302 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
303 ev->opcode = cpu_to_le16(cmd);
307 memcpy(ev->data, rp, rp_len);
309 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: replies with the management interface
 * version and revision. Uses MGMT_INDEX_NONE — this is a global command.
 */
316 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
319 struct mgmt_rp_read_version rp;
321 BT_DBG("sock %p", sk);
323 rp.version = MGMT_VERSION;
324 rp.revision = cpu_to_le16(MGMT_REVISION);
326 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: replies with the supported command and
 * event opcode tables as little-endian u16 values. put_unaligned_le16 is
 * used because rp->opcodes has no alignment guarantee.
 * NOTE(review): kmalloc NULL check and kfree of rp appear missing from
 * this extract — verify against the full source.
 */
330 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
333 struct mgmt_rp_read_commands *rp;
334 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
335 const u16 num_events = ARRAY_SIZE(mgmt_events);
340 BT_DBG("sock %p", sk);
342 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
344 rp = kmalloc(rp_size, GFP_KERNEL);
348 rp->num_commands = cpu_to_le16(num_commands);
349 rp->num_events = cpu_to_le16(num_events);
351 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
352 put_unaligned_le16(mgmt_commands[i], opcode);
354 for (i = 0; i < num_events; i++, opcode++)
355 put_unaligned_le16(mgmt_events[i], opcode);
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: replies with the ids of all
 * configured BR/EDR controllers. Two passes under hci_dev_list_lock:
 * first count matching devices to size the reply, then fill it in,
 * excluding devices still in setup/config or bound to a user channel.
 * GFP_ATOMIC because the allocation happens under a read_lock.
 * NOTE(review): kmalloc NULL check and kfree of rp appear missing from
 * this extract — verify against the full source.
 */
364 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
367 struct mgmt_rp_read_index_list *rp;
373 BT_DBG("sock %p", sk);
375 read_lock(&hci_dev_list_lock);
378 list_for_each_entry(d, &hci_dev_list, list) {
379 if (d->dev_type == HCI_BREDR &&
380 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
384 rp_len = sizeof(*rp) + (2 * count);
385 rp = kmalloc(rp_len, GFP_ATOMIC);
387 read_unlock(&hci_dev_list_lock);
392 list_for_each_entry(d, &hci_dev_list, list) {
393 if (test_bit(HCI_SETUP, &d->dev_flags) ||
394 test_bit(HCI_CONFIG, &d->dev_flags) ||
395 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
398 /* Devices marked as raw-only are neither configured
399 * nor unconfigured controllers.
401 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
404 if (d->dev_type == HCI_BREDR &&
405 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
406 rp->index[count++] = cpu_to_le16(d->id);
407 BT_DBG("Added hci%u", d->id);
411 rp->num_controllers = cpu_to_le16(count);
412 rp_len = sizeof(*rp) + (2 * count);
414 read_unlock(&hci_dev_list_lock);
416 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass scheme as
 * read_index_list(), but selects BR/EDR controllers that still have the
 * HCI_UNCONFIGURED flag set.
 * NOTE(review): kmalloc NULL check and kfree of rp appear missing from
 * this extract — verify against the full source.
 */
424 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
425 void *data, u16 data_len)
427 struct mgmt_rp_read_unconf_index_list *rp;
433 BT_DBG("sock %p", sk);
435 read_lock(&hci_dev_list_lock);
438 list_for_each_entry(d, &hci_dev_list, list) {
439 if (d->dev_type == HCI_BREDR &&
440 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
444 rp_len = sizeof(*rp) + (2 * count);
445 rp = kmalloc(rp_len, GFP_ATOMIC);
447 read_unlock(&hci_dev_list_lock);
452 list_for_each_entry(d, &hci_dev_list, list) {
453 if (test_bit(HCI_SETUP, &d->dev_flags) ||
454 test_bit(HCI_CONFIG, &d->dev_flags) ||
455 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
458 /* Devices marked as raw-only are neither configured
459 * nor unconfigured controllers.
461 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
464 if (d->dev_type == HCI_BREDR &&
465 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
466 rp->index[count++] = cpu_to_le16(d->id);
467 BT_DBG("Added hci%u", d->id);
471 rp->num_controllers = cpu_to_le16(count);
472 rp_len = sizeof(*rp) + (2 * count);
474 read_unlock(&hci_dev_list_lock);
476 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
484 static bool is_configured(struct hci_dev *hdev)
486 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
487 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
490 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
491 !bacmp(&hdev->public_addr, BDADDR_ANY))
497 static __le32 get_missing_options(struct hci_dev *hdev)
501 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
502 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
503 options |= MGMT_OPTION_EXTERNAL_CONFIG;
505 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
506 !bacmp(&hdev->public_addr, BDADDR_ANY))
507 options |= MGMT_OPTION_PUBLIC_ADDRESS;
509 return cpu_to_le32(options);
512 static int new_options(struct hci_dev *hdev, struct sock *skip)
514 __le32 options = get_missing_options(hdev);
516 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
517 sizeof(options), skip);
520 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
522 __le32 options = get_missing_options(hdev);
524 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: replies with the manufacturer id,
 * the configuration options the driver supports (external config quirk,
 * settable public address) and the options still missing.
 * NOTE(review): the matching hci_dev_lock() and the declaration/init of
 * `options` appear missing from this extract — verify against the full
 * source.
 */
528 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
529 void *data, u16 data_len)
531 struct mgmt_rp_read_config_info rp;
534 BT_DBG("sock %p %s", sk, hdev->name);
538 memset(&rp, 0, sizeof(rp));
539 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
541 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
542 options |= MGMT_OPTION_EXTERNAL_CONFIG;
544 if (hdev->set_bdaddr)
545 options |= MGMT_OPTION_PUBLIC_ADDRESS;
547 rp.supported_options = cpu_to_le32(options);
548 rp.missing_options = get_missing_options(hdev);
550 hci_dev_unlock(hdev);
552 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Compute the settings bitmask the controller is capable of, based on
 * its LMP features: the base set is always available, BR/EDR-dependent
 * bits require lmp_bredr_capable(), LE-dependent bits lmp_le_capable().
 * NOTE(review): braces and at least one condition line (around the
 * CONFIGURATION bit) appear missing from this extract — verify against
 * the full source.
 */
556 static u32 get_supported_settings(struct hci_dev *hdev)
560 settings |= MGMT_SETTING_POWERED;
561 settings |= MGMT_SETTING_BONDABLE;
562 settings |= MGMT_SETTING_DEBUG_KEYS;
563 settings |= MGMT_SETTING_CONNECTABLE;
564 settings |= MGMT_SETTING_DISCOVERABLE;
566 if (lmp_bredr_capable(hdev)) {
567 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
568 settings |= MGMT_SETTING_FAST_CONNECTABLE;
569 settings |= MGMT_SETTING_BREDR;
570 settings |= MGMT_SETTING_LINK_SECURITY;
572 if (lmp_ssp_capable(hdev)) {
573 settings |= MGMT_SETTING_SSP;
574 settings |= MGMT_SETTING_HS;
577 if (lmp_sc_capable(hdev))
578 settings |= MGMT_SETTING_SECURE_CONN;
581 if (lmp_le_capable(hdev)) {
582 settings |= MGMT_SETTING_LE;
583 settings |= MGMT_SETTING_ADVERTISING;
584 settings |= MGMT_SETTING_SECURE_CONN;
585 settings |= MGMT_SETTING_PRIVACY;
586 settings |= MGMT_SETTING_STATIC_ADDRESS;
589 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
591 settings |= MGMT_SETTING_CONFIGURATION;
/* Compute the currently-active settings bitmask from the device flags;
 * each MGMT_SETTING_* bit mirrors one HCI_* dev_flag (or powered state).
 */
596 static u32 get_current_settings(struct hci_dev *hdev)
600 if (hdev_is_powered(hdev))
601 settings |= MGMT_SETTING_POWERED;
603 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_CONNECTABLE;
606 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
607 settings |= MGMT_SETTING_FAST_CONNECTABLE;
609 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
610 settings |= MGMT_SETTING_DISCOVERABLE;
612 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
613 settings |= MGMT_SETTING_BONDABLE;
615 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
616 settings |= MGMT_SETTING_BREDR;
618 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_LE;
621 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
622 settings |= MGMT_SETTING_LINK_SECURITY;
624 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
625 settings |= MGMT_SETTING_SSP;
627 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
628 settings |= MGMT_SETTING_HS;
630 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
631 settings |= MGMT_SETTING_ADVERTISING;
633 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
634 settings |= MGMT_SETTING_SECURE_CONN;
636 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
637 settings |= MGMT_SETTING_DEBUG_KEYS;
639 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
640 settings |= MGMT_SETTING_PRIVACY;
642 /* The current setting for static address has two purposes. The
643 * first is to indicate if the static address will be used and
644 * the second is to indicate if it is actually set.
646 * This means if the static address is not configured, this flag
647 * will never bet set. If the address is configured, then if the
648 * address is actually used decides if the flag is set or not.
650 * For single mode LE only controllers and dual-mode controllers
651 * with BR/EDR disabled, the existence of the static address will
654 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
655 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
656 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
657 if (bacmp(&hdev->static_addr, BDADDR_ANY))
658 settings |= MGMT_SETTING_STATIC_ADDRESS;
664 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit UUIDs to @data
 * (capacity @len). Emits EIR_UUID16_ALL, downgraded to EIR_UUID16_SOME
 * when the buffer runs out. Skips the DID service class, which is
 * represented by its own EIR field. Returns the advanced write pointer.
 * NOTE(review): braces, `continue`s and the field-header setup appear
 * missing from this extract — verify against the full source.
 */
666 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
668 u8 *ptr = data, *uuids_start = NULL;
669 struct bt_uuid *uuid;
674 list_for_each_entry(uuid, &hdev->uuids, list) {
677 if (uuid->size != 16)
680 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
684 if (uuid16 == PNP_INFO_SVCLASS_ID)
690 uuids_start[1] = EIR_UUID16_ALL;
694 /* Stop if not enough space to put next UUID */
695 if ((ptr - data) + sizeof(u16) > len) {
696 uuids_start[1] = EIR_UUID16_SOME;
700 *ptr++ = (uuid16 & 0x00ff);
701 *ptr++ = (uuid16 & 0xff00) >> 8;
702 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the registered 32-bit UUIDs to @data,
 * analogous to create_uuid16_list(). Copies the 4 relevant bytes from
 * offset 12 of the 128-bit storage. Returns the advanced write pointer.
 * NOTE(review): braces and the field-header setup appear missing from
 * this extract — verify against the full source.
 */
708 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
710 u8 *ptr = data, *uuids_start = NULL;
711 struct bt_uuid *uuid;
716 list_for_each_entry(uuid, &hdev->uuids, list) {
717 if (uuid->size != 32)
723 uuids_start[1] = EIR_UUID32_ALL;
727 /* Stop if not enough space to put next UUID */
728 if ((ptr - data) + sizeof(u32) > len) {
729 uuids_start[1] = EIR_UUID32_SOME;
733 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
735 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the registered 128-bit UUIDs to @data,
 * analogous to create_uuid16_list(). Returns the advanced write pointer.
 * NOTE(review): braces and the field-header setup appear missing from
 * this extract — verify against the full source.
 */
741 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
743 u8 *ptr = data, *uuids_start = NULL;
744 struct bt_uuid *uuid;
749 list_for_each_entry(uuid, &hdev->uuids, list) {
750 if (uuid->size != 128)
756 uuids_start[1] = EIR_UUID128_ALL;
760 /* Stop if not enough space to put next UUID */
761 if ((ptr - data) + 16 > len) {
762 uuids_start[1] = EIR_UUID128_SOME;
766 memcpy(ptr, uuid->uuid, 16);
768 uuids_start[0] += 16;
774 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
776 struct pending_cmd *cmd;
778 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
779 if (cmd->opcode == opcode)
786 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
787 struct hci_dev *hdev,
790 struct pending_cmd *cmd;
792 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
793 if (cmd->user_data != data)
795 if (cmd->opcode == opcode)
802 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
807 name_len = strlen(hdev->dev_name);
809 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
811 if (name_len > max_len) {
813 ptr[1] = EIR_NAME_SHORT;
815 ptr[1] = EIR_NAME_COMPLETE;
817 ptr[0] = name_len + 1;
819 memcpy(ptr + 2, hdev->dev_name, name_len);
821 ad_len += (name_len + 2);
822 ptr += (name_len + 2);
/* Queue an HCI_OP_LE_SET_SCAN_RSP_DATA command on @req when the newly
 * generated scan response data differs from what the controller already
 * has; no-op when LE is disabled or the data is unchanged.
 */
828 static void update_scan_rsp_data(struct hci_request *req)
830 struct hci_dev *hdev = req->hdev;
831 struct hci_cp_le_set_scan_rsp_data cp;
834 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
837 memset(&cp, 0, sizeof(cp));
839 len = create_scan_rsp_data(hdev, cp.data);
841 if (hdev->scan_rsp_data_len == len &&
842 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
845 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
846 hdev->scan_rsp_data_len = len;
850 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (LE_AD_GENERAL /
 * LE_AD_LIMITED, or 0): a pending SET_DISCOVERABLE command takes
 * precedence over the current dev_flags so the advertised value matches
 * the state being transitioned to.
 * NOTE(review): the `if (cmd)` / `if (cp->val == ...)` lines and final
 * return appear missing from this extract — verify against the full
 * source.
 */
853 static u8 get_adv_discov_flags(struct hci_dev *hdev)
855 struct pending_cmd *cmd;
857 /* If there's a pending mgmt command the flags will not yet have
858 * their final values, so check for this first.
860 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
862 struct mgmt_mode *cp = cmd->param;
864 return LE_AD_GENERAL;
865 else if (cp->val == 0x02)
866 return LE_AD_LIMITED;
868 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
869 return LE_AD_LIMITED;
870 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
871 return LE_AD_GENERAL;
/* Build LE advertising data in @ptr: a flags field (discoverability
 * plus LE_AD_NO_BREDR when BR/EDR is disabled) and, when known, a TX
 * power field. Returns the total data length.
 * NOTE(review): the flag-field emission and length bookkeeping lines
 * appear missing from this extract — verify against the full source.
 */
877 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
879 u8 ad_len = 0, flags = 0;
881 flags |= get_adv_discov_flags(hdev);
883 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
884 flags |= LE_AD_NO_BREDR;
887 BT_DBG("adv flags 0x%02x", flags);
897 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
899 ptr[1] = EIR_TX_POWER;
900 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI_OP_LE_SET_ADV_DATA command on @req when the newly
 * generated advertising data differs from the cached copy; no-op when
 * LE is disabled or the data is unchanged (mirrors
 * update_scan_rsp_data()).
 */
909 static void update_adv_data(struct hci_request *req)
911 struct hci_dev *hdev = req->hdev;
912 struct hci_cp_le_set_adv_data cp;
915 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
918 memset(&cp, 0, sizeof(cp));
920 len = create_adv_data(hdev, cp.data);
922 if (hdev->adv_data_len == len &&
923 memcmp(cp.data, hdev->adv_data, len) == 0)
926 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
927 hdev->adv_data_len = len;
931 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
934 int mgmt_update_adv_data(struct hci_dev *hdev)
936 struct hci_request req;
938 hci_req_init(&req, hdev);
939 update_adv_data(&req);
941 return hci_req_run(&req, NULL);
/* Build the extended inquiry response buffer in @data: local name
 * (short or complete), inquiry TX power when known, Device ID when a
 * source is configured, then the 16/32/128-bit UUID lists, each capped
 * by the space remaining out of HCI_MAX_EIR_LENGTH.
 * NOTE(review): braces, the `ptr` initialisation and the name-length
 * truncation logic appear missing from this extract — verify against
 * the full source.
 */
944 static void create_eir(struct hci_dev *hdev, u8 *data)
949 name_len = strlen(hdev->dev_name);
955 ptr[1] = EIR_NAME_SHORT;
957 ptr[1] = EIR_NAME_COMPLETE;
959 /* EIR Data length */
960 ptr[0] = name_len + 1;
962 memcpy(ptr + 2, hdev->dev_name, name_len);
964 ptr += (name_len + 2);
967 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
969 ptr[1] = EIR_TX_POWER;
970 ptr[2] = (u8) hdev->inq_tx_power;
975 if (hdev->devid_source > 0) {
977 ptr[1] = EIR_DEVICE_ID;
979 put_unaligned_le16(hdev->devid_source, ptr + 2);
980 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
981 put_unaligned_le16(hdev->devid_product, ptr + 6);
982 put_unaligned_le16(hdev->devid_version, ptr + 8);
987 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
988 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
989 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI_OP_WRITE_EIR command on @req when the regenerated EIR
 * differs from the cached copy. Skipped while powered off, without
 * extended-inquiry support, with SSP disabled, or while the service
 * cache is active (the cache-off worker will refresh it).
 */
992 static void update_eir(struct hci_request *req)
994 struct hci_dev *hdev = req->hdev;
995 struct hci_cp_write_eir cp;
997 if (!hdev_is_powered(hdev))
1000 if (!lmp_ext_inq_capable(hdev))
1003 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1006 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1009 memset(&cp, 0, sizeof(cp));
1011 create_eir(hdev, cp.data);
1013 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1016 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1018 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1021 static u8 get_service_classes(struct hci_dev *hdev)
1023 struct bt_uuid *uuid;
1026 list_for_each_entry(uuid, &hdev->uuids, list)
1027 val |= uuid->svc_hint;
/* Queue an HCI_OP_WRITE_CLASS_OF_DEV command on @req when the computed
 * Class of Device differs from the current one. Skipped while powered
 * off, with BR/EDR disabled, or while the service cache is active.
 * NOTE(review): the limited-discoverable adjustment to cod[1] appears
 * truncated in this extract — verify against the full source.
 */
1032 static void update_class(struct hci_request *req)
1034 struct hci_dev *hdev = req->hdev;
1037 BT_DBG("%s", hdev->name);
1039 if (!hdev_is_powered(hdev))
1042 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1045 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1048 cod[0] = hdev->minor_class;
1049 cod[1] = hdev->major_class;
1050 cod[2] = get_service_classes(hdev);
1052 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1055 if (memcmp(cod, hdev->dev_class, 3) == 0)
1058 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1061 static bool get_connectable(struct hci_dev *hdev)
1063 struct pending_cmd *cmd;
1065 /* If there's a pending mgmt command the flag will not yet have
1066 * it's final value, so check for this first.
1068 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1070 struct mgmt_mode *cp = cmd->param;
1074 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1077 static void disable_advertising(struct hci_request *req)
1081 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI commands that (re)start LE advertising on @req:
 * optionally disable the running instance first, resolve the own
 * address type, program the advertising parameters, then enable.
 * Skipped entirely while an LE connection exists.
 */
1084 static void enable_advertising(struct hci_request *req)
1086 struct hci_dev *hdev = req->hdev;
1087 struct hci_cp_le_set_adv_param cp;
1088 u8 own_addr_type, enable = 0x01;
1091 if (hci_conn_num(hdev, LE_LINK) > 0)
1094 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1095 disable_advertising(req);
1097 /* Clear the HCI_LE_ADV bit temporarily so that the
1098 * hci_update_random_address knows that it's safe to go ahead
1099 * and write a new random address. The flag will be set back on
1100 * as soon as the SET_ADV_ENABLE HCI command completes.
1102 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1104 connectable = get_connectable(hdev);
1106 /* Set require_privacy to true only when non-connectable
1107 * advertising is used. In that case it is fine to use a
1108 * non-resolvable private address.
1110 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1113 memset(&cp, 0, sizeof(cp));
1114 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1115 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1116 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1117 cp.own_address_type = own_addr_type;
1118 cp.channel_map = hdev->le_adv_channel_map;
1120 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1122 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler that ends the service-cache period: once
 * HCI_SERVICE_CACHE is cleared, the deferred EIR/class updates are
 * flushed to the controller in a single request.
 * NOTE(review): the matching hci_dev_lock() and the update_eir()/
 * update_class() calls appear missing from this extract — verify
 * against the full source.
 */
1125 static void service_cache_off(struct work_struct *work)
1127 struct hci_dev *hdev = container_of(work, struct hci_dev,
1128 service_cache.work);
1129 struct hci_request req;
1131 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1134 hci_req_init(&req, hdev);
1141 hci_dev_unlock(hdev);
1143 hci_req_run(&req, NULL);
/* Delayed-work handler for resolvable-private-address rotation: marks
 * the RPA expired and, when advertising is on, restarts advertising so
 * a fresh RPA gets generated and programmed.
 */
1146 static void rpa_expired(struct work_struct *work)
1148 struct hci_dev *hdev = container_of(work, struct hci_dev,
1150 struct hci_request req;
1154 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1156 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1159 /* The generation of a new RPA and programming it into the
1160 * controller happens in the enable_advertising() function.
1162 hci_req_init(&req, hdev);
1163 enable_advertising(&req);
1164 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation, keyed on the HCI_MGMT flag:
 * sets up the service-cache and RPA-expiry workers and clears the
 * implicit bondable default so user space must opt in explicitly.
 */
1167 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1169 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1172 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1173 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1175 /* Non-mgmt controlled devices get this bit set
1176 * implicitly so that pairing works for them, however
1177 * for mgmt we require user-space to explicitly enable
1180 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: replies with the controller's address,
 * HCI version, manufacturer, supported and current settings, class of
 * device, and the local/short names.
 * NOTE(review): the matching hci_dev_lock() appears missing from this
 * extract — verify against the full source.
 */
1183 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1184 void *data, u16 data_len)
1186 struct mgmt_rp_read_info rp;
1188 BT_DBG("sock %p %s", sk, hdev->name);
1192 memset(&rp, 0, sizeof(rp));
1194 bacpy(&rp.bdaddr, &hdev->bdaddr);
1196 rp.version = hdev->hci_ver;
1197 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1199 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1200 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1202 memcpy(rp.dev_class, hdev->dev_class, 3);
1204 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1205 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1207 hci_dev_unlock(hdev);
1209 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command: drops the socket reference and frees the
 * copied parameter buffer and the command itself.
 * NOTE(review): the body is missing from this extract — verify against
 * the full source.
 */
1213 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending command for @opcode on @hdev, copying @len bytes
 * of @data as the parameter buffer, and add it to hdev->mgmt_pending.
 * The socket reference is held until mgmt_pending_free().
 * NOTE(review): the allocation-failure paths and sock_hold() appear
 * missing from this extract — verify against the full source.
 */
1220 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1221 struct hci_dev *hdev, void *data,
1224 struct pending_cmd *cmd;
1226 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1230 cmd->opcode = opcode;
1231 cmd->index = hdev->id;
1233 cmd->param = kmemdup(data, len, GFP_KERNEL);
1239 cmd->param_len = len;
1244 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command matching @opcode (or on all of
 * them when @opcode is 0). Safe iteration, since callbacks may remove
 * entries from the list.
 * NOTE(review): the cb(cmd, data) invocation line appears missing from
 * this extract — verify against the full source.
 */
1249 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1250 void (*cb)(struct pending_cmd *cmd,
1254 struct pending_cmd *cmd, *tmp;
1256 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1257 if (opcode > 0 && cmd->opcode != opcode)
1264 static void mgmt_pending_remove(struct pending_cmd *cmd)
1266 list_del(&cmd->list);
1267 mgmt_pending_free(cmd);
1270 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1272 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1274 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1278 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1280 BT_DBG("%s status 0x%02x", hdev->name, status);
1282 if (hci_conn_count(hdev) == 0) {
1283 cancel_delayed_work(&hdev->power_off);
1284 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to stop whatever discovery activity is
 * in progress (inquiry, LE scan, or a pending remote-name resolve) plus
 * any background passive scan. Returns true when at least one stop
 * command was queued.
 * NOTE(review): braces, `return` statements and the default case appear
 * missing from this extract — verify against the full source.
 */
1288 static bool hci_stop_discovery(struct hci_request *req)
1290 struct hci_dev *hdev = req->hdev;
1291 struct hci_cp_remote_name_req_cancel cp;
1292 struct inquiry_entry *e;
1294 switch (hdev->discovery.state) {
1295 case DISCOVERY_FINDING:
1296 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1297 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1299 cancel_delayed_work(&hdev->le_scan_disable);
1300 hci_req_add_le_scan_disable(req);
1305 case DISCOVERY_RESOLVING:
1306 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1311 bacpy(&cp.bdaddr, &e->data.bdaddr);
1312 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1318 /* Passive scanning */
1319 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1320 hci_req_add_le_scan_disable(req);
/* Build and run the HCI request that quiesces the controller before
 * powering off: disable page/inquiry scan, stop advertising and any
 * discovery, then disconnect, cancel, or reject every connection
 * depending on its state (0x15 = remote power off reason). On success
 * with discovery stopped, the discovery state moves to STOPPING.
 * NOTE(review): case labels and braces of the per-connection switch
 * appear missing from this extract — verify against the full source.
 */
1330 static int clean_up_hci_state(struct hci_dev *hdev)
1332 struct hci_request req;
1333 struct hci_conn *conn;
1334 bool discov_stopped;
1337 hci_req_init(&req, hdev);
1339 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1340 test_bit(HCI_PSCAN, &hdev->flags)) {
1342 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1345 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1346 disable_advertising(&req);
1348 discov_stopped = hci_stop_discovery(&req);
1350 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1351 struct hci_cp_disconnect dc;
1352 struct hci_cp_reject_conn_req rej;
1354 switch (conn->state) {
1357 dc.handle = cpu_to_le16(conn->handle);
1358 dc.reason = 0x15; /* Terminated due to Power Off */
1359 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1362 if (conn->type == LE_LINK)
1363 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1365 else if (conn->type == ACL_LINK)
1366 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1370 bacpy(&rej.bdaddr, &conn->dst);
1371 rej.reason = 0x15; /* Terminated due to Power Off */
1372 if (conn->type == ACL_LINK)
1373 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1375 else if (conn->type == SCO_LINK)
1376 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1382 err = hci_req_run(&req, clean_up_hci_complete);
1383 if (!err && discov_stopped)
1384 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler. Validates the mode byte, rejects a
 * second concurrent request as busy, short-circuits when the device is
 * already in the requested state, and otherwise queues power-on work or
 * a graceful power-off (clean_up_hci_state() followed by the delayed
 * power_off work; -ENODATA from the cleanup request means nothing was
 * queued and power-off can proceed immediately). A device still in
 * AUTO_OFF is flipped to mgmt control via mgmt_powered().
 * NOTE(review): braces, `goto failed` paths and the matching
 * hci_dev_lock() appear missing from this extract — verify against the
 * full source.
 */
1389 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1392 struct mgmt_mode *cp = data;
1393 struct pending_cmd *cmd;
1396 BT_DBG("request for %s", hdev->name);
1398 if (cp->val != 0x00 && cp->val != 0x01)
1399 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1400 MGMT_STATUS_INVALID_PARAMS);
1404 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1405 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1410 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1411 cancel_delayed_work(&hdev->power_off);
1414 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1416 err = mgmt_powered(hdev, 1);
1421 if (!!cp->val == hdev_is_powered(hdev)) {
1422 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1426 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1433 queue_work(hdev->req_workqueue, &hdev->power_on);
1436 /* Disconnect connections, stop scans, etc */
1437 err = clean_up_hci_state(hdev);
1439 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1440 HCI_POWER_OFF_TIMEOUT);
1442 /* ENODATA means there were no HCI commands queued */
1443 if (err == -ENODATA) {
1444 cancel_delayed_work(&hdev->power_off);
1445 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1451 hci_dev_unlock(hdev);
/* Broadcast an MGMT New Settings event carrying the current settings bitmask
 * to all mgmt sockets except @skip. NOTE(review): the `__le32 ev;` declaration
 * line appears to be missing from this extract (numbering jumps 1455 -> 1459).
 */
1455 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1459 ev = cpu_to_le32(get_current_settings(hdev));
1461 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Public wrapper: emit New Settings to every mgmt socket (no socket skipped). */
1464 int mgmt_new_settings(struct hci_dev *hdev)
1466 return new_settings(hdev, NULL);
/* NOTE(review): member of struct cmd_lookup — its opening line and the
 * `struct sock *sk` member are not in this extract; settings_rsp below reads
 * both match->sk and match->hdev, so confirm the full definition upstream. */
1471 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer a pending command with the current
 * settings, unlink it, and remember the first socket seen in match->sk
 * (holding a reference) so the caller can later skip it in new_settings().
 */
1475 static void settings_rsp(struct pending_cmd *cmd, void *data)
1477 struct cmd_lookup *match = data;
1479 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1481 list_del(&cmd->list);
/* Keep only the first responder's socket; sock_hold pins it past free. */
1483 if (match->sk == NULL) {
1484 match->sk = cmd->sk;
1485 sock_hold(match->sk);
1488 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with *status and
 * remove it. NOTE(review): the `u8 *status = data;` declaration line is
 * missing from this extract (numbering jumps 1491 -> 1495).
 */
1491 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1495 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1496 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own cmd_complete
 * handler when one is set; otherwise fall back to a plain status response.
 */
1499 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1501 if (cmd->cmd_complete) {
1504 cmd->cmd_complete(cmd, *status);
1505 mgmt_pending_remove(cmd);
/* No per-command completion handler: reuse the generic status path. */
1510 cmd_status_rsp(cmd, data);
/* Default cmd_complete: echo the original request parameters back verbatim
 * as the Command Complete payload.
 */
1513 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1515 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1516 cmd->param, cmd->param_len);
/* cmd_complete variant for commands whose parameters begin with a
 * mgmt_addr_info: return only that leading address as the response payload.
 */
1519 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1521 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1522 sizeof(struct mgmt_addr_info))
1525 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1527 if (!lmp_bredr_capable(hdev))
1528 return MGMT_STATUS_NOT_SUPPORTED;
1529 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1530 return MGMT_STATUS_REJECTED;
1532 return MGMT_STATUS_SUCCESS;
1535 static u8 mgmt_le_support(struct hci_dev *hdev)
1537 if (!lmp_le_capable(hdev))
1538 return MGMT_STATUS_NOT_SUPPORTED;
1539 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1540 return MGMT_STATUS_REJECTED;
1542 return MGMT_STATUS_SUCCESS;
/* Request-completion callback for Set Discoverable: sync the HCI_DISCOVERABLE
 * flag with the outcome, arm the discoverable timeout when set, answer the
 * pending command, and refresh page scan. NOTE(review): numbering jumps show
 * missing lines (locking, status check, braces); verify upstream.
 */
1545 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1548 struct pending_cmd *cmd;
1549 struct mgmt_mode *cp;
1550 struct hci_request req;
1553 BT_DBG("status 0x%02x", status);
1557 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* HCI failure: report the mapped error and drop the limited-discoverable bit. */
1562 u8 mgmt_err = mgmt_status(status);
1563 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1564 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1570 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the delayed discov_off work if a timeout was configured. */
1573 if (hdev->discov_timeout > 0) {
1574 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1575 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1579 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1583 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1586 new_settings(hdev, cmd->sk);
1588 /* When the discoverable mode gets changed, make sure
1589 * that class of device has the limited discoverable
1590 * bit correctly set. Also update page scan based on whitelist
1593 hci_req_init(&req, hdev);
1594 __hci_update_page_scan(&req);
1596 hci_req_run(&req, NULL);
1599 mgmt_pending_remove(cmd);
1602 hci_dev_unlock(hdev);
/* MGMT Set Discoverable handler: values 0x00 off, 0x01 general, 0x02 limited
 * discoverable, with an optional timeout. Validates parameters, handles the
 * powered-off case by flipping flags only, and otherwise builds an HCI
 * request (IAC LAP + scan enable for BR/EDR, advertising data for LE-only).
 * NOTE(review): numbering jumps indicate missing lines (locking, goto labels,
 * braces, scan variable setup); verify against upstream mgmt.c.
 */
1605 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1608 struct mgmt_cp_set_discoverable *cp = data;
1609 struct pending_cmd *cmd;
1610 struct hci_request req;
1615 BT_DBG("request for %s", hdev->name);
/* Discoverable requires at least one of BR/EDR or LE to be enabled. */
1617 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1618 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1619 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1620 MGMT_STATUS_REJECTED);
1622 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1623 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1624 MGMT_STATUS_INVALID_PARAMS);
1626 timeout = __le16_to_cpu(cp->timeout);
1628 /* Disabling discoverable requires that no timeout is set,
1629 * and enabling limited discoverable requires a timeout.
1631 if ((cp->val == 0x00 && timeout > 0) ||
1632 (cp->val == 0x02 && timeout == 0))
1633 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1634 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense while powered: nothing could ever fire it off. */
1638 if (!hdev_is_powered(hdev) && timeout > 0) {
1639 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1640 MGMT_STATUS_NOT_POWERED);
/* Serialize with concurrent discoverable/connectable changes. */
1644 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1645 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1646 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; reject if connectable is off. */
1651 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1652 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1653 MGMT_STATUS_REJECTED);
1657 if (!hdev_is_powered(hdev)) {
1658 bool changed = false;
1660 /* Setting limited discoverable when powered off is
1661 * not a valid operation since it requires a timeout
1662 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1664 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1665 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1669 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1674 err = new_settings(hdev, sk);
1679 /* If the current mode is the same, then just update the timeout
1680 * value with the new value. And if only the timeout gets updated,
1681 * then no need for any HCI transactions.
1683 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1684 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1685 &hdev->dev_flags)) {
1686 cancel_delayed_work(&hdev->discov_off);
1687 hdev->discov_timeout = timeout;
1689 if (cp->val && hdev->discov_timeout > 0) {
1690 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1691 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1695 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1699 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1705 /* Cancel any potential discoverable timeout that might be
1706 * still active and store new timeout value. The arming of
1707 * the timeout happens in the complete handler.
1709 cancel_delayed_work(&hdev->discov_off);
1710 hdev->discov_timeout = timeout;
1712 /* Limited discoverable mode */
1713 if (cp->val == 0x02)
1714 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1716 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1718 hci_req_init(&req, hdev);
1720 /* The procedure for LE-only controllers is much simpler - just
1721 * update the advertising data.
1723 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1729 struct hci_cp_write_current_iac_lap hci_cp;
1731 if (cp->val == 0x02) {
1732 /* Limited discoverable mode */
1733 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9E8B00 and GIAC 0x9E8B33 in little-endian byte order. */
1734 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1735 hci_cp.iac_lap[1] = 0x8b;
1736 hci_cp.iac_lap[2] = 0x9e;
1737 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1738 hci_cp.iac_lap[4] = 0x8b;
1739 hci_cp.iac_lap[5] = 0x9e;
1741 /* General discoverable mode */
1743 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1744 hci_cp.iac_lap[1] = 0x8b;
1745 hci_cp.iac_lap[2] = 0x9e;
/* Payload size: one byte num_iac plus 3 bytes per LAP. */
1748 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1749 (hci_cp.num_iac * 3) + 1, &hci_cp);
1751 scan |= SCAN_INQUIRY;
1753 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1756 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1759 update_adv_data(&req);
1761 err = hci_req_run(&req, set_discoverable_complete);
1763 mgmt_pending_remove(cmd);
1766 hci_dev_unlock(hdev);
/* Queue HCI page scan activity/type commands implementing "fast connectable":
 * interlaced scan with a short (160 ms) interval when enabled, standard scan
 * with the 1.28 s default when disabled. Commands are only added when the
 * requested values differ from the controller's current ones. NOTE(review):
 * the `u8 type;` declaration, early `return`s and `if (enable)` lines are
 * missing from this extract (numbering jumps); verify upstream.
 */
1770 static void write_fast_connectable(struct hci_request *req, bool enable)
1772 struct hci_dev *hdev = req->hdev;
1773 struct hci_cp_write_page_scan_activity acp;
/* Page scan only applies to BR/EDR; nothing to do on LE-only setups. */
1776 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Interlaced page scan requires Bluetooth 1.2 or later. */
1779 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1783 type = PAGE_SCAN_TYPE_INTERLACED;
1785 /* 160 msec page scan interval */
1786 acp.interval = cpu_to_le16(0x0100);
1788 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1790 /* default 1.28 sec page scan */
1791 acp.interval = cpu_to_le16(0x0800);
/* 11.25 ms page scan window in both modes. */
1794 acp.window = cpu_to_le16(0x0012);
/* Skip the HCI round-trips when parameters already match. */
1796 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1797 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1798 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1801 if (hdev->page_scan_type != type)
1802 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* Request-completion callback for Set Connectable: sync HCI_CONNECTABLE (and,
 * when turning connectable off, HCI_DISCOVERABLE) with the outcome, answer
 * the pending command and refresh page/background scanning. NOTE(review):
 * numbering jumps indicate missing lines (locking, braces, the discov_changed
 * branch body); verify upstream.
 */
1805 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1808 struct pending_cmd *cmd;
1809 struct mgmt_mode *cp;
1810 bool conn_changed, discov_changed;
1812 BT_DBG("status 0x%02x", status);
1816 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1821 u8 mgmt_err = mgmt_status(status);
1822 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1828 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1830 discov_changed = false;
/* Dropping connectable also drops discoverable. */
1832 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1834 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1838 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1840 if (conn_changed || discov_changed) {
1841 new_settings(hdev, cmd->sk);
1842 hci_update_page_scan(hdev);
1844 mgmt_update_adv_data(hdev);
1845 hci_update_background_scan(hdev);
1849 mgmt_pending_remove(cmd);
1852 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (used when no HCI traffic is needed):
 * toggle HCI_CONNECTABLE, clear HCI_DISCOVERABLE when disabling, answer the
 * socket, and on change refresh scanning plus broadcast New Settings.
 * NOTE(review): numbering jumps suggest missing lines (e.g. `changed = true;`
 * and braces); verify upstream.
 */
1855 static int set_connectable_update_settings(struct hci_dev *hdev,
1856 struct sock *sk, u8 val)
1858 bool changed = false;
1861 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1865 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1867 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Non-connectable implies non-discoverable. */
1868 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1871 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1876 hci_update_page_scan(hdev);
1877 hci_update_background_scan(hdev);
1878 return new_settings(hdev, sk);
/* MGMT Set Connectable handler: validates the mode value, takes the flag-only
 * path when powered off, otherwise builds an HCI request adjusting scan
 * enable / advertising as needed. NOTE(review): numbering jumps indicate
 * missing lines (locking, goto labels, `u8 scan` setup, braces); verify
 * against upstream mgmt.c.
 */
1884 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1887 struct mgmt_mode *cp = data;
1888 struct pending_cmd *cmd;
1889 struct hci_request req;
1893 BT_DBG("request for %s", hdev->name);
1895 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1896 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1897 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1898 MGMT_STATUS_REJECTED);
1900 if (cp->val != 0x00 && cp->val != 0x01)
1901 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1902 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI needed, just update the stored flags. */
1906 if (!hdev_is_powered(hdev)) {
1907 err = set_connectable_update_settings(hdev, sk, cp->val);
1911 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1912 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1913 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1918 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1924 hci_req_init(&req, hdev);
1926 /* If BR/EDR is not enabled and we disable advertising as a
1927 * by-product of disabling connectable, we need to update the
1928 * advertising flags.
1930 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1932 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1933 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1935 update_adv_data(&req);
1936 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1940 /* If we don't have any whitelist entries just
1941 * disable all scanning. If there are entries
1942 * and we had both page and inquiry scanning
1943 * enabled then fall back to only page scanning.
1944 * Otherwise no changes are needed.
1946 if (list_empty(&hdev->whitelist))
1947 scan = SCAN_DISABLED;
1948 else if (test_bit(HCI_ISCAN, &hdev->flags))
1951 goto no_scan_update;
/* Disabling scan: stop a pending discoverable-timeout work item. */
1953 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1954 hdev->discov_timeout > 0)
1955 cancel_delayed_work(&hdev->discov_off);
1958 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1962 /* If we're going from non-connectable to connectable or
1963 * vice-versa when fast connectable is enabled ensure that fast
1964 * connectable gets disabled. write_fast_connectable won't do
1965 * anything if the page scan parameters are already what they
1968 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1969 write_fast_connectable(&req, false);
1971 /* Update the advertising parameters if necessary */
1972 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1973 enable_advertising(&req);
1975 err = hci_req_run(&req, set_connectable_complete);
1977 mgmt_pending_remove(cmd);
/* Empty request: finish synchronously via the flag-only path. */
1978 if (err == -ENODATA)
1979 err = set_connectable_update_settings(hdev, sk,
1985 hci_dev_unlock(hdev);
/* MGMT Set Bondable handler: pure flag toggle (HCI_BONDABLE), no HCI traffic.
 * Broadcasts New Settings when the flag actually changed. NOTE(review):
 * numbering jumps indicate missing lines (locking, `if (cp->val)`, braces);
 * verify upstream.
 */
1989 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1992 struct mgmt_mode *cp = data;
1996 BT_DBG("request for %s", hdev->name);
1998 if (cp->val != 0x00 && cp->val != 0x01)
1999 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2000 MGMT_STATUS_INVALID_PARAMS);
2005 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
2007 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
2009 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2014 err = new_settings(hdev, sk);
2017 hci_dev_unlock(hdev);
/* MGMT Set Link Security handler: toggles legacy link-level authentication
 * via HCI Write Auth Enable; when powered off only the HCI_LINK_SECURITY flag
 * is updated. NOTE(review): numbering jumps indicate missing lines (locking,
 * `u8 val`, goto labels, braces); verify upstream.
 */
2021 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2024 struct mgmt_mode *cp = data;
2025 struct pending_cmd *cmd;
2029 BT_DBG("request for %s", hdev->name);
/* Link security is a BR/EDR-only concept. */
2031 status = mgmt_bredr_support(hdev);
2033 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2036 if (cp->val != 0x00 && cp->val != 0x01)
2037 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2038 MGMT_STATUS_INVALID_PARAMS);
2042 if (!hdev_is_powered(hdev)) {
2043 bool changed = false;
2045 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2046 &hdev->dev_flags)) {
2047 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2051 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2056 err = new_settings(hdev, sk);
2061 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2062 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: answer immediately. */
2069 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2070 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2074 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2080 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2082 mgmt_pending_remove(cmd);
2087 hci_dev_unlock(hdev);
/* MGMT Set SSP handler: toggles Secure Simple Pairing via HCI Write SSP Mode.
 * When powered off only flags are updated; disabling SSP also clears the
 * High Speed flag since HS depends on SSP. NOTE(review): numbering jumps
 * indicate missing lines (locking, braces, goto labels, `if (cp->val)`);
 * verify upstream.
 */
2091 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2093 struct mgmt_mode *cp = data;
2094 struct pending_cmd *cmd;
2098 BT_DBG("request for %s", hdev->name);
2100 status = mgmt_bredr_support(hdev);
2102 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2104 if (!lmp_ssp_capable(hdev))
2105 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2106 MGMT_STATUS_NOT_SUPPORTED);
2108 if (cp->val != 0x00 && cp->val != 0x01)
2109 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2110 MGMT_STATUS_INVALID_PARAMS);
2114 if (!hdev_is_powered(hdev)) {
2118 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2121 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* SSP off drags High Speed down with it. */
2124 changed = test_and_clear_bit(HCI_HS_ENABLED,
2127 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2130 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2135 err = new_settings(hdev, sk);
2140 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2141 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2146 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2147 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2151 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off while debug keys are in use also disables debug mode. */
2157 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2158 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2159 sizeof(cp->val), &cp->val);
2161 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2163 mgmt_pending_remove(cmd);
2168 hci_dev_unlock(hdev);
/* MGMT Set High Speed handler: pure flag toggle (HCI_HS_ENABLED) gated on
 * BR/EDR, SSP capability and SSP being enabled; disabling HS while powered
 * is rejected. NOTE(review): numbering jumps indicate missing lines
 * (locking, braces, `if (cp->val)`); verify upstream.
 */
2172 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2174 struct mgmt_mode *cp = data;
2179 BT_DBG("request for %s", hdev->name);
2181 status = mgmt_bredr_support(hdev);
2183 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2185 if (!lmp_ssp_capable(hdev))
2186 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2187 MGMT_STATUS_NOT_SUPPORTED);
/* High Speed requires SSP to be active. */
2189 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2190 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2191 MGMT_STATUS_REJECTED);
2193 if (cp->val != 0x00 && cp->val != 0x01)
2194 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2195 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against a pending Set SSP, which can alter the HS flag. */
2199 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2200 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2206 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2208 if (hdev_is_powered(hdev)) {
2209 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2210 MGMT_STATUS_REJECTED);
2214 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2217 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2222 err = new_settings(hdev, sk);
2225 hci_dev_unlock(hdev);
/* Completion callback for the Set LE HCI request: on failure, fail all
 * pending SET_LE commands; on success, answer them, broadcast New Settings,
 * and refresh advertising data / background scan when LE ended up enabled.
 * NOTE(review): numbering jumps indicate missing lines (locking, braces,
 * status check); verify upstream.
 */
2229 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2231 struct cmd_lookup match = { NULL, hdev };
2236 u8 mgmt_err = mgmt_status(status);
2238 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2243 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2245 new_settings(hdev, match.sk);
2250 /* Make sure the controller has a good default for
2251 * advertising data. Restrict the update to when LE
2252 * has actually been enabled. During power on, the
2253 * update in powered_update_hci will take care of it.
2255 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2256 struct hci_request req;
2258 hci_req_init(&req, hdev);
2259 update_adv_data(&req);
2260 update_scan_rsp_data(&req);
2261 __hci_update_background_scan(&req);
2262 hci_req_run(&req, NULL);
2266 hci_dev_unlock(hdev);
/* MGMT Set Low Energy handler: toggles LE host support via HCI Write LE Host
 * Supported, updating flags directly when powered off or when the controller
 * already matches. Disabling LE first turns off advertising. NOTE(review):
 * numbering jumps indicate missing lines (locking, `u8 val`, hci_cp.le
 * assignment, braces, goto labels); verify upstream.
 */
2269 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2271 struct mgmt_mode *cp = data;
2272 struct hci_cp_write_le_host_supported hci_cp;
2273 struct pending_cmd *cmd;
2274 struct hci_request req;
2278 BT_DBG("request for %s", hdev->name);
2280 if (!lmp_le_capable(hdev))
2281 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2282 MGMT_STATUS_NOT_SUPPORTED);
2284 if (cp->val != 0x00 && cp->val != 0x01)
2285 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2286 MGMT_STATUS_INVALID_PARAMS);
2288 /* LE-only devices do not allow toggling LE on/off */
2289 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2290 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2291 MGMT_STATUS_REJECTED);
2296 enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or controller already in requested state. */
2298 if (!hdev_is_powered(hdev) || val == enabled) {
2299 bool changed = false;
2301 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2302 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* LE off implies advertising off. */
2306 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2307 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2311 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2316 err = new_settings(hdev, sk);
2321 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2322 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2323 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2328 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2334 hci_req_init(&req, hdev);
2336 memset(&hci_cp, 0, sizeof(hci_cp));
2340 hci_cp.simul = 0x00;
/* Stop advertising before LE host support is switched off. */
2342 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2343 disable_advertising(&req);
2346 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2349 err = hci_req_run(&req, le_enable_complete);
2351 mgmt_pending_remove(cmd);
2354 hci_dev_unlock(hdev);
2358 /* This is a helper function to test for pending mgmt commands that can
2359 * cause CoD or EIR HCI commands. We can only allow one such pending
2360 * mgmt command at a time since otherwise we cannot easily track what
2361 * the current values are, will be, and based on that calculate if a new
2362 * HCI command needs to be sent and if yes with what value.
/* Returns true when any pending command could touch device class or EIR.
 * NOTE(review): the `return true;`/`return false;` lines and closing braces
 * are missing from this extract (numbering jumps after 2373); verify
 * upstream.
 */
2364 static bool pending_eir_or_class(struct hci_dev *hdev)
2366 struct pending_cmd *cmd;
2368 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2369 switch (cmd->opcode) {
2370 case MGMT_OP_ADD_UUID:
2371 case MGMT_OP_REMOVE_UUID:
2372 case MGMT_OP_SET_DEV_CLASS:
2373 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * UUIDs that are aliases into the base range.
 */
2381 static const u8 bluetooth_base_uuid[] = {
2382 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2383 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: anything whose low 12 bytes differ
 * from the Bluetooth Base UUID is a full 128-bit UUID; otherwise the top
 * 32-bit value decides between 16- and 32-bit forms. NOTE(review): the
 * return statements and the 16/32 decision on `val` are missing from this
 * extract (numbering jumps after 2393); verify upstream.
 */
2386 static u8 get_uuid_size(const u8 *uuid)
2390 if (memcmp(uuid, bluetooth_base_uuid, 12))
2393 val = get_unaligned_le32(&uuid[12]);
/* Finish a pending class/EIR-affecting command (@mgmt_op), replying with the
 * (possibly updated) 3-byte device class. NOTE(review): the hci_dev_lock and
 * the NULL-cmd early exit appear to be missing from this extract; verify
 * upstream.
 */
2400 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2402 struct pending_cmd *cmd;
2406 cmd = mgmt_pending_find(mgmt_op, hdev);
2410 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2411 hdev->dev_class, 3);
2413 mgmt_pending_remove(cmd);
2416 hci_dev_unlock(hdev);
/* HCI request completion for Add UUID: forward to the shared class-complete
 * helper with the matching mgmt opcode.
 */
2419 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2421 BT_DBG("status 0x%02x", status);
2423 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID handler: append the UUID to hdev->uuids and queue the HCI
 * class/EIR update request; when no HCI traffic is needed (-ENODATA) the
 * command completes synchronously. NOTE(review): numbering jumps indicate
 * missing lines (locking, kmalloc NULL check, update_class/update_eir adds,
 * goto labels); verify upstream.
 */
2426 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2428 struct mgmt_cp_add_uuid *cp = data;
2429 struct pending_cmd *cmd;
2430 struct hci_request req;
2431 struct bt_uuid *uuid;
2434 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR-affecting command may be in flight at a time. */
2438 if (pending_eir_or_class(hdev)) {
2439 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2444 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2450 memcpy(uuid->uuid, cp->uuid, 16);
2451 uuid->svc_hint = cp->svc_hint;
2452 uuid->size = get_uuid_size(cp->uuid);
2454 list_add_tail(&uuid->list, &hdev->uuids);
2456 hci_req_init(&req, hdev);
2461 err = hci_req_run(&req, add_uuid_complete);
2463 if (err != -ENODATA)
2466 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2467 hdev->dev_class, 3);
2471 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2480 hci_dev_unlock(hdev);
/* Arm the service-cache: when the device is powered and the cache was not
 * already active, schedule the delayed service_cache work. NOTE(review): the
 * return statements are missing from this extract (numbering jumps); verify
 * upstream.
 */
2484 static bool enable_service_cache(struct hci_dev *hdev)
2486 if (!hdev_is_powered(hdev))
2489 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2490 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for Remove UUID: forward to the shared
 * class-complete helper with the matching mgmt opcode.
 */
2498 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2500 BT_DBG("status 0x%02x", status);
2502 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID handler: an all-zero UUID clears the whole list (and may
 * re-arm the service cache); otherwise all matching entries are deleted.
 * Not finding any match is an invalid-params error. NOTE(review): numbering
 * jumps indicate missing lines (locking, `found` counter, kfree, goto
 * labels, update_class/update_eir adds); verify upstream.
 */
2505 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2508 struct mgmt_cp_remove_uuid *cp = data;
2509 struct pending_cmd *cmd;
2510 struct bt_uuid *match, *tmp;
2511 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2512 struct hci_request req;
2515 BT_DBG("request for %s", hdev->name);
2519 if (pending_eir_or_class(hdev)) {
2520 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard UUID: clear everything. */
2525 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2526 hci_uuids_clear(hdev);
2528 if (enable_service_cache(hdev)) {
2529 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2530 0, hdev->dev_class, 3);
2539 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2540 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2543 list_del(&match->list);
2549 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2550 MGMT_STATUS_INVALID_PARAMS);
2555 hci_req_init(&req, hdev);
2560 err = hci_req_run(&req, remove_uuid_complete);
2562 if (err != -ENODATA)
2565 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2566 hdev->dev_class, 3);
2570 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2579 hci_dev_unlock(hdev);
/* HCI request completion for Set Device Class: forward to the shared
 * class-complete helper with the matching mgmt opcode.
 */
2583 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2585 BT_DBG("status 0x%02x", status);
2587 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class handler: stores the new major/minor class and queues
 * an HCI class (and EIR) update when powered; rejects reserved bits in
 * either field. NOTE(review): numbering jumps indicate missing lines
 * (locking, update_class/update_eir adds, goto labels, braces); verify
 * upstream.
 */
2590 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2593 struct mgmt_cp_set_dev_class *cp = data;
2594 struct pending_cmd *cmd;
2595 struct hci_request req;
2598 BT_DBG("request for %s", hdev->name);
2600 if (!lmp_bredr_capable(hdev))
2601 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2602 MGMT_STATUS_NOT_SUPPORTED);
2606 if (pending_eir_or_class(hdev)) {
2607 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low two minor bits and high three major bits are reserved and must be 0. */
2612 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2613 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2614 MGMT_STATUS_INVALID_PARAMS);
2618 hdev->major_class = cp->major;
2619 hdev->minor_class = cp->minor;
2621 if (!hdev_is_powered(hdev)) {
2622 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2623 hdev->dev_class, 3);
2627 hci_req_init(&req, hdev);
/* Flush the service cache synchronously; drop the lock since the cache
 * work itself takes it. */
2629 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2630 hci_dev_unlock(hdev);
2631 cancel_delayed_work_sync(&hdev->service_cache);
2638 err = hci_req_run(&req, set_class_complete);
2640 if (err != -ENODATA)
2643 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2644 hdev->dev_class, 3);
2648 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2657 hci_dev_unlock(hdev);
/* MGMT Load Link Keys handler: validates count/length against the wire
 * format, validates each entry, replaces the stored BR/EDR link keys, and
 * updates the keep-debug-keys policy. Debug combination keys are never
 * stored. NOTE(review): numbering jumps indicate missing lines (locking,
 * `bool changed`, `if (cp->debug_keys)`, braces); verify upstream.
 */
2661 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2664 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on key_count such that the total message fits in u16. */
2665 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2666 sizeof(struct mgmt_link_key_info));
2667 u16 key_count, expected_len;
2671 BT_DBG("request for %s", hdev->name);
2673 if (!lmp_bredr_capable(hdev))
2674 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2675 MGMT_STATUS_NOT_SUPPORTED);
2677 key_count = __le16_to_cpu(cp->key_count);
2678 if (key_count > max_key_count) {
2679 BT_ERR("load_link_keys: too big key_count value %u",
2681 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2682 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly. */
2685 expected_len = sizeof(*cp) + key_count *
2686 sizeof(struct mgmt_link_key_info);
2687 if (expected_len != len) {
2688 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2690 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2691 MGMT_STATUS_INVALID_PARAMS);
2694 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2695 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2696 MGMT_STATUS_INVALID_PARAMS);
2698 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching stored state. */
2701 for (i = 0; i < key_count; i++) {
2702 struct mgmt_link_key_info *key = &cp->keys[i];
2704 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2705 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2706 MGMT_STATUS_INVALID_PARAMS);
2711 hci_link_keys_clear(hdev);
2714 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2717 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2721 new_settings(hdev, NULL);
2723 for (i = 0; i < key_count; i++) {
2724 struct mgmt_link_key_info *key = &cp->keys[i];
2726 /* Always ignore debug keys and require a new pairing if
2727 * the user wants to use them.
2729 if (key->type == HCI_LK_DEBUG_COMBINATION)
2732 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2733 key->type, key->pin_len, NULL);
2736 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2738 hci_dev_unlock(hdev);
/* Emit an MGMT Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk (typically the requester, who gets a command
 * response instead).
 */
2743 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2744 u8 addr_type, struct sock *skip_sk)
2746 struct mgmt_ev_device_unpaired ev;
2748 bacpy(&ev.addr.bdaddr, bdaddr);
2749 ev.addr.type = addr_type;
2751 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device handler: removes stored keys (link key for BR/EDR,
 * IRK+LTK for LE) and optionally disconnects the live link. NOTE(review):
 * numbering jumps indicate missing lines (locking, `u8 addr_type`, braces,
 * goto labels, the err<0 not-paired check); verify upstream.
 */
2755 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2758 struct mgmt_cp_unpair_device *cp = data;
2759 struct mgmt_rp_unpair_device rp;
2760 struct hci_cp_disconnect dc;
2761 struct pending_cmd *cmd;
2762 struct hci_conn *conn;
2765 memset(&rp, 0, sizeof(rp));
2766 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2767 rp.addr.type = cp->addr.type;
2769 if (!bdaddr_type_is_valid(cp->addr.type))
2770 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2771 MGMT_STATUS_INVALID_PARAMS,
2774 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2775 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2776 MGMT_STATUS_INVALID_PARAMS,
2781 if (!hdev_is_powered(hdev)) {
2782 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2783 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2787 if (cp->addr.type == BDADDR_BREDR) {
2788 /* If disconnection is requested, then look up the
2789 * connection. If the remote device is connected, it
2790 * will be later used to terminate the link.
2792 * Setting it to NULL explicitly will cause no
2793 * termination of the link.
2796 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2801 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2805 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2808 /* Defer clearing up the connection parameters
2809 * until closing to give a chance of keeping
2810 * them if a repairing happens.
2812 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2814 /* If disconnection is not requested, then
2815 * clear the connection variable so that the
2816 * link is not terminated.
2818 if (!cp->disconnect)
2822 if (cp->addr.type == BDADDR_LE_PUBLIC)
2823 addr_type = ADDR_LE_DEV_PUBLIC;
2825 addr_type = ADDR_LE_DEV_RANDOM;
/* Remove both the identity resolving key and the long term key. */
2827 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2829 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2833 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2834 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2838 /* If the connection variable is set, then termination of the
2839 * link is requested.
2842 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2844 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2848 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2855 cmd->cmd_complete = addr_cmd_complete;
2857 dc.handle = cpu_to_le16(conn->handle);
2858 dc.reason = 0x13; /* Remote User Terminated Connection */
2859 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2861 mgmt_pending_remove(cmd);
2864 hci_dev_unlock(hdev);
/* MGMT Disconnect handler: looks up the live ACL or LE connection for the
 * given address and terminates it with HCI_ERROR_REMOTE_USER_TERM, replying
 * asynchronously via the pending command. NOTE(review): numbering jumps
 * indicate missing lines (locking, goto labels, braces); verify upstream.
 */
2868 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2871 struct mgmt_cp_disconnect *cp = data;
2872 struct mgmt_rp_disconnect rp;
2873 struct pending_cmd *cmd;
2874 struct hci_conn *conn;
2879 memset(&rp, 0, sizeof(rp));
2880 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2881 rp.addr.type = cp->addr.type;
2883 if (!bdaddr_type_is_valid(cp->addr.type))
2884 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2885 MGMT_STATUS_INVALID_PARAMS,
2890 if (!test_bit(HCI_UP, &hdev->flags)) {
2891 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2892 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one Disconnect may be pending at a time. */
2896 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2897 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2898 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2902 if (cp->addr.type == BDADDR_BREDR)
2903 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2906 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2908 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2909 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2910 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2914 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2920 cmd->cmd_complete = generic_cmd_complete;
2922 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2924 mgmt_pending_remove(cmd);
2927 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type to the mgmt BDADDR_* address type.
 * NOTE(review): the `case LE_LINK:` and `default:` labels are missing from
 * this extract (numbering jumps 2933 -> 2935 and after 2937); verify
 * upstream.
 */
2931 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2933 switch (link_type) {
2935 switch (addr_type) {
2936 case ADDR_LE_DEV_PUBLIC:
2937 return BDADDR_LE_PUBLIC;
2940 /* Fallback to LE Random address type */
2941 return BDADDR_LE_RANDOM;
2945 /* Fallback to BR/EDR type */
2946 return BDADDR_BREDR;
/* MGMT Get Connections handler: counts mgmt-visible connections, allocates a
 * response sized for that count, fills it while skipping SCO/eSCO links, and
 * replies with the (possibly smaller) final length. NOTE(review): numbering
 * jumps indicate missing lines (locking, `i` reset, allocation failure
 * handling, braces); verify upstream.
 */
2950 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2953 struct mgmt_rp_get_connections *rp;
2963 if (!hdev_is_powered(hdev)) {
2964 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2965 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected. */
2970 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2971 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2975 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2976 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill addresses, skipping SCO/eSCO links. */
2983 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2984 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2986 bacpy(&rp->addr[i].bdaddr, &c->dst);
2987 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2988 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2993 rp->conn_count = cpu_to_le16(i);
2995 /* Recalculate length in case of filtered SCO connections, etc */
2996 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2998 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3004 hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply for @cp's address, tracking it as a
 * pending mgmt command; the pending entry is dropped again on send failure.
 * NOTE(review): the ENOMEM check after mgmt_pending_add appears to be
 * missing from this extract; verify upstream.
 */
3008 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3009 struct mgmt_cp_pin_code_neg_reply *cp)
3011 struct pending_cmd *cmd;
3014 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3019 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3020 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3022 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code
 * for an existing ACL connection to the controller.
 *
 * For high-security connections a 16-byte PIN is mandatory; a shorter
 * PIN is rejected and automatically converted into a negative reply.
 */
3027 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3030 struct hci_conn *conn;
3031 struct mgmt_cp_pin_code_reply *cp = data;
3032 struct hci_cp_pin_code_reply reply;
3033 struct pending_cmd *cmd;
3040 if (!hdev_is_powered(hdev)) {
3041 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3042 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only applies to BR/EDR (ACL) connections. */
3046 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3048 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3049 MGMT_STATUS_NOT_CONNECTED);
/* Security level HIGH requires a full 16-byte PIN; reject and turn
 * the request into a negative reply towards the controller.
 */
3053 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3054 struct mgmt_cp_pin_code_neg_reply ncp;
3056 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3058 BT_ERR("PIN code is not 16 bytes long");
3060 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3062 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3063 MGMT_STATUS_INVALID_PARAMS);
3068 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3074 cmd->cmd_complete = addr_cmd_complete;
/* Build and send the HCI PIN Code Reply. */
3076 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3077 reply.pin_len = cp->pin_len;
3078 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3080 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3082 mgmt_pending_remove(cmd);
3085 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: store the IO capability used
 * for subsequent pairing attempts.  Values above
 * SMP_IO_KEYBOARD_DISPLAY are rejected as invalid.
 */
3089 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3092 struct mgmt_cp_set_io_capability *cp = data;
3096 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3097 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3098 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3102 hdev->io_capability = cp->io_capability;
3104 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3105 hdev->io_capability);
3107 hci_dev_unlock(hdev);
3109 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command (if any) whose
 * user_data points at the given connection.
 */
3113 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3115 struct hci_dev *hdev = conn->hdev;
3116 struct pending_cmd *cmd;
3118 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3119 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3122 if (cmd->user_data != conn)
/* Complete a pending Pair Device command with the given mgmt status,
 * detach all pairing callbacks from the connection and drop the
 * reference taken when pairing started.
 */
3131 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3133 struct mgmt_rp_pair_device rp;
3134 struct hci_conn *conn = cmd->user_data;
3137 bacpy(&rp.addr.bdaddr, &conn->dst);
3138 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3140 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3143 /* So we don't get further callbacks for this connection */
3144 conn->connect_cfm_cb = NULL;
3145 conn->security_cfm_cb = NULL;
3146 conn->disconn_cfm_cb = NULL;
3148 hci_conn_drop(conn);
3150 /* The device is paired so there is no need to remove
3151 * its connection parameters anymore.
3153 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over SMP finishes; complete any
 * pending Pair Device command with success or failure accordingly.
 */
3160 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3162 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3163 struct pending_cmd *cmd;
3165 cmd = find_pairing(conn);
3167 cmd->cmd_complete(cmd, status);
3168 mgmt_pending_remove(cmd);
/* BR/EDR connection callback: translate the HCI status and complete
 * the matching pending Pair Device command, if one exists.
 */
3172 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3174 struct pending_cmd *cmd;
3176 BT_DBG("status %u", status);
3178 cmd = find_pairing(conn);
3180 BT_DBG("Unable to find a pending command");
3184 cmd->cmd_complete(cmd, mgmt_status(status));
3185 mgmt_pending_remove(cmd);
/* LE connection callback variant of pairing_complete_cb: complete the
 * pending Pair Device command with the translated HCI status.
 */
3188 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3190 struct pending_cmd *cmd;
3192 BT_DBG("status %u", status);
3197 cmd = find_pairing(conn);
3199 BT_DBG("Unable to find a pending command");
3203 cmd->cmd_complete(cmd, mgmt_status(status));
3204 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PAIR_DEVICE: initiate pairing with a remote
 * device over BR/EDR or LE, depending on the supplied address type.
 *
 * Validates the address type and IO capability, creates the
 * connection, wires up completion callbacks and tracks the operation
 * as a pending command that is completed when pairing finishes.
 */
3207 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3210 struct mgmt_cp_pair_device *cp = data;
3211 struct mgmt_rp_pair_device rp;
3212 struct pending_cmd *cmd;
3213 u8 sec_level, auth_type;
3214 struct hci_conn *conn;
/* The reply always echoes the target address back to user space. */
3219 memset(&rp, 0, sizeof(rp));
3220 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3221 rp.addr.type = cp->addr.type;
3223 if (!bdaddr_type_is_valid(cp->addr.type))
3224 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3225 MGMT_STATUS_INVALID_PARAMS,
3228 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3229 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3230 MGMT_STATUS_INVALID_PARAMS,
3235 if (!hdev_is_powered(hdev)) {
3236 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3237 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Dedicated bonding with medium security is the default policy. */
3241 sec_level = BT_SECURITY_MEDIUM;
3242 auth_type = HCI_AT_DEDICATED_BONDING;
3244 if (cp->addr.type == BDADDR_BREDR) {
3245 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3250 /* Convert from L2CAP channel address type to HCI address type
3252 if (cp->addr.type == BDADDR_LE_PUBLIC)
3253 addr_type = ADDR_LE_DEV_PUBLIC;
3255 addr_type = ADDR_LE_DEV_RANDOM;
3257 /* When pairing a new device, it is expected to remember
3258 * this device for future connections. Adding the connection
3259 * parameter information ahead of time allows tracking
3260 * of the slave preferred values and will speed up any
3261 * further connection establishment.
3263 * If connection parameters already exist, then they
3264 * will be kept and this function does nothing.
3266 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3268 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3269 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map connection-creation errors to mgmt status codes. */
3276 if (PTR_ERR(conn) == -EBUSY)
3277 status = MGMT_STATUS_BUSY;
3278 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3279 status = MGMT_STATUS_NOT_SUPPORTED;
3280 else if (PTR_ERR(conn) == -ECONNREFUSED)
3281 status = MGMT_STATUS_REJECTED;
3283 status = MGMT_STATUS_CONNECT_FAILED;
3285 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A non-NULL connect callback means a pairing is already active. */
3291 if (conn->connect_cfm_cb) {
3292 hci_conn_drop(conn);
3293 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3294 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3298 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3301 hci_conn_drop(conn);
3305 cmd->cmd_complete = pairing_complete;
3307 /* For LE, just connecting isn't a proof that the pairing finished */
3308 if (cp->addr.type == BDADDR_BREDR) {
3309 conn->connect_cfm_cb = pairing_complete_cb;
3310 conn->security_cfm_cb = pairing_complete_cb;
3311 conn->disconn_cfm_cb = pairing_complete_cb;
3313 conn->connect_cfm_cb = le_pairing_complete_cb;
3314 conn->security_cfm_cb = le_pairing_complete_cb;
3315 conn->disconn_cfm_cb = le_pairing_complete_cb;
3318 conn->io_capability = cp->io_cap;
/* Hold a reference on the connection for the pending command. */
3319 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure, complete immediately. */
3321 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3322 hci_conn_security(conn, sec_level, auth_type, true)) {
3323 cmd->cmd_complete(cmd, 0);
3324 mgmt_pending_remove(cmd);
3330 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort a pending Pair Device
 * command for the given address.  The pending pairing is completed
 * with MGMT_STATUS_CANCELLED before acknowledging the cancel request.
 */
3334 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3337 struct mgmt_addr_info *addr = data;
3338 struct pending_cmd *cmd;
3339 struct hci_conn *conn;
3346 if (!hdev_is_powered(hdev)) {
3347 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3348 MGMT_STATUS_NOT_POWERED);
3352 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3354 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3355 MGMT_STATUS_INVALID_PARAMS);
3359 conn = cmd->user_data;
/* The cancel request must target the device being paired. */
3361 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3362 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3363 MGMT_STATUS_INVALID_PARAMS);
3367 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3368 mgmt_pending_remove(cmd);
3370 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3371 addr, sizeof(*addr));
3373 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN negative reply,
 * user confirm / passkey replies and their negative variants).
 *
 * For LE addresses the response is routed through SMP; for BR/EDR it
 * is tracked as a pending command and sent as the given HCI opcode.
 * The passkey argument is only used for HCI_OP_USER_PASSKEY_REPLY.
 */
3377 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3378 struct mgmt_addr_info *addr, u16 mgmt_op,
3379 u16 hci_op, __le32 passkey)
3381 struct pending_cmd *cmd;
3382 struct hci_conn *conn;
3387 if (!hdev_is_powered(hdev)) {
3388 err = cmd_complete(sk, hdev->id, mgmt_op,
3389 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport matching the address. */
3394 if (addr->type == BDADDR_BREDR)
3395 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3397 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3400 err = cmd_complete(sk, hdev->id, mgmt_op,
3401 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go to the SMP layer, not the controller. */
3406 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3407 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3409 err = cmd_complete(sk, hdev->id, mgmt_op,
3410 MGMT_STATUS_SUCCESS, addr,
3413 err = cmd_complete(sk, hdev->id, mgmt_op,
3414 MGMT_STATUS_FAILED, addr,
3420 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3426 cmd->cmd_complete = addr_cmd_complete;
3428 /* Continue with pairing via HCI */
3429 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3430 struct hci_cp_user_passkey_reply cp;
3432 bacpy(&cp.bdaddr, &addr->bdaddr);
3433 cp.passkey = passkey;
3434 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3436 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3440 mgmt_pending_remove(cmd);
3443 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the PIN negative reply opcodes.
 */
3447 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3448 void *data, u16 len)
3450 struct mgmt_cp_pin_code_neg_reply *cp = data;
3454 return user_pairing_resp(sk, hdev, &cp->addr,
3455 MGMT_OP_PIN_CODE_NEG_REPLY,
3456 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: validate the parameter size
 * and delegate to user_pairing_resp() with the confirm reply opcodes.
 */
3459 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3462 struct mgmt_cp_user_confirm_reply *cp = data;
3466 if (len != sizeof(*cp))
3467 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3468 MGMT_STATUS_INVALID_PARAMS);
3470 return user_pairing_resp(sk, hdev, &cp->addr,
3471 MGMT_OP_USER_CONFIRM_REPLY,
3472 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the confirm negative reply opcodes.
 */
3475 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3476 void *data, u16 len)
3478 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3482 return user_pairing_resp(sk, hdev, &cp->addr,
3483 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3484 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: thin wrapper around
 * user_pairing_resp(), forwarding the user-entered passkey.
 */
3487 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3490 struct mgmt_cp_user_passkey_reply *cp = data;
3494 return user_pairing_resp(sk, hdev, &cp->addr,
3495 MGMT_OP_USER_PASSKEY_REPLY,
3496 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the passkey negative reply opcodes.
 */
3499 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3500 void *data, u16 len)
3502 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3506 return user_pairing_resp(sk, hdev, &cp->addr,
3507 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3508 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying the current
 * hdev->dev_name onto the given request.
 */
3511 static void update_name(struct hci_request *req)
3513 struct hci_dev *hdev = req->hdev;
3514 struct hci_cp_write_local_name cp;
3516 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3518 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: answer the
 * pending mgmt command with either an error status or success.
 */
3521 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3523 struct mgmt_cp_set_local_name *cp;
3524 struct pending_cmd *cmd;
3526 BT_DBG("status 0x%02x", status);
3530 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3537 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3538 mgmt_status(status));
3540 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3543 mgmt_pending_remove(cmd);
3546 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the adapter's friendly
 * name and short name.  When powered off only the stored values are
 * changed and an event is emitted; when powered on an HCI request is
 * issued and completed asynchronously via set_name_complete().
 */
3549 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3552 struct mgmt_cp_set_local_name *cp = data;
3553 struct pending_cmd *cmd;
3554 struct hci_request req;
3561 /* If the old values are the same as the new ones just return a
3562 * direct command complete event.
3564 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3565 !memcmp(hdev->short_name, cp->short_name,
3566 sizeof(hdev->short_name))) {
3567 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3572 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Without power there is no controller to program; store the name
 * and notify listeners directly.
 */
3574 if (!hdev_is_powered(hdev)) {
3575 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3577 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3582 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3588 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3594 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3596 hci_req_init(&req, hdev);
3598 if (lmp_bredr_capable(hdev)) {
3603 /* The name is stored in the scan response data and so
3604 * no need to update the advertising data here.
3606 if (lmp_le_capable(hdev))
3607 update_scan_rsp_data(&req);
3609 err = hci_req_run(&req, set_name_complete);
3611 mgmt_pending_remove(cmd);
3614 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: request the controller's
 * OOB pairing data.  Requires power and SSP support; uses the
 * extended variant when BR/EDR Secure Connections is enabled.
 */
3618 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3619 void *data, u16 data_len)
3621 struct pending_cmd *cmd;
3624 BT_DBG("%s", hdev->name);
3628 if (!hdev_is_powered(hdev)) {
3629 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3630 MGMT_STATUS_NOT_POWERED);
3634 if (!lmp_ssp_capable(hdev)) {
3635 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3636 MGMT_STATUS_NOT_SUPPORTED);
/* Only one Read Local OOB Data request may be in flight. */
3640 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3641 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3646 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3652 if (bredr_sc_enabled(hdev))
3653 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3656 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3659 mgmt_pending_remove(cmd);
3662 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: store remote out-of-band
 * pairing data.  Accepts either the legacy (P-192 only) parameter
 * layout or the extended layout carrying both P-192 and P-256
 * values; the command size selects which one is parsed.
 */
3666 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3667 void *data, u16 len)
3669 struct mgmt_addr_info *addr = data;
3672 BT_DBG("%s ", hdev->name);
3674 if (!bdaddr_type_is_valid(addr->type))
3675 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3676 MGMT_STATUS_INVALID_PARAMS, addr,
/* Legacy layout: only the P-192 hash/randomizer, BR/EDR only. */
3681 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3682 struct mgmt_cp_add_remote_oob_data *cp = data;
3685 if (cp->addr.type != BDADDR_BREDR) {
3686 err = cmd_complete(sk, hdev->id,
3687 MGMT_OP_ADD_REMOTE_OOB_DATA,
3688 MGMT_STATUS_INVALID_PARAMS,
3689 &cp->addr, sizeof(cp->addr));
3693 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3694 cp->addr.type, cp->hash,
3695 cp->rand, NULL, NULL);
3697 status = MGMT_STATUS_FAILED;
3699 status = MGMT_STATUS_SUCCESS;
3701 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3702 status, &cp->addr, sizeof(cp->addr));
/* Extended layout: P-192 and P-256 hash/randomizer pairs. */
3703 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3704 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3705 u8 *rand192, *hash192, *rand256, *hash256;
3708 if (bdaddr_type_is_le(cp->addr.type)) {
3709 /* Enforce zero-valued 192-bit parameters as
3710 * long as legacy SMP OOB isn't implemented.
3712 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3713 memcmp(cp->hash192, ZERO_KEY, 16)) {
3714 err = cmd_complete(sk, hdev->id,
3715 MGMT_OP_ADD_REMOTE_OOB_DATA,
3716 MGMT_STATUS_INVALID_PARAMS,
3717 addr, sizeof(*addr));
3724 /* In case one of the P-192 values is set to zero,
3725 * then just disable OOB data for P-192.
3727 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3728 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3732 rand192 = cp->rand192;
3733 hash192 = cp->hash192;
3737 /* In case one of the P-256 values is set to zero, then just
3738 * disable OOB data for P-256.
3740 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3741 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3745 rand256 = cp->rand256;
3746 hash256 = cp->hash256;
3749 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3750 cp->addr.type, hash192, rand192,
3753 status = MGMT_STATUS_FAILED;
3755 status = MGMT_STATUS_SUCCESS;
3757 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3758 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command. */
3760 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3761 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3762 MGMT_STATUS_INVALID_PARAMS);
3766 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote
 * OOB data for a single BR/EDR address, or clear all entries when the
 * wildcard address BDADDR_ANY is given.
 */
3770 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3771 void *data, u16 len)
3773 struct mgmt_cp_remove_remote_oob_data *cp = data;
3777 BT_DBG("%s", hdev->name);
3779 if (cp->addr.type != BDADDR_BREDR)
3780 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3781 MGMT_STATUS_INVALID_PARAMS,
3782 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard that wipes the whole store. */
3786 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3787 hci_remote_oob_data_clear(hdev);
3788 status = MGMT_STATUS_SUCCESS;
3792 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3794 status = MGMT_STATUS_INVALID_PARAMS;
3796 status = MGMT_STATUS_SUCCESS;
3799 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3800 status, &cp->addr, sizeof(cp->addr));
3802 hci_dev_unlock(hdev);
/* Queue the HCI commands needed to start device discovery of the
 * currently configured type (BR/EDR inquiry, LE scan, or both
 * interleaved) onto the given request.
 *
 * On failure *status is set to the mgmt error to report.  The boolean
 * return value distinguishes success from failure (the return
 * statements are not visible in this extract).
 */
3806 static bool trigger_discovery(struct hci_request *req, u8 *status)
3808 struct hci_dev *hdev = req->hdev;
3809 struct hci_cp_le_set_scan_param param_cp;
3810 struct hci_cp_le_set_scan_enable enable_cp;
3811 struct hci_cp_inquiry inq_cp;
3812 /* General inquiry access code (GIAC) */
3813 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3817 switch (hdev->discovery.type) {
3818 case DISCOV_TYPE_BREDR:
3819 *status = mgmt_bredr_support(hdev);
/* An inquiry already in progress means we are busy. */
3823 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3824 *status = MGMT_STATUS_BUSY;
3828 hci_inquiry_cache_flush(hdev);
3830 memset(&inq_cp, 0, sizeof(inq_cp));
3831 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3832 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3833 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3836 case DISCOV_TYPE_LE:
3837 case DISCOV_TYPE_INTERLEAVED:
3838 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs a working BR/EDR side too. */
3842 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3843 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3844 *status = MGMT_STATUS_NOT_SUPPORTED;
3848 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3849 /* Don't let discovery abort an outgoing
3850 * connection attempt that's using directed
3853 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3855 *status = MGMT_STATUS_REJECTED;
3859 disable_advertising(req);
3862 /* If controller is scanning, it means the background scanning
3863 * is running. Thus, we should temporarily stop it in order to
3864 * set the discovery scanning parameters.
3866 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3867 hci_req_add_le_scan_disable(req);
/* NOTE(review): "¶m_cp" below is mojibake of "&param_cp" in
 * the original source — fix the encoding when restoring.
 */
3869 memset(¶m_cp, 0, sizeof(param_cp));
3871 /* All active scans will be done with either a resolvable
3872 * private address (when privacy feature has been enabled)
3873 * or non-resolvable private address.
3875 err = hci_update_random_address(req, true, &own_addr_type);
3877 *status = MGMT_STATUS_FAILED;
/* Program the active scan parameters and enable scanning. */
3881 param_cp.type = LE_SCAN_ACTIVE;
3882 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3883 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3884 param_cp.own_address_type = own_addr_type;
3885 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3888 memset(&enable_cp, 0, sizeof(enable_cp));
3889 enable_cp.enable = LE_SCAN_ENABLE;
3890 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3891 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery types are invalid parameters. */
3896 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery: complete the pending mgmt command, update the
 * discovery state, and arm the LE scan disable timer if needed.
 */
3903 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3906 struct pending_cmd *cmd;
3907 unsigned long timeout;
3909 BT_DBG("status %d", status);
/* The pending command may be either discovery variant. */
3913 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3915 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3918 cmd->cmd_complete(cmd, mgmt_status(status));
3919 mgmt_pending_remove(cmd);
3923 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3927 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3929 /* If the scan involves LE scan, pick proper timeout to schedule
3930 * hdev->le_scan_disable that will stop it.
3932 switch (hdev->discovery.type) {
3933 case DISCOV_TYPE_LE:
3934 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3936 case DISCOV_TYPE_INTERLEAVED:
3937 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3939 case DISCOV_TYPE_BREDR:
3943 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3949 /* When service discovery is used and the controller has
3950 * a strict duplicate filter, it is important to remember
3951 * the start and duration of the scan. This is required
3952 * for restarting scanning during the discovery phase.
3954 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
3956 hdev->discovery.result_filtering) {
3957 hdev->discovery.scan_start = jiffies;
3958 hdev->discovery.scan_duration = timeout;
3961 queue_delayed_work(hdev->workqueue,
3962 &hdev->le_scan_disable, timeout);
3966 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_START_DISCOVERY: kick off device discovery of
 * the requested type.  Rejected while unpowered, while a discovery is
 * already running, or during periodic inquiry.  Completion is
 * reported asynchronously through start_discovery_complete().
 */
3969 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3970 void *data, u16 len)
3972 struct mgmt_cp_start_discovery *cp = data;
3973 struct pending_cmd *cmd;
3974 struct hci_request req;
3978 BT_DBG("%s", hdev->name);
3982 if (!hdev_is_powered(hdev)) {
3983 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3984 MGMT_STATUS_NOT_POWERED,
3985 &cp->type, sizeof(cp->type));
3989 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3990 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3991 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3992 MGMT_STATUS_BUSY, &cp->type,
3997 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4003 cmd->cmd_complete = generic_cmd_complete;
4005 /* Clear the discovery filter first to free any previously
4006 * allocated memory for the UUID list.
4008 hci_discovery_filter_clear(hdev);
4010 hdev->discovery.type = cp->type;
4011 hdev->discovery.report_invalid_rssi = false;
4013 hci_req_init(&req, hdev);
4015 if (!trigger_discovery(&req, &status)) {
4016 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4017 status, &cp->type, sizeof(cp->type));
4018 mgmt_pending_remove(cmd);
4022 err = hci_req_run(&req, start_discovery_complete);
4024 mgmt_pending_remove(cmd);
4028 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4031 hci_dev_unlock(hdev);
/* Completion helper for Start Service Discovery: echo the command's
 * stored parameters back in the reply with the given status.
 */
4035 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
4037 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like Start Discovery
 * but with result filtering by RSSI threshold and an optional list of
 * service UUIDs supplied by user space.
 *
 * The variable-length UUID list is validated against both a maximum
 * count (to prevent integer overflow of the expected length) and the
 * actual command size before being copied.
 */
4041 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4042 void *data, u16 len)
4044 struct mgmt_cp_start_service_discovery *cp = data;
4045 struct pending_cmd *cmd;
4046 struct hci_request req;
/* Upper bound keeps sizeof(*cp) + uuid_count * 16 within u16. */
4047 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4048 u16 uuid_count, expected_len;
4052 BT_DBG("%s", hdev->name);
4056 if (!hdev_is_powered(hdev)) {
4057 err = cmd_complete(sk, hdev->id,
4058 MGMT_OP_START_SERVICE_DISCOVERY,
4059 MGMT_STATUS_NOT_POWERED,
4060 &cp->type, sizeof(cp->type));
4064 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4065 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
4066 err = cmd_complete(sk, hdev->id,
4067 MGMT_OP_START_SERVICE_DISCOVERY,
4068 MGMT_STATUS_BUSY, &cp->type,
4073 uuid_count = __le16_to_cpu(cp->uuid_count);
4074 if (uuid_count > max_uuid_count) {
4075 BT_ERR("service_discovery: too big uuid_count value %u",
4077 err = cmd_complete(sk, hdev->id,
4078 MGMT_OP_START_SERVICE_DISCOVERY,
4079 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The command length must match the declared UUID count exactly. */
4084 expected_len = sizeof(*cp) + uuid_count * 16;
4085 if (expected_len != len) {
4086 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4088 err = cmd_complete(sk, hdev->id,
4089 MGMT_OP_START_SERVICE_DISCOVERY,
4090 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4095 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4102 cmd->cmd_complete = service_discovery_cmd_complete;
4104 /* Clear the discovery filter first to free any previously
4105 * allocated memory for the UUID list.
4107 hci_discovery_filter_clear(hdev);
4109 hdev->discovery.result_filtering = true;
4110 hdev->discovery.type = cp->type;
4111 hdev->discovery.rssi = cp->rssi;
4112 hdev->discovery.uuid_count = uuid_count;
4114 if (uuid_count > 0) {
4115 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4117 if (!hdev->discovery.uuids) {
4118 err = cmd_complete(sk, hdev->id,
4119 MGMT_OP_START_SERVICE_DISCOVERY,
4121 &cp->type, sizeof(cp->type));
4122 mgmt_pending_remove(cmd);
4127 hci_req_init(&req, hdev);
4129 if (!trigger_discovery(&req, &status)) {
4130 err = cmd_complete(sk, hdev->id,
4131 MGMT_OP_START_SERVICE_DISCOVERY,
4132 status, &cp->type, sizeof(cp->type));
4133 mgmt_pending_remove(cmd);
4137 err = hci_req_run(&req, start_discovery_complete);
4139 mgmt_pending_remove(cmd);
4143 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4146 hci_dev_unlock(hdev);
/* HCI request completion callback for Stop Discovery: answer the
 * pending command and mark discovery as stopped.
 */
4150 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4152 struct pending_cmd *cmd;
4154 BT_DBG("status %d", status);
4158 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4160 cmd->cmd_complete(cmd, mgmt_status(status));
4161 mgmt_pending_remove(cmd);
4165 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4167 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY: abort an active discovery of
 * the matching type.  If hci_stop_discovery() queued no HCI commands
 * the operation completes synchronously.
 */
4170 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4173 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4174 struct pending_cmd *cmd;
4175 struct hci_request req;
4178 BT_DBG("%s", hdev->name);
4182 if (!hci_discovery_active(hdev)) {
4183 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4184 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4185 sizeof(mgmt_cp->type));
/* The requested type must match the discovery that is running. */
4189 if (hdev->discovery.type != mgmt_cp->type) {
4190 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4191 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4192 sizeof(mgmt_cp->type));
4196 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4202 cmd->cmd_complete = generic_cmd_complete;
4204 hci_req_init(&req, hdev);
4206 hci_stop_discovery(&req);
4208 err = hci_req_run(&req, stop_discovery_complete);
4210 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4214 mgmt_pending_remove(cmd);
4216 /* If no HCI commands were sent we're done */
4217 if (err == -ENODATA) {
4218 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4219 &mgmt_cp->type, sizeof(mgmt_cp->type));
4220 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4224 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: during discovery, let user space
 * confirm whether a device's name is already known.  Devices with
 * unknown names are queued for name resolution.
 */
4228 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4231 struct mgmt_cp_confirm_name *cp = data;
4232 struct inquiry_entry *e;
4235 BT_DBG("%s", hdev->name);
4239 if (!hci_discovery_active(hdev)) {
4240 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4241 MGMT_STATUS_FAILED, &cp->addr,
4246 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4248 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4249 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4254 if (cp->name_known) {
4255 e->name_state = NAME_KNOWN;
/* Name unknown: mark it needed so name resolution picks it up. */
4258 e->name_state = NAME_NEEDED;
4259 hci_inquiry_cache_update_resolve(hdev, e);
4262 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4266 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: add an address to the adapter's
 * blacklist and emit a Device Blocked event on success.
 */
4270 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4273 struct mgmt_cp_block_device *cp = data;
4277 BT_DBG("%s", hdev->name);
4279 if (!bdaddr_type_is_valid(cp->addr.type))
4280 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4281 MGMT_STATUS_INVALID_PARAMS,
4282 &cp->addr, sizeof(cp->addr));
4286 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4289 status = MGMT_STATUS_FAILED;
/* Notify other mgmt sockets about the newly blocked device. */
4293 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4295 status = MGMT_STATUS_SUCCESS;
4298 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4299 &cp->addr, sizeof(cp->addr));
4301 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: remove an address from the
 * adapter's blacklist and emit a Device Unblocked event on success.
 */
4306 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4309 struct mgmt_cp_unblock_device *cp = data;
4313 BT_DBG("%s", hdev->name);
4315 if (!bdaddr_type_is_valid(cp->addr.type))
4316 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4317 MGMT_STATUS_INVALID_PARAMS,
4318 &cp->addr, sizeof(cp->addr));
4322 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
/* Deleting a non-listed address is treated as invalid params. */
4325 status = MGMT_STATUS_INVALID_PARAMS;
4329 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4331 status = MGMT_STATUS_SUCCESS;
4334 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4335 &cp->addr, sizeof(cp->addr));
4337 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: store the Device ID record
 * (source, vendor, product, version) used in the EIR data.  Source
 * values above 0x0002 are outside the DI specification and rejected.
 */
4342 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4345 struct mgmt_cp_set_device_id *cp = data;
4346 struct hci_request req;
4350 BT_DBG("%s", hdev->name);
4352 source = __le16_to_cpu(cp->source);
4354 if (source > 0x0002)
4355 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4356 MGMT_STATUS_INVALID_PARAMS);
4360 hdev->devid_source = source;
4361 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4362 hdev->devid_product = __le16_to_cpu(cp->product);
4363 hdev->devid_version = __le16_to_cpu(cp->version);
4365 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
/* Refresh controller state so the new Device ID takes effect. */
4367 hci_req_init(&req, hdev);
4369 hci_req_run(&req, NULL);
4371 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Advertising: on failure
 * report the error to all pending commands; on success sync the
 * HCI_ADVERTISING flag with the controller state, answer the pending
 * commands and broadcast the new settings.
 */
4376 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4379 struct cmd_lookup match = { NULL, hdev };
4384 u8 mgmt_err = mgmt_status(status);
4386 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4387 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's LE advertising state in the mgmt flag. */
4391 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4392 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4394 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4396 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4399 new_settings(hdev, match.sk);
4405 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING: enable or disable LE
 * advertising.  If no HCI interaction is possible or necessary the
 * flag is toggled and the reply sent directly; otherwise an HCI
 * request is issued and completed via set_advertising_complete().
 */
4408 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4411 struct mgmt_mode *cp = data;
4412 struct pending_cmd *cmd;
4413 struct hci_request req;
4414 u8 val, enabled, status;
4417 BT_DBG("request for %s", hdev->name);
4419 status = mgmt_le_support(hdev);
4421 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
/* Only boolean values are accepted for the mode parameter. */
4424 if (cp->val != 0x00 && cp->val != 0x01)
4425 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4426 MGMT_STATUS_INVALID_PARAMS);
4431 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4433 /* The following conditions are ones which mean that we should
4434 * not do any HCI communication but directly send a mgmt
4435 * response to user space (after toggling the flag if
4438 if (!hdev_is_powered(hdev) || val == enabled ||
4439 hci_conn_num(hdev, LE_LINK) > 0 ||
4440 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4441 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4442 bool changed = false;
4444 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4445 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4449 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4454 err = new_settings(hdev, sk);
/* Refuse while another advertising or LE toggle is in flight. */
4459 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4460 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4461 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4466 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4472 hci_req_init(&req, hdev);
4475 enable_advertising(&req);
4477 disable_advertising(&req);
4479 err = hci_req_run(&req, set_advertising_complete);
4481 mgmt_pending_remove(cmd);
4484 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: configure the LE static
 * random address.  Only allowed while powered off.  A non-wildcard
 * address must not be BDADDR_NONE and must have the two most
 * significant bits set, as required for static random addresses.
 */
4488 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4489 void *data, u16 len)
4491 struct mgmt_cp_set_static_address *cp = data;
4494 BT_DBG("%s", hdev->name);
4496 if (!lmp_le_capable(hdev))
4497 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4498 MGMT_STATUS_NOT_SUPPORTED);
4500 if (hdev_is_powered(hdev))
4501 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4502 MGMT_STATUS_REJECTED);
4504 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4505 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4506 return cmd_status(sk, hdev->id,
4507 MGMT_OP_SET_STATIC_ADDRESS,
4508 MGMT_STATUS_INVALID_PARAMS);
4510 /* Two most significant bits shall be set */
4511 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4512 return cmd_status(sk, hdev->id,
4513 MGMT_OP_SET_STATIC_ADDRESS,
4514 MGMT_STATUS_INVALID_PARAMS);
4519 bacpy(&hdev->static_addr, &cp->bdaddr);
4521 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4525 err = new_settings(hdev, sk);
4528 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: set the LE scan interval and
 * window.  Both must lie in the HCI-defined range 0x0004-0x4000 and
 * the window must not exceed the interval.  A running background scan
 * is restarted so the new parameters take effect immediately.
 */
4532 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4533 void *data, u16 len)
4535 struct mgmt_cp_set_scan_params *cp = data;
4536 __u16 interval, window;
4539 BT_DBG("%s", hdev->name);
4541 if (!lmp_le_capable(hdev))
4542 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4543 MGMT_STATUS_NOT_SUPPORTED);
4545 interval = __le16_to_cpu(cp->interval);
4547 if (interval < 0x0004 || interval > 0x4000)
4548 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4549 MGMT_STATUS_INVALID_PARAMS);
4551 window = __le16_to_cpu(cp->window);
4553 if (window < 0x0004 || window > 0x4000)
4554 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4555 MGMT_STATUS_INVALID_PARAMS);
4557 if (window > interval)
4558 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4559 MGMT_STATUS_INVALID_PARAMS);
4563 hdev->le_scan_interval = interval;
4564 hdev->le_scan_window = window;
4566 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4568 /* If background scan is running, restart it so new parameters are
4571 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4572 hdev->discovery.state == DISCOVERY_STOPPED) {
4573 struct hci_request req;
4575 hci_req_init(&req, hdev);
4577 hci_req_add_le_scan_disable(&req);
4578 hci_req_add_le_passive_scan(&req);
4580 hci_req_run(&req, NULL);
4583 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_FAST_CONNECTABLE:
 * report failure, or commit the HCI_FAST_CONNECTABLE flag and notify
 * mgmt clients on success.
 * NOTE(review): lines are missing from this listing (lock call, if/else
 * braces, the branch conditions around 4602/4608).
 */
4588 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4591 struct pending_cmd *cmd;
4593 BT_DBG("status 0x%02x", status);
4597 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4602 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4603 mgmt_status(status));
4605 struct mgmt_mode *cp = cmd->param;
4608 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4610 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4612 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4613 new_settings(hdev, cmd->sk);
4616 mgmt_pending_remove(cmd);
4619 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle the fast-connectable
 * page-scan parameters on a powered, connectable BR/EDR controller
 * (requires Bluetooth 1.2+). The actual write is done asynchronously
 * via an HCI request completed by fast_connectable_complete().
 * NOTE(review): interior lines are missing from this listing.
 */
4622 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4623 void *data, u16 len)
4625 struct mgmt_mode *cp = data;
4626 struct pending_cmd *cmd;
4627 struct hci_request req;
4630 BT_DBG("%s", hdev->name);
4632 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4633 hdev->hci_ver < BLUETOOTH_VER_1_2)
4634 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4635 MGMT_STATUS_NOT_SUPPORTED);
4637 if (cp->val != 0x00 && cp->val != 0x01)
4638 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4639 MGMT_STATUS_INVALID_PARAMS);
4641 if (!hdev_is_powered(hdev))
4642 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4643 MGMT_STATUS_NOT_POWERED);
4645 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4646 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4647 MGMT_STATUS_REJECTED);
/* Only one fast-connectable change may be in flight at a time. */
4651 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4652 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the flag already has the requested value. */
4657 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4658 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4663 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4670 hci_req_init(&req, hdev);
4672 write_fast_connectable(&req, cp->val);
4674 err = hci_req_run(&req, fast_connectable_complete);
4676 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4677 MGMT_STATUS_FAILED);
4678 mgmt_pending_remove(cmd);
4682 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_BREDR: on failure
 * roll back the optimistically-set HCI_BREDR_ENABLED flag and report
 * the error; on success send the updated settings to the caller and
 * broadcast New Settings.
 * NOTE(review): braces and the success/failure branch condition are
 * missing from this listing.
 */
4687 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4689 struct pending_cmd *cmd;
4691 BT_DBG("status 0x%02x", status);
4695 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4700 u8 mgmt_err = mgmt_status(status);
4702 /* We need to restore the flag if related HCI commands
4705 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4707 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4709 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4710 new_settings(hdev, cmd->sk);
4713 mgmt_pending_remove(cmd);
4716 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable the BR/EDR transport on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered is
 * rejected, as is re-enabling when a static address or Secure
 * Connections configuration makes it invalid.
 * NOTE(review): interior lines (locking, err declaration, goto labels,
 * braces) are missing from this listing.
 */
4719 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4721 struct mgmt_mode *cp = data;
4722 struct pending_cmd *cmd;
4723 struct hci_request req;
4726 BT_DBG("request for %s", hdev->name);
4728 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4729 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4730 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR can only be toggled while LE stays enabled. */
4732 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4733 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4734 MGMT_STATUS_REJECTED);
4736 if (cp->val != 0x00 && cp->val != 0x01)
4737 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4738 MGMT_STATUS_INVALID_PARAMS);
4742 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4743 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* While powered off the flags can simply be flipped; BR/EDR-only
 * settings are cleared when the transport is disabled.
 */
4747 if (!hdev_is_powered(hdev)) {
4749 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4750 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4751 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4752 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4753 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4756 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4758 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4762 err = new_settings(hdev, sk);
4766 /* Reject disabling when powered on */
4768 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4769 MGMT_STATUS_REJECTED);
4772 /* When configuring a dual-mode controller to operate
4773 * with LE only and using a static address, then switching
4774 * BR/EDR back on is not allowed.
4776 * Dual-mode controllers shall operate with the public
4777 * address as its identity address for BR/EDR and LE. So
4778 * reject the attempt to create an invalid configuration.
4780 * The same restrictions applies when secure connections
4781 * has been enabled. For BR/EDR this is a controller feature
4782 * while for LE it is a host stack feature. This means that
4783 * switching BR/EDR back on when secure connections has been
4784 * enabled is not a supported transaction.
4786 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4787 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4788 test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
4789 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4790 MGMT_STATUS_REJECTED);
4795 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4796 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4801 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4807 /* We need to flip the bit already here so that update_adv_data
4808 * generates the correct flags.
4810 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4812 hci_req_init(&req, hdev);
4814 write_fast_connectable(&req, false);
4815 __hci_update_page_scan(&req);
4817 /* Since only the advertising data flags will change, there
4818 * is no need to update the scan response data.
4820 update_adv_data(&req);
4822 err = hci_req_run(&req, set_bredr_complete);
4824 mgmt_pending_remove(cmd);
4827 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_SECURE_CONN: commit
 * the HCI_SC_ENABLED/HCI_SC_ONLY flag pair according to the requested
 * mode (0x00 off, 0x01 SC enabled, 0x02 SC-only), or report failure.
 * NOTE(review): the switch/case (or if-chain) structure selecting
 * between the three flag outcomes at 4854/4858/4862 is missing from
 * this listing, along with braces and the lock call.
 */
4831 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4833 struct pending_cmd *cmd;
4834 struct mgmt_mode *cp;
4836 BT_DBG("%s status %u", hdev->name, status);
4840 cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4845 cmd_status(cmd->sk, cmd->index, cmd->opcode,
4846 mgmt_status(status));
4854 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4855 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4858 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4859 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4862 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4863 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4867 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4868 new_settings(hdev, cmd->sk);
4871 mgmt_pending_remove(cmd);
4873 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: enable/disable Bluetooth Secure
 * Connections (val 0x00 off, 0x01 on, 0x02 SC-only). When the
 * controller cannot act on it (powered off, no SC support, BR/EDR
 * disabled) only the host flags are updated; otherwise
 * HCI_OP_WRITE_SC_SUPPORT is issued and sc_enable_complete() commits
 * the result.
 * NOTE(review): interior lines are missing from this listing.
 */
4876 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4877 void *data, u16 len)
4879 struct mgmt_mode *cp = data;
4880 struct pending_cmd *cmd;
4881 struct hci_request req;
4885 BT_DBG("request for %s", hdev->name);
4887 if (!lmp_sc_capable(hdev) &&
4888 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4889 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4890 MGMT_STATUS_NOT_SUPPORTED);
/* For BR/EDR, SC is layered on top of SSP; reject if SSP is off. */
4892 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4893 lmp_sc_capable(hdev) &&
4894 !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4895 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4896 MGMT_STATUS_REJECTED);
4898 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4899 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4900 MGMT_STATUS_INVALID_PARAMS);
/* Host-flag-only path: nothing to send to the controller. */
4904 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4905 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4909 changed = !test_and_set_bit(HCI_SC_ENABLED,
4911 if (cp->val == 0x02)
4912 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4914 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4916 changed = test_and_clear_bit(HCI_SC_ENABLED,
4918 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4921 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4926 err = new_settings(hdev, sk);
4931 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4932 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op when both flags already match the requested mode. */
4939 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4940 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4941 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4945 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4951 hci_req_init(&req, hdev);
4952 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4953 err = hci_req_run(&req, sc_enable_complete);
4955 mgmt_pending_remove(cmd);
4960 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: control retention (HCI_KEEP_DEBUG_KEYS)
 * and active use (HCI_USE_DEBUG_KEYS, val 0x02) of SSP debug keys. When
 * the use-flag changes on a powered controller with SSP enabled, SSP
 * debug mode is toggled via HCI_OP_WRITE_SSP_DEBUG_MODE.
 * NOTE(review): interior lines (locking, if/else structure, flag
 * arguments split across missing lines) are absent from this listing.
 */
4964 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4965 void *data, u16 len)
4967 struct mgmt_mode *cp = data;
4968 bool changed, use_changed;
4971 BT_DBG("request for %s", hdev->name);
4973 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4974 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4975 MGMT_STATUS_INVALID_PARAMS);
4980 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4983 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4986 if (cp->val == 0x02)
4987 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4990 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4993 if (hdev_is_powered(hdev) && use_changed &&
4994 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4995 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4996 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4997 sizeof(mode), &mode);
5000 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5005 err = new_settings(hdev, sk);
5008 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy. Enabling
 * stores the caller-supplied IRK and marks the RPA expired so a fresh
 * resolvable private address is generated; disabling wipes the IRK.
 * Only allowed while powered off.
 * NOTE(review): interior lines (locking, err declaration, if/else
 * braces) are missing from this listing.
 */
5012 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5015 struct mgmt_cp_set_privacy *cp = cp_data;
5019 BT_DBG("request for %s", hdev->name);
5021 if (!lmp_le_capable(hdev))
5022 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5023 MGMT_STATUS_NOT_SUPPORTED);
5025 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5026 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5027 MGMT_STATUS_INVALID_PARAMS);
5029 if (hdev_is_powered(hdev))
5030 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5031 MGMT_STATUS_REJECTED);
5035 /* If user space supports this command it is also expected to
5036 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5038 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5041 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
5042 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA on next use. */
5043 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5045 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
5046 memset(hdev->irk, 0, sizeof(hdev->irk));
5047 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5050 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5055 err = new_settings(hdev, sk);
5058 hci_dev_unlock(hdev);
/* Validate the address portion of an IRK entry from user space: public
 * LE addresses are accepted as-is; random LE addresses must be static
 * (two most significant bits set).
 * NOTE(review): the return statements and default case are missing
 * from this listing.
 */
5062 static bool irk_is_valid(struct mgmt_irk_info *irk)
5064 switch (irk->addr.type) {
5065 case BDADDR_LE_PUBLIC:
5068 case BDADDR_LE_RANDOM:
5069 /* Two most significant bits shall be set */
5070 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the kernel's Identity Resolving
 * Key list with the one supplied by user space. The count is bounded
 * by what fits in a u16-sized payload and the exact payload length is
 * enforced, every entry is validated before any state is touched, and
 * finally HCI_RPA_RESOLVING is set since user space has proven it can
 * handle IRKs.
 * NOTE(review): interior lines (locking, braces, some BT_ERR arguments)
 * are missing from this listing.
 */
5078 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5081 struct mgmt_cp_load_irks *cp = cp_data;
5082 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5083 sizeof(struct mgmt_irk_info));
5084 u16 irk_count, expected_len;
5087 BT_DBG("request for %s", hdev->name);
5089 if (!lmp_le_capable(hdev))
5090 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5091 MGMT_STATUS_NOT_SUPPORTED);
/* Bound the count before using it to compute expected_len, so the
 * u16 arithmetic below cannot overflow.
 */
5093 irk_count = __le16_to_cpu(cp->irk_count);
5094 if (irk_count > max_irk_count) {
5095 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5096 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5097 MGMT_STATUS_INVALID_PARAMS);
5100 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5101 if (expected_len != len) {
5102 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5104 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5105 MGMT_STATUS_INVALID_PARAMS);
5108 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries up front so the list is replaced atomically. */
5110 for (i = 0; i < irk_count; i++) {
5111 struct mgmt_irk_info *key = &cp->irks[i];
5113 if (!irk_is_valid(key))
5114 return cmd_status(sk, hdev->id,
5116 MGMT_STATUS_INVALID_PARAMS);
5121 hci_smp_irks_clear(hdev);
5123 for (i = 0; i < irk_count; i++) {
5124 struct mgmt_irk_info *irk = &cp->irks[i];
5127 if (irk->addr.type == BDADDR_LE_PUBLIC)
5128 addr_type = ADDR_LE_DEV_PUBLIC;
5130 addr_type = ADDR_LE_DEV_RANDOM;
5132 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5136 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5138 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5140 hci_dev_unlock(hdev);
/* Validate a Long Term Key entry from user space: master must be a
 * boolean, and the address must be LE public or a static random
 * address (two most significant bits set).
 * NOTE(review): the return statements and default case are missing
 * from this listing.
 */
5145 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5147 if (key->master != 0x00 && key->master != 0x01)
5150 switch (key->addr.type) {
5151 case BDADDR_LE_PUBLIC:
5154 case BDADDR_LE_RANDOM:
5155 /* Two most significant bits shall be set */
5156 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the SMP LTK store with
 * the user-space supplied list. Mirrors load_irks(): bound the count,
 * enforce exact payload length, validate every entry, then clear and
 * repopulate. The mgmt key type is mapped to an SMP key type plus an
 * authenticated flag.
 * NOTE(review): interior lines (locking, break statements, braces,
 * trailing hci_add_ltk arguments) are missing from this listing.
 */
5164 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5165 void *cp_data, u16 len)
5167 struct mgmt_cp_load_long_term_keys *cp = cp_data;
5168 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5169 sizeof(struct mgmt_ltk_info));
5170 u16 key_count, expected_len;
5173 BT_DBG("request for %s", hdev->name);
5175 if (!lmp_le_capable(hdev))
5176 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5177 MGMT_STATUS_NOT_SUPPORTED);
5179 key_count = __le16_to_cpu(cp->key_count);
5180 if (key_count > max_key_count) {
5181 BT_ERR("load_ltks: too big key_count value %u", key_count);
5182 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5183 MGMT_STATUS_INVALID_PARAMS);
5186 expected_len = sizeof(*cp) + key_count *
5187 sizeof(struct mgmt_ltk_info);
5188 if (expected_len != len) {
5189 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5191 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5192 MGMT_STATUS_INVALID_PARAMS);
5195 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all entries before mutating any state. */
5197 for (i = 0; i < key_count; i++) {
5198 struct mgmt_ltk_info *key = &cp->keys[i];
5200 if (!ltk_is_valid(key))
5201 return cmd_status(sk, hdev->id,
5202 MGMT_OP_LOAD_LONG_TERM_KEYS,
5203 MGMT_STATUS_INVALID_PARAMS);
5208 hci_smp_ltks_clear(hdev);
5210 for (i = 0; i < key_count; i++) {
5211 struct mgmt_ltk_info *key = &cp->keys[i];
5212 u8 type, addr_type, authenticated;
5214 if (key->addr.type == BDADDR_LE_PUBLIC)
5215 addr_type = ADDR_LE_DEV_PUBLIC;
5217 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt LTK type to SMP key type + authenticated flag;
 * for legacy pairing the master bit picks master vs. slave key.
 */
5219 switch (key->type) {
5220 case MGMT_LTK_UNAUTHENTICATED:
5221 authenticated = 0x00;
5222 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5224 case MGMT_LTK_AUTHENTICATED:
5225 authenticated = 0x01;
5226 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5228 case MGMT_LTK_P256_UNAUTH:
5229 authenticated = 0x00;
5230 type = SMP_LTK_P256;
5232 case MGMT_LTK_P256_AUTH:
5233 authenticated = 0x01;
5234 type = SMP_LTK_P256;
5236 case MGMT_LTK_P256_DEBUG:
5237 authenticated = 0x00;
5238 type = SMP_LTK_P256_DEBUG;
5243 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5244 authenticated, key->val, key->enc_size, key->ediv,
5248 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5251 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Connection Information: build the
 * reply from the cached hci_conn values on success or invalid
 * sentinels on failure, then drop the connection reference taken when
 * the pending command was created.
 * NOTE(review): memset of rp, braces and hci_conn_put appear to be
 * among the lines missing from this listing.
 */
5256 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5258 struct hci_conn *conn = cmd->user_data;
5259 struct mgmt_rp_get_conn_info rp;
5262 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5264 if (status == MGMT_STATUS_SUCCESS) {
5265 rp.rssi = conn->rssi;
5266 rp.tx_power = conn->tx_power;
5267 rp.max_tx_power = conn->max_tx_power;
5269 rp.rssi = HCI_RSSI_INVALID;
5270 rp.tx_power = HCI_TX_POWER_INVALID;
5271 rp.max_tx_power = HCI_TX_POWER_INVALID;
5274 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5277 hci_conn_drop(conn);
/* HCI request completion for the RSSI / TX power refresh issued by
 * get_conn_info(): recover the connection handle from whichever command
 * was sent last, find the matching connection and pending command, and
 * complete it with the resulting status.
 * NOTE(review): interior lines (locking, if/else around 5305-5314,
 * goto labels, braces) are missing from this listing.
 */
5283 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5286 struct hci_cp_read_rssi *cp;
5287 struct pending_cmd *cmd;
5288 struct hci_conn *conn;
5292 BT_DBG("status 0x%02x", hci_status);
5296 /* Commands sent in request are either Read RSSI or Read Transmit Power
5297 * Level so we check which one was last sent to retrieve connection
5298 * handle. Both commands have handle as first parameter so it's safe to
5299 * cast data on the same command struct.
5301 * First command sent is always Read RSSI and we fail only if it fails.
5302 * In other case we simply override error to indicate success as we
5303 * already remembered if TX power value is actually valid.
5305 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5307 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5308 status = MGMT_STATUS_SUCCESS;
5310 status = mgmt_status(hci_status);
5314 BT_ERR("invalid sent_cmd in conn_info response");
5318 handle = __le16_to_cpu(cp->handle);
5319 conn = hci_conn_hash_lookup_handle(hdev, handle);
5321 BT_ERR("unknown handle (%d) in conn_info response", handle);
5325 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5329 cmd->cmd_complete(cmd, status);
5330 mgmt_pending_remove(cmd);
5333 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: return RSSI / TX power / max TX power
 * for an existing connection. Cached values newer than a randomized
 * age threshold are returned directly; otherwise Read RSSI (and, where
 * needed, Read Transmit Power Level) HCI commands are issued and the
 * reply is deferred to conn_info_refresh_complete().
 * NOTE(review): interior lines (locking, goto labels, error handling
 * after mgmt_pending_add, braces) are missing from this listing.
 */
5336 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5339 struct mgmt_cp_get_conn_info *cp = data;
5340 struct mgmt_rp_get_conn_info rp;
5341 struct hci_conn *conn;
5342 unsigned long conn_info_age;
5345 BT_DBG("%s", hdev->name);
5347 memset(&rp, 0, sizeof(rp));
5348 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5349 rp.addr.type = cp->addr.type;
5351 if (!bdaddr_type_is_valid(cp->addr.type))
5352 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5353 MGMT_STATUS_INVALID_PARAMS,
5358 if (!hdev_is_powered(hdev)) {
5359 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5360 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5364 if (cp->addr.type == BDADDR_BREDR)
5365 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5368 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5370 if (!conn || conn->state != BT_CONNECTED) {
5371 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5372 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Only one outstanding query per connection. */
5376 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5377 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5378 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5382 /* To avoid client trying to guess when to poll again for information we
5383 * calculate conn info age as random value between min/max set in hdev.
5385 conn_info_age = hdev->conn_info_min_age +
5386 prandom_u32_max(hdev->conn_info_max_age -
5387 hdev->conn_info_min_age);
5389 /* Query controller to refresh cached values if they are too old or were
5392 if (time_after(jiffies, conn->conn_info_timestamp +
5393 msecs_to_jiffies(conn_info_age)) ||
5394 !conn->conn_info_timestamp) {
5395 struct hci_request req;
5396 struct hci_cp_read_tx_power req_txp_cp;
5397 struct hci_cp_read_rssi req_rssi_cp;
5398 struct pending_cmd *cmd;
5400 hci_req_init(&req, hdev);
5401 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5402 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5405 /* For LE links TX power does not change thus we don't need to
5406 * query for it once value is known.
5408 if (!bdaddr_type_is_le(cp->addr.type) ||
5409 conn->tx_power == HCI_TX_POWER_INVALID) {
5410 req_txp_cp.handle = cpu_to_le16(conn->handle);
5411 req_txp_cp.type = 0x00;
5412 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5413 sizeof(req_txp_cp), &req_txp_cp);
5416 /* Max TX power needs to be read only once per connection */
5417 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5418 req_txp_cp.handle = cpu_to_le16(conn->handle);
5419 req_txp_cp.type = 0x01;
5420 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5421 sizeof(req_txp_cp), &req_txp_cp);
5424 err = hci_req_run(&req, conn_info_refresh_complete);
5428 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold both an hci_conn_hold and an hci_conn_get reference for the
 * lifetime of the pending command; released in the completion path.
 */
5435 hci_conn_hold(conn);
5436 cmd->user_data = hci_conn_get(conn);
5437 cmd->cmd_complete = conn_info_cmd_complete;
5439 conn->conn_info_timestamp = jiffies;
5441 /* Cache is valid, just reply with values cached in hci_conn */
5442 rp.rssi = conn->rssi;
5443 rp.tx_power = conn->tx_power;
5444 rp.max_tx_power = conn->max_tx_power;
5446 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5447 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5451 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Information: fill in the local
 * clock (and piconet clock/accuracy when a connection was involved)
 * and release the references held by the pending command.
 * NOTE(review): the status checks, hci_dev_put/hci_conn_put and braces
 * are among the lines missing from this listing.
 */
5455 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5457 struct hci_conn *conn = cmd->user_data;
5458 struct mgmt_rp_get_clock_info rp;
5459 struct hci_dev *hdev;
5462 memset(&rp, 0, sizeof(rp));
5463 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5468 hdev = hci_dev_get(cmd->index);
5470 rp.local_clock = cpu_to_le32(hdev->clock);
5475 rp.piconet_clock = cpu_to_le32(conn->clock);
5476 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5480 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5484 hci_conn_drop(conn);
/* HCI request completion for MGMT_OP_GET_CLOCK_INFO: look up the
 * connection from the last Read Clock command when it targeted the
 * piconet clock (which != 0), then complete the matching pending
 * command with the mapped status.
 * NOTE(review): locking, null checks, the conn initialization for the
 * local-clock case, and braces are missing from this listing.
 */
5491 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5493 struct hci_cp_read_clock *hci_cp;
5494 struct pending_cmd *cmd;
5495 struct hci_conn *conn;
5497 BT_DBG("%s status %u", hdev->name, status);
5501 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5505 if (hci_cp->which) {
5506 u16 handle = __le16_to_cpu(hci_cp->handle);
5507 conn = hci_conn_hash_lookup_handle(hdev, handle);
5512 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5516 cmd->cmd_complete(cmd, mgmt_status(status));
5517 mgmt_pending_remove(cmd);
5520 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler: read the local Bluetooth clock and,
 * when a BR/EDR peer address is given, that connection's piconet clock
 * via HCI_OP_READ_CLOCK. Completion is deferred to
 * get_clock_info_complete() / clock_info_cmd_complete().
 * NOTE(review): interior lines (locking, goto labels, error handling
 * after mgmt_pending_add, braces) are missing from this listing.
 */
5523 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5526 struct mgmt_cp_get_clock_info *cp = data;
5527 struct mgmt_rp_get_clock_info rp;
5528 struct hci_cp_read_clock hci_cp;
5529 struct pending_cmd *cmd;
5530 struct hci_request req;
5531 struct hci_conn *conn;
5534 BT_DBG("%s", hdev->name);
5536 memset(&rp, 0, sizeof(rp));
5537 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5538 rp.addr.type = cp->addr.type;
/* The piconet clock only exists for BR/EDR links. */
5540 if (cp->addr.type != BDADDR_BREDR)
5541 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5542 MGMT_STATUS_INVALID_PARAMS,
5547 if (!hdev_is_powered(hdev)) {
5548 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5549 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BDADDR_ANY means "local clock only"; otherwise the named peer must
 * be connected.
 */
5553 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5554 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5556 if (!conn || conn->state != BT_CONNECTED) {
5557 err = cmd_complete(sk, hdev->id,
5558 MGMT_OP_GET_CLOCK_INFO,
5559 MGMT_STATUS_NOT_CONNECTED,
5567 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5573 cmd->cmd_complete = clock_info_cmd_complete;
5575 hci_req_init(&req, hdev);
/* which = 0x00 (zeroed struct) reads the local clock. */
5577 memset(&hci_cp, 0, sizeof(hci_cp));
5578 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5581 hci_conn_hold(conn);
5582 cmd->user_data = hci_conn_get(conn);
5584 hci_cp.handle = cpu_to_le16(conn->handle);
5585 hci_cp.which = 0x01; /* Piconet clock */
5586 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5589 err = hci_req_run(&req, get_clock_info_complete);
5591 mgmt_pending_remove(cmd);
5594 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists and
 * is fully established (BT_CONNECTED).
 * NOTE(review): the return statements are missing from this listing.
 */
5598 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5600 struct hci_conn *conn;
5602 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5606 if (conn->dst_type != type)
5609 if (conn->state != BT_CONNECTED)
5615 /* This function requires the caller holds hdev->lock */
/* Create-or-update the connection parameters for addr/addr_type and
 * move them onto the action list matching the new auto_connect policy
 * (pend_le_reports for REPORT, pend_le_conns for DIRECT/ALWAYS when not
 * already connected), updating the background scan accordingly.
 * NOTE(review): break statements, braces and the return are missing
 * from this listing.
 */
5616 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5617 u8 addr_type, u8 auto_connect)
5619 struct hci_dev *hdev = req->hdev;
5620 struct hci_conn_params *params;
5622 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
5626 if (params->auto_connect == auto_connect)
/* Detach from any previous action list before re-filing. */
5629 list_del_init(&params->action);
5631 switch (auto_connect) {
5632 case HCI_AUTO_CONN_DISABLED:
5633 case HCI_AUTO_CONN_LINK_LOSS:
5634 __hci_update_background_scan(req);
5636 case HCI_AUTO_CONN_REPORT:
5637 list_add(&params->action, &hdev->pend_le_reports);
5638 __hci_update_background_scan(req);
5640 case HCI_AUTO_CONN_DIRECT:
5641 case HCI_AUTO_CONN_ALWAYS:
5642 if (!is_connected(hdev, addr, addr_type)) {
5643 list_add(&params->action, &hdev->pend_le_conns);
5644 __hci_update_background_scan(req);
5649 params->auto_connect = auto_connect;
5651 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Broadcast an MGMT Device Added event for bdaddr/type with the given
 * auto-connect action, skipping the originating socket sk.
 * NOTE(review): the line assigning ev.action is missing from this
 * listing.
 */
5657 static void device_added(struct sock *sk, struct hci_dev *hdev,
5658 bdaddr_t *bdaddr, u8 type, u8 action)
5660 struct mgmt_ev_device_added ev;
5662 bacpy(&ev.addr.bdaddr, bdaddr);
5663 ev.addr.type = type;
5666 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion for MGMT_OP_ADD_DEVICE: complete and remove
 * the pending command with the mapped status.
 * NOTE(review): the lock call and null-check of cmd are missing from
 * this listing.
 */
5669 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5671 struct pending_cmd *cmd;
5673 BT_DBG("status 0x%02x", status);
5677 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5681 cmd->cmd_complete(cmd, mgmt_status(status));
5682 mgmt_pending_remove(cmd);
5685 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_DEVICE handler: whitelist a BR/EDR device (action 0x01
 * only) or set the LE auto-connect policy for a peer (0x00 background
 * report, 0x01 direct, 0x02 always), then update page scan or
 * background scan as needed and emit Device Added.
 * NOTE(review): interior lines (locking, goto labels, error paths,
 * braces) are missing from this listing.
 */
5688 static int add_device(struct sock *sk, struct hci_dev *hdev,
5689 void *data, u16 len)
5691 struct mgmt_cp_add_device *cp = data;
5692 struct pending_cmd *cmd;
5693 struct hci_request req;
5694 u8 auto_conn, addr_type;
5697 BT_DBG("%s", hdev->name);
5699 if (!bdaddr_type_is_valid(cp->addr.type) ||
5700 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5701 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5702 MGMT_STATUS_INVALID_PARAMS,
5703 &cp->addr, sizeof(cp->addr));
5705 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5706 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5707 MGMT_STATUS_INVALID_PARAMS,
5708 &cp->addr, sizeof(cp->addr));
5710 hci_req_init(&req, hdev);
5714 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5720 cmd->cmd_complete = addr_cmd_complete;
5722 if (cp->addr.type == BDADDR_BREDR) {
5723 /* Only incoming connections action is supported for now */
5724 if (cp->action != 0x01) {
5725 err = cmd->cmd_complete(cmd,
5726 MGMT_STATUS_INVALID_PARAMS);
5727 mgmt_pending_remove(cmd);
5731 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5736 __hci_update_page_scan(&req);
5741 if (cp->addr.type == BDADDR_LE_PUBLIC)
5742 addr_type = ADDR_LE_DEV_PUBLIC;
5744 addr_type = ADDR_LE_DEV_RANDOM;
5746 if (cp->action == 0x02)
5747 auto_conn = HCI_AUTO_CONN_ALWAYS;
5748 else if (cp->action == 0x01)
5749 auto_conn = HCI_AUTO_CONN_DIRECT;
5751 auto_conn = HCI_AUTO_CONN_REPORT;
5753 /* If the connection parameters don't exist for this device,
5754 * they will be created and configured with defaults.
5756 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5758 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5759 mgmt_pending_remove(cmd);
5764 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5766 err = hci_req_run(&req, add_device_complete);
5768 /* ENODATA means no HCI commands were needed (e.g. if
5769 * the adapter is powered off).
5771 if (err == -ENODATA)
5772 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5773 mgmt_pending_remove(cmd);
5777 hci_dev_unlock(hdev);
/* Broadcast an MGMT Device Removed event for bdaddr/type, skipping the
 * originating socket sk.
 */
5781 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5782 bdaddr_t *bdaddr, u8 type)
5784 struct mgmt_ev_device_removed ev;
5786 bacpy(&ev.addr.bdaddr, bdaddr);
5787 ev.addr.type = type;
5789 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion for MGMT_OP_REMOVE_DEVICE: complete and remove
 * the pending command with the mapped status.
 * NOTE(review): the lock call and null-check of cmd are missing from
 * this listing.
 */
5792 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5794 struct pending_cmd *cmd;
5796 BT_DBG("status 0x%02x", status);
5800 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5804 cmd->cmd_complete(cmd, mgmt_status(status));
5805 mgmt_pending_remove(cmd);
5808 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_DEVICE handler: remove one device (BR/EDR whitelist
 * entry or LE connection parameters) when an address is given, or —
 * with BDADDR_ANY — wipe the whole whitelist and all non-disabled LE
 * connection parameters. Page scan / background scan are refreshed and
 * Device Removed events emitted for every removal.
 * NOTE(review): interior lines (locking, goto labels, list_del of the
 * whitelist entry, kfree of params, braces) are missing from this
 * listing.
 */
5811 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5812 void *data, u16 len)
5814 struct mgmt_cp_remove_device *cp = data;
5815 struct pending_cmd *cmd;
5816 struct hci_request req;
5819 BT_DBG("%s", hdev->name);
5821 hci_req_init(&req, hdev);
5825 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5831 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove exactly one device. */
5833 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5834 struct hci_conn_params *params;
5837 if (!bdaddr_type_is_valid(cp->addr.type)) {
5838 err = cmd->cmd_complete(cmd,
5839 MGMT_STATUS_INVALID_PARAMS);
5840 mgmt_pending_remove(cmd);
5844 if (cp->addr.type == BDADDR_BREDR) {
5845 err = hci_bdaddr_list_del(&hdev->whitelist,
5849 err = cmd->cmd_complete(cmd,
5850 MGMT_STATUS_INVALID_PARAMS);
5851 mgmt_pending_remove(cmd);
5855 __hci_update_page_scan(&req);
5857 device_removed(sk, hdev, &cp->addr.bdaddr,
5862 if (cp->addr.type == BDADDR_LE_PUBLIC)
5863 addr_type = ADDR_LE_DEV_PUBLIC;
5865 addr_type = ADDR_LE_DEV_RANDOM;
5867 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5870 err = cmd->cmd_complete(cmd,
5871 MGMT_STATUS_INVALID_PARAMS);
5872 mgmt_pending_remove(cmd);
/* Disabled entries are internal state, not user-visible devices. */
5876 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5877 err = cmd->cmd_complete(cmd,
5878 MGMT_STATUS_INVALID_PARAMS);
5879 mgmt_pending_remove(cmd);
5883 list_del(&params->action);
5884 list_del(&params->list);
5886 __hci_update_background_scan(&req);
5888 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: bulk removal; only address type 0 is valid here. */
5890 struct hci_conn_params *p, *tmp;
5891 struct bdaddr_list *b, *btmp;
5893 if (cp->addr.type) {
5894 err = cmd->cmd_complete(cmd,
5895 MGMT_STATUS_INVALID_PARAMS);
5896 mgmt_pending_remove(cmd);
5900 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5901 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5906 __hci_update_page_scan(&req);
5908 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5909 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5911 device_removed(sk, hdev, &p->addr, p->addr_type);
5912 list_del(&p->action);
5917 BT_DBG("All LE connection parameters were removed");
5919 __hci_update_background_scan(&req);
5923 err = hci_req_run(&req, remove_device_complete);
5925 /* ENODATA means no HCI commands were needed (e.g. if
5926 * the adapter is powered off).
5928 if (err == -ENODATA)
5929 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5930 mgmt_pending_remove(cmd);
5934 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace stored LE connection
 * parameters with the user-space supplied list. Count and payload
 * length are validated like in load_irks(); invalid individual entries
 * are logged and skipped rather than failing the whole command.
 * NOTE(review): interior lines (locking, braces, continue statements,
 * some argument lines) are missing from this listing.
 */
5938 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5941 struct mgmt_cp_load_conn_param *cp = data;
5942 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5943 sizeof(struct mgmt_conn_param));
5944 u16 param_count, expected_len;
5947 if (!lmp_le_capable(hdev))
5948 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5949 MGMT_STATUS_NOT_SUPPORTED);
5951 param_count = __le16_to_cpu(cp->param_count);
5952 if (param_count > max_param_count) {
5953 BT_ERR("load_conn_param: too big param_count value %u",
5955 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5956 MGMT_STATUS_INVALID_PARAMS);
5959 expected_len = sizeof(*cp) + param_count *
5960 sizeof(struct mgmt_conn_param);
5961 if (expected_len != len) {
5962 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5964 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5965 MGMT_STATUS_INVALID_PARAMS);
5968 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop stale disabled entries before loading the new set. */
5972 hci_conn_params_clear_disabled(hdev);
5974 for (i = 0; i < param_count; i++) {
5975 struct mgmt_conn_param *param = &cp->params[i];
5976 struct hci_conn_params *hci_param;
5977 u16 min, max, latency, timeout;
5980 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5983 if (param->addr.type == BDADDR_LE_PUBLIC) {
5984 addr_type = ADDR_LE_DEV_PUBLIC;
5985 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5986 addr_type = ADDR_LE_DEV_RANDOM;
5988 BT_ERR("Ignoring invalid connection parameters");
5992 min = le16_to_cpu(param->min_interval);
5993 max = le16_to_cpu(param->max_interval);
5994 latency = le16_to_cpu(param->latency);
5995 timeout = le16_to_cpu(param->timeout);
5997 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5998 min, max, latency, timeout);
6000 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6001 BT_ERR("Ignoring invalid connection parameters");
6005 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6008 BT_ERR("Failed to add connection parameters");
6012 hci_param->conn_min_interval = min;
6013 hci_param->conn_max_interval = max;
6014 hci_param->conn_latency = latency;
6015 hci_param->supervision_timeout = timeout;
6018 hci_dev_unlock(hdev);
6020 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: mark whether the controller is
 * configured by external means. Rejected while powered; requires the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk; cp->config must be 0x00 or 0x01.
 * On a configuration-state transition the index is re-announced as
 * configured/unconfigured accordingly.
 * NOTE(review): listing has elided lines; not all statements visible.
 */
6023 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6024 void *data, u16 len)
6026 struct mgmt_cp_set_external_config *cp = data;
6030 BT_DBG("%s", hdev->name);
6032 if (hdev_is_powered(hdev))
6033 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6034 MGMT_STATUS_REJECTED);
6036 if (cp->config != 0x00 && cp->config != 0x01)
6037 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6038 MGMT_STATUS_INVALID_PARAMS);
6040 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6041 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6042 MGMT_STATUS_NOT_SUPPORTED);
/* Track whether the HCI_EXT_CONFIGURED flag actually changed. */
6047 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
6050 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
6053 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6060 err = new_options(hdev, sk);
/* Unconfigured-flag vs. configured-state mismatch => index changed. */
6062 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
6063 mgmt_index_removed(hdev);
6065 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6066 set_bit(HCI_CONFIG, &hdev->dev_flags);
6067 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6069 queue_work(hdev->req_workqueue, &hdev->power_on);
6071 set_bit(HCI_RAW, &hdev->flags);
6072 mgmt_index_added(hdev);
6077 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: store a public bdaddr for
 * controllers that need one programmed (hdev->set_bdaddr callback).
 * Rejected while powered; BDADDR_ANY is invalid; NOT_SUPPORTED when
 * the driver provides no set_bdaddr hook. If the device becomes
 * configured as a result, it is re-registered and powered on.
 * NOTE(review): listing has elided lines; not all statements visible.
 */
6081 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6082 void *data, u16 len)
6084 struct mgmt_cp_set_public_address *cp = data;
6088 BT_DBG("%s", hdev->name);
6090 if (hdev_is_powered(hdev))
6091 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6092 MGMT_STATUS_REJECTED);
6094 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6095 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6096 MGMT_STATUS_INVALID_PARAMS);
6098 if (!hdev->set_bdaddr)
6099 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6100 MGMT_STATUS_NOT_SUPPORTED);
/* "changed" is true when the stored public address differs. */
6104 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6105 bacpy(&hdev->public_addr, &cp->bdaddr);
6107 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6114 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6115 err = new_options(hdev, sk);
6117 if (is_configured(hdev)) {
6118 mgmt_index_removed(hdev);
6120 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
6122 set_bit(HCI_CONFIG, &hdev->dev_flags);
6123 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6125 queue_work(hdev->req_workqueue, &hdev->power_on);
6129 hci_dev_unlock(hdev);
/* Dispatch table for mgmt opcodes, indexed by opcode value. Each entry
 * pairs a handler with a var_len flag (true: data_len is a minimum,
 * false: exact length required — enforced in mgmt_control()) and the
 * expected command-parameter size. Entry order must match the opcode
 * numbering in mgmt.h; slot 0 is intentionally NULL.
 */
6133 static const struct mgmt_handler {
6134 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
6138 } mgmt_handlers[] = {
6139 { NULL }, /* 0x0000 (no command) */
6140 { read_version, false, MGMT_READ_VERSION_SIZE },
6141 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
6142 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
6143 { read_controller_info, false, MGMT_READ_INFO_SIZE },
6144 { set_powered, false, MGMT_SETTING_SIZE },
6145 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
6146 { set_connectable, false, MGMT_SETTING_SIZE },
6147 { set_fast_connectable, false, MGMT_SETTING_SIZE },
6148 { set_bondable, false, MGMT_SETTING_SIZE },
6149 { set_link_security, false, MGMT_SETTING_SIZE },
6150 { set_ssp, false, MGMT_SETTING_SIZE },
6151 { set_hs, false, MGMT_SETTING_SIZE },
6152 { set_le, false, MGMT_SETTING_SIZE },
6153 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
6154 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
6155 { add_uuid, false, MGMT_ADD_UUID_SIZE },
6156 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
6157 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
6158 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
6159 { disconnect, false, MGMT_DISCONNECT_SIZE },
6160 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
6161 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
6162 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6163 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
6164 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
6165 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6166 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
6167 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
6168 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6169 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
6170 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6171 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6172 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
6173 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6174 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
6175 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
6176 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
6177 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
6178 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
6179 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
6180 { set_advertising, false, MGMT_SETTING_SIZE },
6181 { set_bredr, false, MGMT_SETTING_SIZE },
6182 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
6183 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
6184 { set_secure_conn, false, MGMT_SETTING_SIZE },
6185 { set_debug_keys, false, MGMT_SETTING_SIZE },
6186 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
6187 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
6188 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
6189 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
6190 { add_device, false, MGMT_ADD_DEVICE_SIZE },
6191 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
6192 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
6193 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
6194 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
6195 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
6196 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
6197 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for mgmt commands received on the HCI control socket.
 * Copies the message, validates header/opcode/index/length, enforces
 * the per-index restrictions (setup/config/user-channel, and the
 * unconfigured-controller whitelist of opcodes), then dispatches via
 * mgmt_handlers[]. Returns msglen on success or a negative errno.
 * NOTE(review): listing has elided lines; cleanup paths not visible.
 */
6200 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6204 struct mgmt_hdr *hdr;
6205 u16 opcode, index, len;
6206 struct hci_dev *hdev = NULL;
6207 const struct mgmt_handler *handler;
6210 BT_DBG("got %zu bytes", msglen);
6212 if (msglen < sizeof(*hdr))
6215 buf = kmalloc(msglen, GFP_KERNEL);
6219 if (memcpy_from_msg(buf, msg, msglen)) {
6225 opcode = __le16_to_cpu(hdr->opcode);
6226 index = __le16_to_cpu(hdr->index);
6227 len = __le16_to_cpu(hdr->len);
/* Header length field must agree with the received payload size. */
6229 if (len != msglen - sizeof(*hdr)) {
6234 if (index != MGMT_INDEX_NONE) {
6235 hdev = hci_dev_get(index);
6237 err = cmd_status(sk, index, opcode,
6238 MGMT_STATUS_INVALID_INDEX);
/* Controllers in setup/config or claimed by a user channel are
 * invisible to mgmt clients.
 */
6242 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6243 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6244 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6245 err = cmd_status(sk, index, opcode,
6246 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers only accept the config-related opcodes. */
6250 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6251 opcode != MGMT_OP_READ_CONFIG_INFO &&
6252 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
6253 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
6254 err = cmd_status(sk, index, opcode,
6255 MGMT_STATUS_INVALID_INDEX);
6260 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
6261 mgmt_handlers[opcode].func == NULL) {
6262 BT_DBG("Unknown op %u", opcode);
6263 err = cmd_status(sk, index, opcode,
6264 MGMT_STATUS_UNKNOWN_COMMAND);
/* Index-less opcodes must not carry an index, and vice versa. */
6268 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6269 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6270 err = cmd_status(sk, index, opcode,
6271 MGMT_STATUS_INVALID_INDEX);
6275 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
6276 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6277 err = cmd_status(sk, index, opcode,
6278 MGMT_STATUS_INVALID_INDEX);
6282 handler = &mgmt_handlers[opcode];
/* var_len handlers treat data_len as a minimum; others need exact. */
6284 if ((handler->var_len && len < handler->data_len) ||
6285 (!handler->var_len && len != handler->data_len)) {
6286 err = cmd_status(sk, index, opcode,
6287 MGMT_STATUS_INVALID_PARAMS);
6292 mgmt_init_hdev(sk, hdev);
6294 cp = buf + sizeof(*hdr);
6296 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller. Only BR/EDR-type, non-raw
 * devices are announced; unconfigured controllers emit the UNCONF
 * variant of the index-added event.
 */
6310 void mgmt_index_added(struct hci_dev *hdev)
6312 if (hdev->dev_type != HCI_BREDR)
6315 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6318 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6319 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6321 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce controller removal. Completes all pending mgmt commands
 * with INVALID_INDEX before emitting the (unconf-)index-removed event;
 * raw and non-BR/EDR devices are skipped.
 */
6324 void mgmt_index_removed(struct hci_dev *hdev)
6326 u8 status = MGMT_STATUS_INVALID_INDEX;
6328 if (hdev->dev_type != HCI_BREDR)
6331 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 matches every pending command for this device. */
6334 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6336 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6337 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6339 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6342 /* This function requires the caller holds hdev->lock */
/* Re-sort every stored LE connection parameter entry back onto the
 * pend_le_conns / pend_le_reports action lists according to its
 * auto_connect policy, then refresh background scanning. Used when
 * powering on, including after a soft AUTO_OFF.
 */
6343 static void restart_le_actions(struct hci_request *req)
6345 struct hci_dev *hdev = req->hdev;
6346 struct hci_conn_params *p;
6348 list_for_each_entry(p, &hdev->le_conn_params, list) {
6349 /* Needed for AUTO_OFF case where might not "really"
6350 * have been powered off.
6352 list_del_init(&p->action);
6354 switch (p->auto_connect) {
6355 case HCI_AUTO_CONN_DIRECT:
6356 case HCI_AUTO_CONN_ALWAYS:
6357 list_add(&p->action, &hdev->pend_le_conns);
6359 case HCI_AUTO_CONN_REPORT:
6360 list_add(&p->action, &hdev->pend_le_reports);
6367 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence: answers
 * all pending SET_POWERED commands and broadcasts the new settings.
 * NOTE(review): listing has elided lines (e.g. the matching
 * hci_dev_lock and SMP registration calls are not visible here).
 */
6370 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6372 struct cmd_lookup match = { NULL, hdev };
6374 BT_DBG("status 0x%02x", status);
6377 /* Register the available SMP channels (BR/EDR and LE) only
6378 * when successfully powering on the controller. This late
6379 * registration is required so that LE SMP can clearly
6380 * decide if the public address or static address is used.
6387 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6389 new_settings(hdev, match.sk);
6391 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt flags after power-on: SSP/SC host support, LE
 * host support, advertising data, auth enable, fast-connectable and
 * page-scan for BR/EDR. Returns the result of hci_req_run() with
 * powered_complete() as completion.
 * NOTE(review): listing has elided lines; some locals/assignments
 * (mode, support, cp setup, link_sec decl) are not visible here.
 */
6397 static int powered_update_hci(struct hci_dev *hdev)
6399 struct hci_request req;
6402 hci_req_init(&req, hdev);
6404 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6405 !lmp_host_ssp_capable(hdev)) {
6408 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6410 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6413 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6414 sizeof(support), &support);
6418 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6419 lmp_bredr_capable(hdev)) {
6420 struct hci_cp_write_le_host_supported cp;
6425 /* Check first if we already have the right
6426 * host state (host features set)
6428 if (cp.le != lmp_host_le_capable(hdev) ||
6429 cp.simul != lmp_host_le_br_capable(hdev))
6430 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6434 if (lmp_le_capable(hdev)) {
6435 /* Make sure the controller has a good default for
6436 * advertising data. This also applies to the case
6437 * where BR/EDR was toggled during the AUTO_OFF phase.
6439 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6440 update_adv_data(&req);
6441 update_scan_rsp_data(&req);
6444 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6445 enable_advertising(&req);
6447 restart_le_actions(&req);
/* Only write auth enable when it differs from the current state. */
6450 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6451 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6452 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6453 sizeof(link_sec), &link_sec);
6455 if (lmp_bredr_capable(hdev)) {
6456 write_fast_connectable(&req, false);
6457 __hci_update_page_scan(&req);
6463 return hci_req_run(&req, powered_complete);
/* Notify the mgmt layer of a power state change. On power-on, kick
 * powered_update_hci(); on power-off, fail every pending command with
 * NOT_POWERED (or INVALID_INDEX during unregistration), clear a
 * non-zero class-of-device, and broadcast new settings.
 * NOTE(review): listing has elided lines; early-return paths and
 * the final return are not visible here.
 */
6466 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6468 struct cmd_lookup match = { NULL, hdev };
6469 u8 status, zero_cod[] = { 0, 0, 0 };
6472 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6476 if (powered_update_hci(hdev) == 0)
6479 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6484 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6486 /* If the power off is because of hdev unregistration let
6487 * use the appropriate INVALID_INDEX status. Otherwise use
6488 * NOT_POWERED. We cover both scenarios here since later in
6489 * mgmt_index_removed() any hci_conn callbacks will have already
6490 * been triggered, potentially causing misleading DISCONNECTED
6493 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6494 status = MGMT_STATUS_INVALID_INDEX;
6496 status = MGMT_STATUS_NOT_POWERED;
6498 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6500 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6501 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6502 zero_cod, sizeof(zero_cod), NULL);
6505 err = new_settings(hdev, match.sk);
/* Fail the pending SET_POWERED command after a power-on error,
 * mapping -ERFKILL to MGMT_STATUS_RFKILLED and anything else to
 * MGMT_STATUS_FAILED.
 */
6513 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6515 struct pending_cmd *cmd;
6518 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6522 if (err == -ERFKILL)
6523 status = MGMT_STATUS_RFKILLED;
6525 status = MGMT_STATUS_FAILED;
6527 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6529 mgmt_pending_remove(cmd);
/* Discoverable-timeout expiry: clear both discoverable flags, drop
 * inquiry scan (keep page scan) for BR/EDR, refresh advertising data,
 * reset the timeout and broadcast new settings.
 */
6532 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6534 struct hci_request req;
6538 /* When discoverable timeout triggers, then just make sure
6539 * the limited discoverable flag is cleared. Even in the case
6540 * of a timeout triggered from general discoverable, it is
6541 * safe to unconditionally clear the flag.
6543 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6544 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6546 hci_req_init(&req, hdev);
6547 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
6548 u8 scan = SCAN_PAGE;
6549 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6550 sizeof(scan), &scan);
6553 update_adv_data(&req);
6554 hci_req_run(&req, NULL);
6556 hdev->discov_timeout = 0;
6558 new_settings(hdev, NULL);
6560 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a BR/EDR link key; store_hint tells
 * userspace whether to persist the key.
 */
6563 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6566 struct mgmt_ev_new_link_key ev;
6568 memset(&ev, 0, sizeof(ev));
6570 ev.store_hint = persistent;
6571 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6572 ev.key.addr.type = BDADDR_BREDR;
6573 ev.key.type = key->type;
6574 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6575 ev.key.pin_len = key->pin_len;
6577 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's type/authenticated state to the corresponding
 * MGMT_LTK_* constant; unknown types fall back to UNAUTHENTICATED.
 * NOTE(review): the case labels before the first two if-blocks are
 * elided from this listing.
 */
6580 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6582 switch (ltk->type) {
6585 if (ltk->authenticated)
6586 return MGMT_LTK_AUTHENTICATED;
6587 return MGMT_LTK_UNAUTHENTICATED;
6589 if (ltk->authenticated)
6590 return MGMT_LTK_P256_AUTH;
6591 return MGMT_LTK_P256_UNAUTH;
6592 case SMP_LTK_P256_DEBUG:
6593 return MGMT_LTK_P256_DEBUG;
6596 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY. Keys from non-identity (resolvable/
 * non-resolvable) random addresses get store_hint 0 since the peer's
 * address will change; (b[5] & 0xc0) == 0xc0 identifies static random
 * addresses, which are acceptable identities.
 */
6599 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6601 struct mgmt_ev_new_long_term_key ev;
6603 memset(&ev, 0, sizeof(ev));
6605 /* Devices using resolvable or non-resolvable random addresses
6606 * without providing an indentity resolving key don't require
6607 * to store long term keys. Their addresses will change the
6610 * Only when a remote device provides an identity address
6611 * make sure the long term key is stored. If the remote
6612 * identity is known, the long term keys are internally
6613 * mapped to the identity address. So allow static random
6614 * and public addresses here.
6616 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6617 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6618 ev.store_hint = 0x00;
6620 ev.store_hint = persistent;
6622 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6623 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6624 ev.key.type = mgmt_ltk_type(key);
6625 ev.key.enc_size = key->enc_size;
6626 ev.key.ediv = key->ediv;
6627 ev.key.rand = key->rand;
6629 if (key->type == SMP_LTK)
6632 memcpy(ev.key.val, key->val, sizeof(key->val));
6634 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK. Only hint storage when the device actually
 * uses an RPA (rpa != BDADDR_ANY); identity-addressed devices don't
 * need their IRK persisted.
 */
6637 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6639 struct mgmt_ev_new_irk ev;
6641 memset(&ev, 0, sizeof(ev));
6643 /* For identity resolving keys from devices that are already
6644 * using a public address or static random address, do not
6645 * ask for storing this key. The identity resolving key really
6646 * is only mandatory for devices using resovlable random
6649 * Storing all identity resolving keys has the downside that
6650 * they will be also loaded on next boot of they system. More
6651 * identity resolving keys, means more time during scanning is
6652 * needed to actually resolve these addresses.
6654 if (bacmp(&irk->rpa, BDADDR_ANY))
6655 ev.store_hint = 0x01;
6657 ev.store_hint = 0x00;
6659 bacpy(&ev.rpa, &irk->rpa);
6660 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6661 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6662 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6664 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK. Same identity-address rule as mgmt_new_ltk():
 * non-identity random addresses get store_hint 0.
 */
6667 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6670 struct mgmt_ev_new_csrk ev;
6672 memset(&ev, 0, sizeof(ev));
6674 /* Devices using resolvable or non-resolvable random addresses
6675 * without providing an indentity resolving key don't require
6676 * to store signature resolving keys. Their addresses will change
6677 * the next time around.
6679 * Only when a remote device provides an identity address
6680 * make sure the signature resolving key is stored. So allow
6681 * static random and public addresses here.
6683 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6684 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6685 ev.store_hint = 0x00;
6687 ev.store_hint = persistent;
6689 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6690 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6691 ev.key.type = csrk->type;
6692 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6694 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM so userspace can persist LE connection
 * parameters suggested by the remote; silently ignored for
 * non-identity addresses.
 */
6697 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6698 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6699 u16 max_interval, u16 latency, u16 timeout)
6701 struct mgmt_ev_new_conn_param ev;
6703 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6706 memset(&ev, 0, sizeof(ev));
6707 bacpy(&ev.addr.bdaddr, bdaddr);
6708 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6709 ev.store_hint = store_hint;
6710 ev.min_interval = cpu_to_le16(min_interval);
6711 ev.max_interval = cpu_to_le16(max_interval);
6712 ev.latency = cpu_to_le16(latency);
6713 ev.timeout = cpu_to_le16(timeout);
6715 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, data) at offset
 * eir_len in the eir buffer; returns the new length. Caller must
 * guarantee buffer capacity.
 */
6718 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6721 eir[eir_len++] = sizeof(type) + data_len;
6722 eir[eir_len++] = type;
6723 memcpy(&eir[eir_len], data, data_len);
6724 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED with EIR data: either the stored LE
 * advertising data, or (for BR/EDR) the name plus a non-zero class of
 * device, appended via eir_append_data().
 * NOTE(review): listing elides some lines (e.g. the buf declaration
 * size and the name-presence check before the name append).
 */
6729 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6730 u32 flags, u8 *name, u8 name_len)
6733 struct mgmt_ev_device_connected *ev = (void *) buf;
6736 bacpy(&ev->addr.bdaddr, &conn->dst);
6737 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6739 ev->flags = __cpu_to_le32(flags);
6741 /* We must ensure that the EIR Data fields are ordered and
6742 * unique. Keep it simple for now and avoid the problem by not
6743 * adding any BR/EDR data to the LE adv.
6745 if (conn->le_adv_data_len > 0) {
6746 memcpy(&ev->eir[eir_len],
6747 conn->le_adv_data, conn->le_adv_data_len);
6748 eir_len = conn->le_adv_data_len;
6751 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6754 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6755 eir_len = eir_append_data(ev->eir, eir_len,
6757 conn->dev_class, 3);
6760 ev->eir_len = cpu_to_le16(eir_len);
6762 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6763 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT with
 * success and hand the command's socket back through *data so the
 * caller can target the disconnected event.
 */
6766 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6768 struct sock **sk = data;
6770 cmd->cmd_complete(cmd, 0);
6775 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: announce the unpaired device and
 * complete the pending UNPAIR_DEVICE command with success.
 */
6778 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6780 struct hci_dev *hdev = data;
6781 struct mgmt_cp_unpair_device *cp = cmd->param;
6783 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6785 cmd->cmd_complete(cmd, 0);
6786 mgmt_pending_remove(cmd);
/* Report whether a mgmt-initiated power-off is in progress (a pending
 * SET_POWERED command exists).
 * NOTE(review): the body's flag check and return are elided from this
 * listing.
 */
6789 bool mgmt_powering_down(struct hci_dev *hdev)
6791 struct pending_cmd *cmd;
6792 struct mgmt_mode *cp;
6794 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED. If this is the last connection
 * during a mgmt power-off, expedite the power_off work. Pending
 * DISCONNECT commands are completed first (their socket becomes the
 * skip-socket for the event); pending UNPAIR_DEVICE commands are
 * answered afterwards.
 */
6805 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6806 u8 link_type, u8 addr_type, u8 reason,
6807 bool mgmt_connected)
6809 struct mgmt_ev_device_disconnected ev;
6810 struct sock *sk = NULL;
6812 /* The connection is still in hci_conn_hash so test for 1
6813 * instead of 0 to know if this is the last one.
6815 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6816 cancel_delayed_work(&hdev->power_off);
6817 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6820 if (!mgmt_connected)
6823 if (link_type != ACL_LINK && link_type != LE_LINK)
6826 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6828 bacpy(&ev.addr.bdaddr, bdaddr);
6829 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6832 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6837 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: answer pending UNPAIR_DEVICE commands,
 * then complete the matching pending DISCONNECT (same address and
 * address type) with the mapped HCI status.
 */
6841 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6842 u8 link_type, u8 addr_type, u8 status)
6844 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6845 struct mgmt_cp_disconnect *cp;
6846 struct pending_cmd *cmd;
6848 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6851 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6857 if (bacmp(bdaddr, &cp->addr.bdaddr))
6860 if (cp->addr.type != bdaddr_type)
6863 cmd->cmd_complete(cmd, mgmt_status(status));
6864 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the mapped HCI status; like
 * mgmt_device_disconnected(), expedite power-off when this was the
 * last connection during a mgmt power-down.
 */
6867 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6868 u8 addr_type, u8 status)
6870 struct mgmt_ev_connect_failed ev;
6872 /* The connection is still in hci_conn_hash so test for 1
6873 * instead of 0 to know if this is the last one.
6875 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6876 cancel_delayed_work(&hdev->power_off);
6877 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6880 bacpy(&ev.addr.bdaddr, bdaddr);
6881 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6882 ev.status = mgmt_status(status);
6884 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a PIN code request to userspace via
 * MGMT_EV_PIN_CODE_REQUEST (BR/EDR only).
 */
6887 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6889 struct mgmt_ev_pin_code_request ev;
6891 bacpy(&ev.addr.bdaddr, bdaddr);
6892 ev.addr.type = BDADDR_BREDR;
6895 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the mapped HCI
 * status.
 */
6898 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6901 struct pending_cmd *cmd;
6903 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6907 cmd->cmd_complete(cmd, mgmt_status(status));
6908 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the mapped HCI
 * status.
 */
6911 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6914 struct pending_cmd *cmd;
6916 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6920 cmd->cmd_complete(cmd, mgmt_status(status));
6921 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a numeric comparison value via
 * MGMT_EV_USER_CONFIRM_REQUEST; confirm_hint signals whether a simple
 * yes/no is sufficient.
 */
6924 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6925 u8 link_type, u8 addr_type, u32 value,
6928 struct mgmt_ev_user_confirm_request ev;
6930 BT_DBG("%s", hdev->name);
6932 bacpy(&ev.addr.bdaddr, bdaddr);
6933 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6934 ev.confirm_hint = confirm_hint;
6935 ev.value = cpu_to_le32(value);
6937 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey via MGMT_EV_USER_PASSKEY_REQUEST. */
6941 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6942 u8 link_type, u8 addr_type)
6944 struct mgmt_ev_user_passkey_request ev;
6946 BT_DBG("%s", hdev->name);
6948 bacpy(&ev.addr.bdaddr, bdaddr);
6949 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6951 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion for the four user confirm/passkey (neg-)reply
 * opcodes: find the pending command for the given opcode and complete
 * it with the mapped HCI status.
 */
6955 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6956 u8 link_type, u8 addr_type, u8 status,
6959 struct pending_cmd *cmd;
6961 cmd = mgmt_pending_find(opcode, hdev);
6965 cmd->cmd_complete(cmd, mgmt_status(status));
6966 mgmt_pending_remove(cmd);
/* Thin wrapper: complete USER_CONFIRM_REPLY. */
6971 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6972 u8 link_type, u8 addr_type, u8 status)
6974 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6975 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete USER_CONFIRM_NEG_REPLY. */
6978 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6979 u8 link_type, u8 addr_type, u8 status)
6981 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6983 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete USER_PASSKEY_REPLY. */
6986 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6987 u8 link_type, u8 addr_type, u8 status)
6989 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6990 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete USER_PASSKEY_NEG_REPLY. */
6993 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6994 u8 link_type, u8 addr_type, u8 status)
6996 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6998 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey;
 * "entered" counts digits typed so far on the remote side.
 */
7001 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7002 u8 link_type, u8 addr_type, u32 passkey,
7005 struct mgmt_ev_passkey_notify ev;
7007 BT_DBG("%s", hdev->name);
7009 bacpy(&ev.addr.bdaddr, bdaddr);
7010 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7011 ev.passkey = __cpu_to_le32(passkey);
7012 ev.entered = entered;
7014 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_AUTH_FAILED for the connection and, when a pairing
 * command is pending for it, complete that command with the mapped
 * status (the pairing command's socket is skipped for the event).
 */
7017 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7019 struct mgmt_ev_auth_failed ev;
7020 struct pending_cmd *cmd;
7021 u8 status = mgmt_status(hci_status);
7023 bacpy(&ev.addr.bdaddr, &conn->dst);
7024 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7027 cmd = find_pairing(conn);
7029 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7030 cmd ? cmd->sk : NULL);
7033 cmd->cmd_complete(cmd, status);
7034 mgmt_pending_remove(cmd);
/* Completion of HCI_OP_WRITE_AUTH_ENABLE: on error, fail pending
 * SET_LINK_SECURITY commands; otherwise sync the HCI_LINK_SECURITY
 * flag to the controller's HCI_AUTH state, answer pending commands
 * and broadcast new settings when the flag changed.
 */
7038 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7040 struct cmd_lookup match = { NULL, hdev };
7044 u8 mgmt_err = mgmt_status(status);
7045 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7046 cmd_status_rsp, &mgmt_err);
7050 if (test_bit(HCI_AUTH, &hdev->flags))
7051 changed = !test_and_set_bit(HCI_LINK_SECURITY,
7054 changed = test_and_clear_bit(HCI_LINK_SECURITY,
7057 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7061 new_settings(hdev, match.sk);
/* Queue an HCI_OP_WRITE_EIR with an all-zero record to wipe the
 * controller's extended inquiry response; also clears the cached copy
 * in hdev->eir. No-op without extended-inquiry support.
 */
7067 static void clear_eir(struct hci_request *req)
7069 struct hci_dev *hdev = req->hdev;
7070 struct hci_cp_write_eir cp;
7072 if (!lmp_ext_inq_capable(hdev))
7075 memset(hdev->eir, 0, sizeof(hdev->eir));
7077 memset(&cp, 0, sizeof(cp));
7079 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of HCI_OP_WRITE_SSP_MODE: on error, roll back the
 * HCI_SSP_ENABLED/HCI_HS_ENABLED flags and fail pending SET_SSP
 * commands; on success, sync the flags, answer pending commands,
 * broadcast changed settings, and update debug-key mode / EIR.
 * NOTE(review): listing elides lines (e.g. the else-branch labels
 * around the flag updates and the update_eir() call).
 */
7082 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7084 struct cmd_lookup match = { NULL, hdev };
7085 struct hci_request req;
7086 bool changed = false;
7089 u8 mgmt_err = mgmt_status(status);
7091 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
7092 &hdev->dev_flags)) {
7093 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
7094 new_settings(hdev, NULL);
7097 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7103 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
7105 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
7107 changed = test_and_clear_bit(HCI_HS_ENABLED,
7110 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
7113 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7116 new_settings(hdev, match.sk);
7121 hci_req_init(&req, hdev);
7123 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
7124 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
7125 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7126 sizeof(enable), &enable);
7132 hci_req_run(&req, NULL);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket into the cmd_lookup (taking a reference) so later events can
 * be attributed to it.
 */
7135 static void sk_lookup(struct pending_cmd *cmd, void *data)
7137 struct cmd_lookup *match = data;
7139 if (match->sk == NULL) {
7140 match->sk = cmd->sk;
7141 sock_hold(match->sk);
/* Completion of a class-of-device update: pick the originating socket
 * from any pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command and
 * emit MGMT_EV_CLASS_OF_DEV_CHANGED (3-byte CoD).
 */
7145 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7148 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7150 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7151 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7152 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7155 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion of a local-name write: store the new name and broadcast
 * MGMT_EV_LOCAL_NAME_CHANGED — unless a SET_POWERED command is
 * pending, in which case the change is part of power-on and no event
 * is sent.
 */
7162 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7164 struct mgmt_cp_set_local_name ev;
7165 struct pending_cmd *cmd;
7170 memset(&ev, 0, sizeof(ev));
7171 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7172 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7174 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7176 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7178 /* If this is a HCI command related to powering on the
7179 * HCI dev don't send any mgmt signals.
7181 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7185 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7186 cmd ? cmd->sk : NULL);
/* Completion of READ_LOCAL_OOB_DATA: reply with the P-192 hash/rand
 * and, when Secure Connections is enabled and P-256 values are
 * available, the P-256 pair too (otherwise the response is shrunk by
 * the P-256 fields).
 */
7189 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7190 u8 *rand192, u8 *hash256, u8 *rand256,
7193 struct pending_cmd *cmd;
7195 BT_DBG("%s status %u", hdev->name, status);
7197 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7202 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7203 mgmt_status(status));
7205 struct mgmt_rp_read_local_oob_data rp;
7206 size_t rp_size = sizeof(rp);
7208 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7209 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7211 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7212 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7213 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7215 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7218 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7222 mgmt_pending_remove(cmd);
/* Linear search: true if the 128-bit uuid appears in the uuids array
 * of uuid_count entries.
 */
7225 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7229 for (i = 0; i < uuid_count; i++) {
7230 if (!memcmp(uuid, uuids[i], 16))
/* Walk EIR/adv data and return true if any contained UUID (16-, 32-
 * or 128-bit, expanded onto the Bluetooth base UUID where needed)
 * matches the filter list. Bounds-checks each field length against
 * the remaining data.
 */
7237 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7241 while (parsed < eir_len) {
7242 u8 field_len = eir[0];
7249 if (eir_len - parsed < field_len + 1)
7253 case EIR_UUID16_ALL:
7254 case EIR_UUID16_SOME:
/* 16-bit UUIDs occupy bytes 12-13 of the base UUID. */
7255 for (i = 0; i + 3 <= field_len; i += 2) {
7256 memcpy(uuid, bluetooth_base_uuid, 16);
7257 uuid[13] = eir[i + 3];
7258 uuid[12] = eir[i + 2];
7259 if (has_uuid(uuid, uuid_count, uuids))
7263 case EIR_UUID32_ALL:
7264 case EIR_UUID32_SOME:
7265 for (i = 0; i + 5 <= field_len; i += 4) {
7266 memcpy(uuid, bluetooth_base_uuid, 16);
7267 uuid[15] = eir[i + 5];
7268 uuid[14] = eir[i + 4];
7269 uuid[13] = eir[i + 3];
7270 uuid[12] = eir[i + 2];
7271 if (has_uuid(uuid, uuid_count, uuids))
7275 case EIR_UUID128_ALL:
7276 case EIR_UUID128_SOME:
7277 for (i = 0; i + 17 <= field_len; i += 16) {
7278 memcpy(uuid, eir + i + 2, 16);
7279 if (has_uuid(uuid, uuid_count, uuids))
7285 parsed += field_len + 1;
7286 eir += field_len + 1;
/*
 * restart_le_scan - schedule a delayed restart of an active LE scan.
 *
 * Used so that controllers with strict duplicate filtering produce fresh
 * results (see is_filter_match()). No-op when no LE scan is running.
 */
7292 static void restart_le_scan(struct hci_dev *hdev)
7294 /* If controller is not scanning we are done. */
7295 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
/* Compare the restart point against the scheduled scan end — presumably
 * to skip the restart when the scan would finish before the delay
 * elapses (the body of this branch is not visible in this extract;
 * confirm against the full source). */
7298 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7299 hdev->discovery.scan_start +
7300 hdev->discovery.scan_duration))
7303 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7304 DISCOV_LE_RESTART_DELAY);
/*
 * is_filter_match - apply the service-discovery result filter.
 * @hdev:         controller the result came from
 * @rssi:         reported RSSI (HCI_RSSI_INVALID when unavailable)
 * @eir:          EIR/advertising data of the result
 * @eir_len:      length of @eir
 * @scan_rsp:     scan response data
 * @scan_rsp_len: length of @scan_rsp
 *
 * Checks the result against the RSSI threshold and the UUID filter list
 * in hdev->discovery; used by mgmt_device_found() to decide whether a
 * result is forwarded to userspace.
 */
7307 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7308 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7310 /* If a RSSI threshold has been specified, and
7311 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7312 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7313 * is set, let it through for further processing, as we might need to
7316 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7317 * the results are also dropped.
7319 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7320 (rssi == HCI_RSSI_INVALID ||
7321 (rssi < hdev->discovery.rssi &&
7322 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7325 if (hdev->discovery.uuid_count != 0) {
7326 /* If a list of UUIDs is provided in filter, results with no
7327 * matching UUID should be dropped.
/* A match in either the EIR data or the scan response is enough. */
7329 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7330 hdev->discovery.uuids) &&
7331 !eir_has_uuids(scan_rsp, scan_rsp_len,
7332 hdev->discovery.uuid_count,
7333 hdev->discovery.uuids))
7337 /* If duplicate filtering does not report RSSI changes, then restart
7338 * scanning to ensure updated result with updated RSSI values.
7340 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7341 restart_le_scan(hdev);
7343 /* Validate RSSI value against the RSSI threshold once more. */
7344 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7345 rssi < hdev->discovery.rssi)
/*
 * mgmt_device_found - emit a MGMT_EV_DEVICE_FOUND event for a discovery result.
 * @hdev:         controller that produced the result
 * @bdaddr:       remote device address
 * @link_type:    ACL_LINK (BR/EDR) or LE_LINK
 * @addr_type:    address type, mapped with link_to_bdaddr()
 * @dev_class:    optional Class of Device (appended when not already in EIR)
 * @rssi:         reported RSSI (HCI_RSSI_INVALID when unavailable)
 * @flags:        mgmt event flags
 * @eir:          EIR/advertising data
 * @eir_len:      length of @eir
 * @scan_rsp:     scan response data, appended after the EIR data
 * @scan_rsp_len: length of @scan_rsp
 *
 * Results are dropped when no kernel-initiated discovery is active
 * (except LE passive-scan reports), when the service-discovery filter
 * rejects them, or when they would overflow the event buffer.
 */
7352 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7353 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7354 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7357 struct mgmt_ev_device_found *ev = (void *)buf;
7360 /* Don't send events for a non-kernel initiated discovery. With
7361 * LE one exception is if we have pend_le_reports > 0 in which
7362 * case we're doing passive scanning and want these events.
7364 if (!hci_discovery_active(hdev)) {
7365 if (link_type == ACL_LINK)
7367 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7371 if (hdev->discovery.result_filtering) {
7372 /* We are using service discovery */
7373 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7378 /* Make sure that the buffer is big enough. The 5 extra bytes
7379 * are for the potential CoD field.
7381 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7384 memset(buf, 0, sizeof(buf));
7386 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7387 * RSSI value was reported as 0 when not available. This behavior
7388 * is kept when using device discovery. This is required for full
7389 * backwards compatibility with the API.
7391 * However when using service discovery, the value 127 will be
7392 * returned when the RSSI is not available.
7394 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7395 link_type == ACL_LINK)
7398 bacpy(&ev->addr.bdaddr, bdaddr);
7399 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7401 ev->flags = cpu_to_le32(flags);
7404 /* Copy EIR or advertising data into event */
7405 memcpy(ev->eir, eir, eir_len);
/* Append Class of Device only when the EIR does not already carry one. */
7407 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7408 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7411 if (scan_rsp_len > 0)
7412 /* Append scan response data to event */
7413 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7415 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7416 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7418 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/*
 * mgmt_remote_name - emit a MGMT_EV_DEVICE_FOUND event carrying a remote name.
 * @hdev:      controller the name was resolved on
 * @bdaddr:    remote device address
 * @link_type: link type, mapped with link_to_bdaddr()
 * @addr_type: address type
 * @rssi:      reported RSSI
 * @name:      resolved remote name
 * @name_len:  length of @name
 *
 * Packs the name as an EIR_NAME_COMPLETE field inside a device-found
 * event (+2 in the buffer size covers the EIR type and length bytes).
 */
7421 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7422 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7424 struct mgmt_ev_device_found *ev;
7425 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7428 ev = (struct mgmt_ev_device_found *) buf;
7430 memset(buf, 0, sizeof(buf));
7432 bacpy(&ev->addr.bdaddr, bdaddr);
7433 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Encode the name as a complete-name EIR field starting at offset 0. */
7436 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7439 ev->eir_len = cpu_to_le16(eir_len);
7441 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/*
 * mgmt_discovering - broadcast a MGMT_EV_DISCOVERING state-change event.
 * @hdev:        controller whose discovery state changed
 * @discovering: new state (non-zero while discovery is active)
 */
7444 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7446 struct mgmt_ev_discovering ev;
7448 BT_DBG("%s discovering %u", hdev->name, discovering);
7450 memset(&ev, 0, sizeof(ev));
7451 ev.type = hdev->discovery.type;
7452 ev.discovering = discovering;
7454 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/*
 * HCI request completion callback for re-enabling advertising;
 * only logs the resulting status.
 */
7457 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7459 BT_DBG("%s status %u", hdev->name, status);
/*
 * mgmt_reenable_advertising - turn advertising back on if it should be active.
 * @hdev: controller to re-enable advertising on
 *
 * No-op unless the HCI_ADVERTISING flag is set; otherwise builds and
 * runs an HCI request that re-enables advertising, with
 * adv_enable_complete() as the completion callback.
 */
7462 void mgmt_reenable_advertising(struct hci_dev *hdev)
7464 struct hci_request req;
7466 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7469 hci_req_init(&req, hdev);
7470 enable_advertising(&req);
7471 hci_req_run(&req, adv_enable_complete);