2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
39 #define MGMT_VERSION 1
40 #define MGMT_REVISION 8
/* Table of every mgmt opcode this kernel supports; reported verbatim to
 * user space by the Read Commands command (read_commands() below).
 * NOTE(review): gaps in the embedded line numbering (e.g. 44-45, 49)
 * indicate entries omitted from this listing — verify against the full file.
 */
42 static const u16 mgmt_commands[] = {
43 MGMT_OP_READ_INDEX_LIST,
46 MGMT_OP_SET_DISCOVERABLE,
47 MGMT_OP_SET_CONNECTABLE,
48 MGMT_OP_SET_FAST_CONNECTABLE,
50 MGMT_OP_SET_LINK_SECURITY,
54 MGMT_OP_SET_DEV_CLASS,
55 MGMT_OP_SET_LOCAL_NAME,
58 MGMT_OP_LOAD_LINK_KEYS,
59 MGMT_OP_LOAD_LONG_TERM_KEYS,
61 MGMT_OP_GET_CONNECTIONS,
62 MGMT_OP_PIN_CODE_REPLY,
63 MGMT_OP_PIN_CODE_NEG_REPLY,
64 MGMT_OP_SET_IO_CAPABILITY,
66 MGMT_OP_CANCEL_PAIR_DEVICE,
67 MGMT_OP_UNPAIR_DEVICE,
68 MGMT_OP_USER_CONFIRM_REPLY,
69 MGMT_OP_USER_CONFIRM_NEG_REPLY,
70 MGMT_OP_USER_PASSKEY_REPLY,
71 MGMT_OP_USER_PASSKEY_NEG_REPLY,
72 MGMT_OP_READ_LOCAL_OOB_DATA,
73 MGMT_OP_ADD_REMOTE_OOB_DATA,
74 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
75 MGMT_OP_START_DISCOVERY,
76 MGMT_OP_STOP_DISCOVERY,
79 MGMT_OP_UNBLOCK_DEVICE,
80 MGMT_OP_SET_DEVICE_ID,
81 MGMT_OP_SET_ADVERTISING,
83 MGMT_OP_SET_STATIC_ADDRESS,
84 MGMT_OP_SET_SCAN_PARAMS,
85 MGMT_OP_SET_SECURE_CONN,
86 MGMT_OP_SET_DEBUG_KEYS,
89 MGMT_OP_GET_CONN_INFO,
90 MGMT_OP_GET_CLOCK_INFO,
92 MGMT_OP_REMOVE_DEVICE,
93 MGMT_OP_LOAD_CONN_PARAM,
94 MGMT_OP_READ_UNCONF_INDEX_LIST,
95 MGMT_OP_READ_CONFIG_INFO,
96 MGMT_OP_SET_EXTERNAL_CONFIG,
97 MGMT_OP_SET_PUBLIC_ADDRESS,
98 MGMT_OP_START_SERVICE_DISCOVERY,
/* Table of every mgmt event this kernel can emit; reported to user space
 * together with mgmt_commands by read_commands().
 * NOTE(review): numbering gaps (e.g. 103, 116, 118) indicate entries
 * omitted from this listing.
 */
101 static const u16 mgmt_events[] = {
102 MGMT_EV_CONTROLLER_ERROR,
104 MGMT_EV_INDEX_REMOVED,
105 MGMT_EV_NEW_SETTINGS,
106 MGMT_EV_CLASS_OF_DEV_CHANGED,
107 MGMT_EV_LOCAL_NAME_CHANGED,
108 MGMT_EV_NEW_LINK_KEY,
109 MGMT_EV_NEW_LONG_TERM_KEY,
110 MGMT_EV_DEVICE_CONNECTED,
111 MGMT_EV_DEVICE_DISCONNECTED,
112 MGMT_EV_CONNECT_FAILED,
113 MGMT_EV_PIN_CODE_REQUEST,
114 MGMT_EV_USER_CONFIRM_REQUEST,
115 MGMT_EV_USER_PASSKEY_REQUEST,
117 MGMT_EV_DEVICE_FOUND,
119 MGMT_EV_DEVICE_BLOCKED,
120 MGMT_EV_DEVICE_UNBLOCKED,
121 MGMT_EV_DEVICE_UNPAIRED,
122 MGMT_EV_PASSKEY_NOTIFY,
125 MGMT_EV_DEVICE_ADDED,
126 MGMT_EV_DEVICE_REMOVED,
127 MGMT_EV_NEW_CONN_PARAM,
128 MGMT_EV_UNCONF_INDEX_ADDED,
129 MGMT_EV_UNCONF_INDEX_REMOVED,
130 MGMT_EV_NEW_CONFIG_OPTIONS,
133 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
135 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
136 "\x00\x00\x00\x00\x00\x00\x00\x00"
139 struct list_head list;
146 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
149 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code (Bluetooth Core Spec Vol 2,
 * Part D); consumed only by mgmt_status() below, which bounds-checks the
 * index. NOTE(review): the entry at index 0 (line 151, presumably
 * MGMT_STATUS_SUCCESS) appears to be missing from this listing — confirm
 * against the full file.
 */
150 static u8 mgmt_status_table[] = {
152 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
153 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
154 MGMT_STATUS_FAILED, /* Hardware Failure */
155 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
156 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
157 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
158 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
159 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
160 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
161 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
162 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
163 MGMT_STATUS_BUSY, /* Command Disallowed */
164 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
165 MGMT_STATUS_REJECTED, /* Rejected Security */
166 MGMT_STATUS_REJECTED, /* Rejected Personal */
167 MGMT_STATUS_TIMEOUT, /* Host Timeout */
168 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
169 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
170 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
171 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
172 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
173 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
174 MGMT_STATUS_BUSY, /* Repeated Attempts */
175 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
176 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
177 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
178 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
179 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
180 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
181 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
182 MGMT_STATUS_FAILED, /* Unspecified Error */
183 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
184 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
185 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
186 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
187 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
188 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
189 MGMT_STATUS_FAILED, /* Unit Link Key Used */
190 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
191 MGMT_STATUS_TIMEOUT, /* Instant Passed */
192 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
193 MGMT_STATUS_FAILED, /* Transaction Collision */
194 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
195 MGMT_STATUS_REJECTED, /* QoS Rejected */
196 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
197 MGMT_STATUS_REJECTED, /* Insufficient Security */
198 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
199 MGMT_STATUS_BUSY, /* Role Switch Pending */
200 MGMT_STATUS_FAILED, /* Slot Violation */
201 MGMT_STATUS_FAILED, /* Role Switch Failed */
202 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
203 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
204 MGMT_STATUS_BUSY, /* Host Busy Pairing */
205 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
206 MGMT_STATUS_BUSY, /* Controller Busy */
207 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
208 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
209 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
210 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
211 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
214 static u8 mgmt_status(u8 hci_status)
216 if (hci_status < ARRAY_SIZE(mgmt_status_table))
217 return mgmt_status_table[hci_status];
219 return MGMT_STATUS_FAILED;
/* Build an mgmt event packet (header + payload) in a fresh skb and
 * broadcast it on the HCI control channel, skipping skip_sk.
 * The index field is hdev->id when a device is given, MGMT_INDEX_NONE
 * otherwise. NOTE(review): this listing omits lines (skb declaration,
 * alloc failure check, the hdev NULL branch, kfree_skb and the return) —
 * numbering gaps mark the omissions.
 */
222 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
223 struct sock *skip_sk)
226 struct mgmt_hdr *hdr;
228 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
232 hdr = (void *) skb_put(skb, sizeof(*hdr));
233 hdr->opcode = cpu_to_le16(event);
/* Event carries a controller index only when hdev is known. */
235 hdr->index = cpu_to_le16(hdev->id);
237 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
238 hdr->len = cpu_to_le16(data_len);
241 memcpy(skb_put(skb, data_len), data, data_len);
/* Timestamp before queuing so monitors see a coherent time. */
244 __net_timestamp(skb);
246 hci_send_to_channel(HCI_CHANNEL_CONTROL, skb, skip_sk);
/* Send a Command Status mgmt event (opcode + status, no parameters) to a
 * single socket. NOTE(review): listing omits lines — skb declaration,
 * alloc failure check, the ev->status assignment (line 274, presumably)
 * and the error/return path.
 */
252 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
255 struct mgmt_hdr *hdr;
256 struct mgmt_ev_cmd_status *ev;
259 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
261 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
265 hdr = (void *) skb_put(skb, sizeof(*hdr));
267 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
268 hdr->index = cpu_to_le16(index);
269 hdr->len = cpu_to_le16(sizeof(*ev));
271 ev = (void *) skb_put(skb, sizeof(*ev));
273 ev->opcode = cpu_to_le16(cmd);
/* Deliver directly to the requesting socket's receive queue. */
275 err = sock_queue_rcv_skb(sk, skb);
/* Send a Command Complete mgmt event carrying rp_len bytes of response
 * parameters (rp) back to the requesting socket. NOTE(review): listing
 * omits lines — skb declaration, alloc check, ev->status assignment and
 * the conditional around the rp memcpy.
 */
282 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
283 void *rp, size_t rp_len)
286 struct mgmt_hdr *hdr;
287 struct mgmt_ev_cmd_complete *ev;
290 BT_DBG("sock %p", sk);
292 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
296 hdr = (void *) skb_put(skb, sizeof(*hdr));
298 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
299 hdr->index = cpu_to_le16(index);
300 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
/* Event struct and the trailing response parameters share one put. */
302 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
303 ev->opcode = cpu_to_le16(cmd);
307 memcpy(ev->data, rp, rp_len);
309 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: replies with the mgmt interface
 * version/revision pair. Always succeeds. NOTE(review): the trailing
 * sizeof(rp)) argument/closing line of the cmd_complete call is omitted
 * from this listing.
 */
316 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
319 struct mgmt_rp_read_version rp;
321 BT_DBG("sock %p", sk);
323 rp.version = MGMT_VERSION;
324 rp.revision = cpu_to_le16(MGMT_REVISION);
326 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: replies with the supported command and
 * event opcode tables (mgmt_commands/mgmt_events above) serialized as
 * little-endian u16s. NOTE(review): listing omits local declarations
 * (rp_size, i, opcode, err), the kmalloc NULL check, the kfree and the
 * final return.
 */
330 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
333 struct mgmt_rp_read_commands *rp;
334 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
335 const u16 num_events = ARRAY_SIZE(mgmt_events);
340 BT_DBG("sock %p", sk);
342 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
344 rp = kmalloc(rp_size, GFP_KERNEL);
348 rp->num_commands = cpu_to_le16(num_commands);
349 rp->num_events = cpu_to_le16(num_events);
/* Commands first, then events, in one contiguous opcode array;
 * put_unaligned_le16 because rp->opcodes has no alignment guarantee. */
351 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
352 put_unaligned_le16(mgmt_commands[i], opcode);
354 for (i = 0; i < num_events; i++, opcode++)
355 put_unaligned_le16(mgmt_events[i], opcode);
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: replies with the ids of all powered
 * BR/EDR-type controllers that are configured (HCI_UNCONFIGURED clear),
 * skipping controllers in setup/config/user-channel state and raw-only
 * devices. Two passes under hci_dev_list_lock: count, then fill.
 * NOTE(review): listing omits local declarations, count initialization,
 * `continue` statements inside the loops, the kmalloc NULL check, kfree
 * and the final return.
 */
364 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
367 struct mgmt_rp_read_index_list *rp;
373 BT_DBG("sock %p", sk);
375 read_lock(&hci_dev_list_lock);
/* First pass: upper bound on the number of indices. */
378 list_for_each_entry(d, &hci_dev_list, list) {
379 if (d->dev_type == HCI_BREDR &&
380 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
/* GFP_ATOMIC: allocation happens with the read lock held. */
384 rp_len = sizeof(*rp) + (2 * count);
385 rp = kmalloc(rp_len, GFP_ATOMIC);
387 read_unlock(&hci_dev_list_lock);
/* Second pass: fill indices, re-checking exclusion conditions. */
392 list_for_each_entry(d, &hci_dev_list, list) {
393 if (test_bit(HCI_SETUP, &d->dev_flags) ||
394 test_bit(HCI_CONFIG, &d->dev_flags) ||
395 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
398 /* Devices marked as raw-only are neither configured
399 * nor unconfigured controllers.
401 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
404 if (d->dev_type == HCI_BREDR &&
405 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
406 rp->index[count++] = cpu_to_le16(d->id);
407 BT_DBG("Added hci%u", d->id);
/* count may now be lower than the first pass; recompute rp_len. */
411 rp->num_controllers = cpu_to_le16(count);
412 rp_len = sizeof(*rp) + (2 * count);
414 read_unlock(&hci_dev_list_lock);
416 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: mirror of read_index_list()
 * above, but selects controllers with HCI_UNCONFIGURED *set* instead of
 * clear. Same two-pass count/fill structure under hci_dev_list_lock.
 * NOTE(review): the same lines omitted from this listing as in
 * read_index_list (declarations, continues, NULL check, kfree, return).
 */
424 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
425 void *data, u16 data_len)
427 struct mgmt_rp_read_unconf_index_list *rp;
433 BT_DBG("sock %p", sk);
435 read_lock(&hci_dev_list_lock);
438 list_for_each_entry(d, &hci_dev_list, list) {
439 if (d->dev_type == HCI_BREDR &&
440 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
444 rp_len = sizeof(*rp) + (2 * count);
445 rp = kmalloc(rp_len, GFP_ATOMIC);
447 read_unlock(&hci_dev_list_lock);
452 list_for_each_entry(d, &hci_dev_list, list) {
453 if (test_bit(HCI_SETUP, &d->dev_flags) ||
454 test_bit(HCI_CONFIG, &d->dev_flags) ||
455 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
458 /* Devices marked as raw-only are neither configured
459 * nor unconfigured controllers.
461 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
464 if (d->dev_type == HCI_BREDR &&
465 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
466 rp->index[count++] = cpu_to_le16(d->id);
467 BT_DBG("Added hci%u", d->id);
471 rp->num_controllers = cpu_to_le16(count);
472 rp_len = sizeof(*rp) + (2 * count);
474 read_unlock(&hci_dev_list_lock);
476 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
/* A controller counts as configured once no mandatory external setup is
 * outstanding: external config done (if the quirk requires it) and a
 * valid public address set (if the invalid-bdaddr quirk is set).
 * NOTE(review): the `return false` statements under each condition and
 * the final `return true` are omitted from this listing.
 */
484 static bool is_configured(struct hci_dev *hdev)
486 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
487 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
490 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
491 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Report which configuration options are still missing, as a
 * little-endian bitmask for the mgmt wire format. Mirrors the two
 * conditions checked by is_configured() above. NOTE(review): the
 * `u32 options = 0;` declaration line is omitted from this listing.
 */
497 static __le32 get_missing_options(struct hci_dev *hdev)
501 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
502 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
503 options |= MGMT_OPTION_EXTERNAL_CONFIG;
505 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
506 !bacmp(&hdev->public_addr, BDADDR_ANY))
507 options |= MGMT_OPTION_PUBLIC_ADDRESS;
509 return cpu_to_le32(options);
512 static int new_options(struct hci_dev *hdev, struct sock *skip)
514 __le32 options = get_missing_options(hdev);
516 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
517 sizeof(options), skip);
/* Reply to `opcode` with a Command Complete whose payload is the current
 * missing-options bitmask. NOTE(review): the trailing sizeof(options))
 * argument/closing line of the cmd_complete call is omitted from this
 * listing.
 */
520 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
522 __le32 options = get_missing_options(hdev);
524 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: replies with manufacturer id plus
 * the supported and currently-missing configuration option bitmasks.
 * Public-address configuration is advertised only when the driver
 * provides a set_bdaddr hook. NOTE(review): listing omits the
 * `u32 options = 0;` declaration, the hci_dev_lock() call paired with
 * the visible unlock, and the closing sizeof(rp)) of the final call.
 */
528 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
529 void *data, u16 data_len)
531 struct mgmt_rp_read_config_info rp;
534 BT_DBG("sock %p %s", sk, hdev->name);
538 memset(&rp, 0, sizeof(rp));
539 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
541 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
542 options |= MGMT_OPTION_EXTERNAL_CONFIG;
544 if (hdev->set_bdaddr)
545 options |= MGMT_OPTION_PUBLIC_ADDRESS;
547 rp.supported_options = cpu_to_le32(options);
548 rp.missing_options = get_missing_options(hdev);
550 hci_dev_unlock(hdev);
552 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Compute the bitmask of settings this controller could support, based
 * on its LMP feature bits: a base set for every controller, plus
 * BR/EDR-, SSP-, SC- and LE-conditional settings. NOTE(review): the
 * `u32 settings = 0;` declaration and the final `return settings;` are
 * omitted from this listing.
 */
556 static u32 get_supported_settings(struct hci_dev *hdev)
560 settings |= MGMT_SETTING_POWERED;
561 settings |= MGMT_SETTING_BONDABLE;
562 settings |= MGMT_SETTING_DEBUG_KEYS;
563 settings |= MGMT_SETTING_CONNECTABLE;
564 settings |= MGMT_SETTING_DISCOVERABLE;
566 if (lmp_bredr_capable(hdev)) {
/* Fast connectable requires at least Bluetooth 1.2 (interlaced scan). */
567 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
568 settings |= MGMT_SETTING_FAST_CONNECTABLE;
569 settings |= MGMT_SETTING_BREDR;
570 settings |= MGMT_SETTING_LINK_SECURITY;
572 if (lmp_ssp_capable(hdev)) {
573 settings |= MGMT_SETTING_SSP;
574 settings |= MGMT_SETTING_HS;
577 if (lmp_sc_capable(hdev))
578 settings |= MGMT_SETTING_SECURE_CONN;
/* LE implies SC support regardless of the BR/EDR SC feature bit. */
581 if (lmp_le_capable(hdev)) {
582 settings |= MGMT_SETTING_LE;
583 settings |= MGMT_SETTING_ADVERTISING;
584 settings |= MGMT_SETTING_SECURE_CONN;
585 settings |= MGMT_SETTING_PRIVACY;
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
590 settings |= MGMT_SETTING_CONFIGURATION;
/* Compute the bitmask of settings currently active on the controller by
 * sampling the corresponding dev_flags bits one by one. NOTE(review):
 * the `u32 settings = 0;` declaration and the final `return settings;`
 * are omitted from this listing.
 */
595 static u32 get_current_settings(struct hci_dev *hdev)
599 if (hdev_is_powered(hdev))
600 settings |= MGMT_SETTING_POWERED;
602 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
603 settings |= MGMT_SETTING_CONNECTABLE;
605 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
606 settings |= MGMT_SETTING_FAST_CONNECTABLE;
608 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
609 settings |= MGMT_SETTING_DISCOVERABLE;
611 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
612 settings |= MGMT_SETTING_BONDABLE;
614 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
615 settings |= MGMT_SETTING_BREDR;
617 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
618 settings |= MGMT_SETTING_LE;
620 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
621 settings |= MGMT_SETTING_LINK_SECURITY;
623 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
624 settings |= MGMT_SETTING_SSP;
626 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
627 settings |= MGMT_SETTING_HS;
629 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
630 settings |= MGMT_SETTING_ADVERTISING;
632 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
633 settings |= MGMT_SETTING_SECURE_CONN;
635 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
636 settings |= MGMT_SETTING_DEBUG_KEYS;
638 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
639 settings |= MGMT_SETTING_PRIVACY;
644 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field containing all registered 16-bit UUIDs to `data`
 * (at most `len` bytes), skipping the PnP Information service class.
 * Uses EIR_UUID16_ALL and downgrades to EIR_UUID16_SOME when the list is
 * truncated for space. Returns the advanced write pointer.
 * NOTE(review): listing omits `continue`/`break` statements, the
 * lazily-initialized uuids_start setup and the final return — numbering
 * gaps mark the omissions.
 */
646 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
648 u8 *ptr = data, *uuids_start = NULL;
649 struct bt_uuid *uuid;
654 list_for_each_entry(uuid, &hdev->uuids, list) {
657 if (uuid->size != 16)
/* 16-bit UUIDs are stored at offset 12 of the 128-bit form. */
660 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
664 if (uuid16 == PNP_INFO_SVCLASS_ID)
670 uuids_start[1] = EIR_UUID16_ALL;
674 /* Stop if not enough space to put next UUID */
675 if ((ptr - data) + sizeof(u16) > len) {
676 uuids_start[1] = EIR_UUID16_SOME;
/* Serialize little-endian, then grow the field length byte. */
680 *ptr++ = (uuid16 & 0x00ff);
681 *ptr++ = (uuid16 & 0xff00) >> 8;
682 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field with all registered 32-bit UUIDs to `data`, same
 * shape as create_uuid16_list() above but with EIR_UUID32_ALL/SOME and
 * 4-byte entries. Returns the advanced write pointer. NOTE(review):
 * listing omits the continue, the uuids_start initialization, the break,
 * the ptr advance after the memcpy and the final return.
 */
688 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
690 u8 *ptr = data, *uuids_start = NULL;
691 struct bt_uuid *uuid;
696 list_for_each_entry(uuid, &hdev->uuids, list) {
697 if (uuid->size != 32)
703 uuids_start[1] = EIR_UUID32_ALL;
707 /* Stop if not enough space to put next UUID */
708 if ((ptr - data) + sizeof(u32) > len) {
709 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit UUIDs also live at offset 12 of the 128-bit storage form. */
713 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
715 uuids_start[0] += sizeof(u32);
/* Append an EIR field with all registered 128-bit UUIDs to `data`, same
 * shape as the 16/32-bit variants above but with EIR_UUID128_ALL/SOME
 * and full 16-byte entries. Returns the advanced write pointer.
 * NOTE(review): listing omits the continue, uuids_start initialization,
 * break, ptr advance and final return.
 */
721 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
723 u8 *ptr = data, *uuids_start = NULL;
724 struct bt_uuid *uuid;
729 list_for_each_entry(uuid, &hdev->uuids, list) {
730 if (uuid->size != 128)
736 uuids_start[1] = EIR_UUID128_ALL;
740 /* Stop if not enough space to put next UUID */
741 if ((ptr - data) + 16 > len) {
742 uuids_start[1] = EIR_UUID128_SOME;
746 memcpy(ptr, uuid->uuid, 16);
748 uuids_start[0] += 16;
/* Look up a pending mgmt command on hdev by opcode. NOTE(review): the
 * `return cmd;` inside the loop and the trailing `return NULL;` are
 * omitted from this listing.
 */
754 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
756 struct pending_cmd *cmd;
758 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
759 if (cmd->opcode == opcode)
/* Look up a pending mgmt command on hdev matching both opcode and the
 * caller-supplied user_data pointer. NOTE(review): the `continue`,
 * `return cmd;` and trailing `return NULL;` lines are omitted from this
 * listing.
 */
766 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
767 struct hci_dev *hdev,
770 struct pending_cmd *cmd;
772 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
773 if (cmd->user_data != data)
775 if (cmd->opcode == opcode)
/* Fill `ptr` with LE scan response data: currently just the local name,
 * shortened (EIR_NAME_SHORT) when it exceeds the remaining advertising
 * space. Returns the number of bytes written. NOTE(review): listing
 * omits the ad_len/name_len declarations, the zero-length-name guard,
 * the name_len clamp in the truncation branch and the final return.
 */
782 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
787 name_len = strlen(hdev->dev_name);
/* Reserve 2 bytes for the EIR field header (length + type). */
789 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
791 if (name_len > max_len) {
793 ptr[1] = EIR_NAME_SHORT;
795 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte counts the type byte plus the name itself. */
797 ptr[0] = name_len + 1;
799 memcpy(ptr + 2, hdev->dev_name, name_len);
801 ad_len += (name_len + 2);
802 ptr += (name_len + 2);
/* Queue an HCI LE Set Scan Response Data command on `req`, but only when
 * LE is enabled and the freshly built payload actually differs from what
 * the controller already has. Caches the new payload on hdev.
 * NOTE(review): listing omits the `u8 len;` declaration, the early
 * `return`s and the cp.length assignment before the hci_req_add.
 */
808 static void update_scan_rsp_data(struct hci_request *req)
810 struct hci_dev *hdev = req->hdev;
811 struct hci_cp_le_set_scan_rsp_data cp;
814 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
817 memset(&cp, 0, sizeof(cp));
819 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip if nothing changed. */
821 if (hdev->scan_rsp_data_len == len &&
822 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
825 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
826 hdev->scan_rsp_data_len = len;
830 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (LE_AD_GENERAL,
 * LE_AD_LIMITED or 0) that should currently be advertised. A pending
 * Set Discoverable command takes precedence over the dev_flags bits.
 * NOTE(review): listing omits the `if (cmd)` wrapper, the
 * `if (cp->val == 0x01)` test preceding the LE_AD_GENERAL return, and
 * the final `return 0;`.
 */
833 static u8 get_adv_discov_flags(struct hci_dev *hdev)
835 struct pending_cmd *cmd;
837 /* If there's a pending mgmt command the flags will not yet have
838 * their final values, so check for this first.
840 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
842 struct mgmt_mode *cp = cmd->param;
844 return LE_AD_GENERAL;
845 else if (cp->val == 0x02)
846 return LE_AD_LIMITED;
848 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
849 return LE_AD_LIMITED;
850 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
851 return LE_AD_GENERAL;
/* Fill `ptr` with LE advertising data: the flags field (discoverability
 * plus LE_AD_NO_BREDR when BR/EDR is disabled) and, when known, the
 * advertising TX power. Returns the number of bytes written.
 * NOTE(review): listing omits the flags-field emission itself (lines
 * 868-876), the field length/type bytes around the TX power entry, the
 * ad_len/ptr advances and the final return.
 */
857 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
859 u8 ad_len = 0, flags = 0;
861 flags |= get_adv_discov_flags(hdev);
863 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
864 flags |= LE_AD_NO_BREDR;
867 BT_DBG("adv flags 0x%02x", flags);
877 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
879 ptr[1] = EIR_TX_POWER;
880 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI LE Set Advertising Data command on `req`; same guard and
 * change-detection structure as update_scan_rsp_data() above, but for
 * the advertising payload. NOTE(review): listing omits the `u8 len;`
 * declaration, early `return`s and the cp.length assignment.
 */
889 static void update_adv_data(struct hci_request *req)
891 struct hci_dev *hdev = req->hdev;
892 struct hci_cp_le_set_adv_data cp;
895 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
898 memset(&cp, 0, sizeof(cp));
900 len = create_adv_data(hdev, cp.data);
/* Avoid the HCI command entirely when the payload is unchanged. */
902 if (hdev->adv_data_len == len &&
903 memcmp(cp.data, hdev->adv_data, len) == 0)
906 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
907 hdev->adv_data_len = len;
911 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Public entry point: rebuild the advertising data and push it to the
 * controller as a standalone HCI request. Returns hci_req_run()'s
 * result.
 */
914 int mgmt_update_adv_data(struct hci_dev *hdev)
916 struct hci_request req;
918 hci_req_init(&req, hdev);
919 update_adv_data(&req);
921 return hci_req_run(&req, NULL);
/* Build the BR/EDR Extended Inquiry Response payload in `data`: local
 * name (complete or shortened), inquiry TX power, Device ID record, then
 * the 16/32/128-bit UUID lists capped at HCI_MAX_EIR_LENGTH.
 * NOTE(review): listing omits the ptr/name_len declarations, the
 * zero-length-name guard, the truncation branch before EIR_NAME_SHORT
 * and several field-header/ptr-advance lines — numbering gaps mark the
 * omissions.
 */
924 static void create_eir(struct hci_dev *hdev, u8 *data)
929 name_len = strlen(hdev->dev_name);
935 ptr[1] = EIR_NAME_SHORT;
937 ptr[1] = EIR_NAME_COMPLETE;
939 /* EIR Data length */
940 ptr[0] = name_len + 1;
942 memcpy(ptr + 2, hdev->dev_name, name_len);
944 ptr += (name_len + 2);
947 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
949 ptr[1] = EIR_TX_POWER;
950 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID record: source, vendor, product, version (all LE16). */
955 if (hdev->devid_source > 0) {
957 ptr[1] = EIR_DEVICE_ID;
959 put_unaligned_le16(hdev->devid_source, ptr + 2);
960 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
961 put_unaligned_le16(hdev->devid_product, ptr + 6);
962 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* UUID lists consume whatever space remains. */
967 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
968 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
969 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command on `req` when the controller is
 * powered, supports extended inquiry, has SSP enabled and the service
 * cache is not active — and only if the freshly built EIR differs from
 * the cached copy. NOTE(review): the early `return` statements after
 * each guard are omitted from this listing.
 */
972 static void update_eir(struct hci_request *req)
974 struct hci_dev *hdev = req->hdev;
975 struct hci_cp_write_eir cp;
977 if (!hdev_is_powered(hdev))
980 if (!lmp_ext_inq_capable(hdev))
983 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
986 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
989 memset(&cp, 0, sizeof(cp));
991 create_eir(hdev, cp.data);
/* Skip the write when nothing changed. */
993 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
996 memcpy(hdev->eir, cp.data, sizeof(cp.data));
998 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of every registered UUID to form
 * the service-class byte of the Class of Device. NOTE(review): the
 * `u8 val = 0;` declaration and the final `return val;` are omitted
 * from this listing.
 */
1001 static u8 get_service_classes(struct hci_dev *hdev)
1003 struct bt_uuid *uuid;
1006 list_for_each_entry(uuid, &hdev->uuids, list)
1007 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command on `req` when powered with
 * BR/EDR enabled and no active service cache. cod layout: minor class,
 * major class, service classes. NOTE(review): listing omits the `u8
 * cod[3];` declaration, the early `return`s, and the limited-discoverable
 * bit set (cod[1] |= 0x20) under the HCI_LIMITED_DISCOVERABLE test.
 */
1012 static void update_class(struct hci_request *req)
1014 struct hci_dev *hdev = req->hdev;
1017 BT_DBG("%s", hdev->name);
1019 if (!hdev_is_powered(hdev))
1022 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1025 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1028 cod[0] = hdev->minor_class;
1029 cod[1] = hdev->major_class;
1030 cod[2] = get_service_classes(hdev);
1032 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
/* Skip the write when the class is unchanged. */
1035 if (memcmp(cod, hdev->dev_class, 3) == 0)
1038 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Report the connectable state that advertising should assume: a
 * pending Set Connectable command's requested value takes precedence
 * over the HCI_CONNECTABLE flag. NOTE(review): the `if (cmd)` wrapper
 * and the `return cp->val;` inside it are omitted from this listing.
 */
1041 static bool get_connectable(struct hci_dev *hdev)
1043 struct pending_cmd *cmd;
1045 /* If there's a pending mgmt command the flag will not yet have
1046 * it's final value, so check for this first.
1048 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1050 struct mgmt_mode *cp = cmd->param;
1054 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue an HCI LE Set Advertise Enable command turning advertising off.
 * NOTE(review): the `u8 enable = 0x00;` declaration line is omitted
 * from this listing.
 */
1057 static void disable_advertising(struct hci_request *req)
1061 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI command sequence that (re)enables LE advertising:
 * disable any running instance, pick the own-address type (possibly
 * generating a new random address), set the advertising parameters and
 * finally enable. Bails out while an LE connection exists.
 * NOTE(review): listing omits the `bool connectable;` declaration and
 * the early `return`s after the connection-count and random-address
 * checks.
 */
1064 static void enable_advertising(struct hci_request *req)
1066 struct hci_dev *hdev = req->hdev;
1067 struct hci_cp_le_set_adv_param cp;
1068 u8 own_addr_type, enable = 0x01;
1071 if (hci_conn_num(hdev, LE_LINK) > 0)
1074 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1075 disable_advertising(req);
1077 /* Clear the HCI_LE_ADV bit temporarily so that the
1078 * hci_update_random_address knows that it's safe to go ahead
1079 * and write a new random address. The flag will be set back on
1080 * as soon as the SET_ADV_ENABLE HCI command completes.
1082 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1084 connectable = get_connectable(hdev);
1086 /* Set require_privacy to true only when non-connectable
1087 * advertising is used. In that case it is fine to use a
1088 * non-resolvable private address.
1090 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1093 memset(&cp, 0, sizeof(cp));
1094 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1095 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
/* ADV_IND accepts connections; NONCONN_IND is broadcast-only. */
1096 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1097 cp.own_address_type = own_addr_type;
1098 cp.channel_map = hdev->le_adv_channel_map;
1100 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1102 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler that expires the service cache: once
 * HCI_SERVICE_CACHE is cleared, push the now-uncached EIR and class of
 * device to the controller. NOTE(review): listing omits the early
 * return, the hci_dev_lock() paired with the visible unlock, and the
 * update_eir()/update_class() calls (lines 1116-1119) between them.
 */
1105 static void service_cache_off(struct work_struct *work)
1107 struct hci_dev *hdev = container_of(work, struct hci_dev,
1108 service_cache.work);
1109 struct hci_request req;
1111 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1114 hci_req_init(&req, hdev);
1121 hci_dev_unlock(hdev);
1123 hci_req_run(&req, NULL);
/* Delayed-work handler for resolvable-private-address expiry: mark the
 * RPA stale and, when advertising is active, restart advertising so a
 * new RPA gets generated and programmed. NOTE(review): listing omits
 * the container_of second argument line (rpa_expired.work), the BT_DBG
 * and the early return when advertising is off.
 */
1126 static void rpa_expired(struct work_struct *work)
1128 struct hci_dev *hdev = container_of(work, struct hci_dev,
1130 struct hci_request req;
1134 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1136 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1139 /* The generation of a new RPA and programming it into the
1140 * controller happens in the enable_advertising() function.
1142 hci_req_init(&req, hdev);
1143 enable_advertising(&req);
1144 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialization, run when the first mgmt
 * command touches hdev: set HCI_MGMT (idempotent via test_and_set),
 * hook up the service-cache and RPA-expiry workers, and clear
 * HCI_BONDABLE so user space must opt in explicitly. NOTE(review): the
 * early return after the test_and_set_bit is omitted from this listing.
 */
1147 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1149 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1152 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1153 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1155 /* Non-mgmt controlled devices get this bit set
1156 * implicitly so that pairing works for them, however
1157 * for mgmt we require user-space to explicitly enable
1160 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: replies with the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device and
 * names. NOTE(review): listing omits the hci_dev_lock() paired with the
 * visible unlock and the closing sizeof(rp)) of the final cmd_complete
 * call.
 */
1163 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1164 void *data, u16 data_len)
1166 struct mgmt_rp_read_info rp;
1168 BT_DBG("sock %p %s", sk, hdev->name);
1172 memset(&rp, 0, sizeof(rp));
1174 bacpy(&rp.bdaddr, &hdev->bdaddr);
1176 rp.version = hdev->hci_ver;
1177 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1179 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1180 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1182 memcpy(rp.dev_class, hdev->dev_class, 3);
1184 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1185 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1187 hci_dev_unlock(hdev);
1189 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1193 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and register a pending mgmt command: copies the request
 * parameters, takes a reference on the socket and links the entry into
 * hdev->mgmt_pending. NOTE(review): listing omits the kzalloc/kmemdup
 * NULL checks (with kfree(cmd) on kmemdup failure), the cmd->sk
 * assignment and sock_hold, and the final `return cmd;`.
 */
1200 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1201 struct hci_dev *hdev, void *data,
1204 struct pending_cmd *cmd;
1206 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1210 cmd->opcode = opcode;
1211 cmd->index = hdev->id;
/* Own copy of the parameters — the caller's buffer is not retained. */
1213 cmd->param = kmemdup(data, len, GFP_KERNEL);
1219 cmd->param_len = len;
1224 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke `cb` on every pending command matching `opcode` (or on all of
 * them when opcode is 0). Uses the _safe iterator so callbacks may
 * remove/free entries. NOTE(review): the callback's trailing parameter
 * list, the `continue` and the cb(cmd, data) invocation are omitted
 * from this listing.
 */
1229 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1230 void (*cb)(struct pending_cmd *cmd,
1234 struct pending_cmd *cmd, *tmp;
1236 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1237 if (opcode > 0 && cmd->opcode != opcode)
/* Unlink a pending command from its device list and free it (the free
 * must come second — it releases cmd itself).
 */
1244 static void mgmt_pending_remove(struct pending_cmd *cmd)
1246 list_del(&cmd->list);
1247 mgmt_pending_free(cmd);
/* Reply to `opcode` with a Command Complete whose payload is the current
 * settings bitmask. NOTE(review): the trailing sizeof(settings))
 * argument/closing line of the cmd_complete call is omitted from this
 * listing.
 */
1250 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1252 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1254 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1258 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1260 BT_DBG("%s status 0x%02x", hdev->name, status);
1262 if (hci_conn_count(hdev) == 0) {
1263 cancel_delayed_work(&hdev->power_off);
1264 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Append the HCI commands needed to stop whatever discovery is running
 * (inquiry, LE scan, or a pending remote-name resolution) to `req`.
 * Also stops passive LE scanning. Returns whether any stop command was
 * queued. NOTE(review): listing omits the `return true`/`return false`
 * statements, `break`s, the default case and the inquiry-cache entry
 * NULL check — numbering gaps mark the omissions.
 */
1268 static bool hci_stop_discovery(struct hci_request *req)
1270 struct hci_dev *hdev = req->hdev;
1271 struct hci_cp_remote_name_req_cancel cp;
1272 struct inquiry_entry *e;
1274 switch (hdev->discovery.state) {
1275 case DISCOVERY_FINDING:
/* BR/EDR inquiry and LE scan are stopped with different commands. */
1276 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1277 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1279 cancel_delayed_work(&hdev->le_scan_disable);
1280 hci_req_add_le_scan_disable(req);
1285 case DISCOVERY_RESOLVING:
1286 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1291 bacpy(&cp.bdaddr, &e->data.bdaddr);
1292 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1298 /* Passive scanning */
1299 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1300 hci_req_add_le_scan_disable(req);
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising, stop
 * discovery, then disconnect/cancel/reject every connection according
 * to its state (reason 0x15, "Remote Device Terminated: Power Off").
 * NOTE(review): listing omits the `int err;` declaration, the
 * `u8 scan = 0x00;` local, the switch case labels (BT_CONNECTED etc.),
 * `break`s, the second arguments of several hci_req_add calls and the
 * final `return err;` — numbering gaps mark the omissions.
 */
1310 static int clean_up_hci_state(struct hci_dev *hdev)
1312 struct hci_request req;
1313 struct hci_conn *conn;
1314 bool discov_stopped;
1317 hci_req_init(&req, hdev);
1319 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1320 test_bit(HCI_PSCAN, &hdev->flags)) {
1322 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1325 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1326 disable_advertising(&req);
1328 discov_stopped = hci_stop_discovery(&req);
1330 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1331 struct hci_cp_disconnect dc;
1332 struct hci_cp_reject_conn_req rej;
1334 switch (conn->state) {
/* Established link: plain disconnect. */
1337 dc.handle = cpu_to_le16(conn->handle);
1338 dc.reason = 0x15; /* Terminated due to Power Off */
1339 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing connection still in progress: cancel it. */
1342 if (conn->type == LE_LINK)
1343 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1345 else if (conn->type == ACL_LINK)
1346 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it. */
1350 bacpy(&rej.bdaddr, &conn->dst);
1351 rej.reason = 0x15; /* Terminated due to Power Off */
1352 if (conn->type == ACL_LINK)
1353 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1355 else if (conn->type == SCO_LINK)
1356 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1362 err = hci_req_run(&req, clean_up_hci_complete);
1363 if (!err && discov_stopped)
1364 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validate the 0/1 parameter, reject a
 * duplicate in-flight request, short-circuit when already in the target
 * state, then either queue power_on or run clean_up_hci_state() followed
 * by a delayed power_off. The AUTO_OFF path completes the transition
 * inline via mgmt_powered(). NOTE(review): listing omits the `u16 len`
 * parameter tail, hci_dev_lock(), the MGMT_STATUS_BUSY argument, the
 * `goto failed` jumps, the cmd NULL check and the `failed:` label with
 * `return err;` — numbering gaps mark the omissions.
 */
1369 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1372 struct mgmt_mode *cp = data;
1373 struct pending_cmd *cmd;
1376 BT_DBG("request for %s", hdev->name);
1378 if (cp->val != 0x00 && cp->val != 0x01)
1379 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1380 MGMT_STATUS_INVALID_PARAMS);
1384 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1385 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: the device is already up, just finalize state. */
1390 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1391 cancel_delayed_work(&hdev->power_off);
1394 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1396 err = mgmt_powered(hdev, 1);
1401 if (!!cp->val == hdev_is_powered(hdev)) {
1402 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1406 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1413 queue_work(hdev->req_workqueue, &hdev->power_on);
1416 /* Disconnect connections, stop scans, etc */
1417 err = clean_up_hci_state(hdev);
1419 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1420 HCI_POWER_OFF_TIMEOUT);
1422 /* ENODATA means there were no HCI commands queued */
1423 if (err == -ENODATA) {
1424 cancel_delayed_work(&hdev->power_off);
1425 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1431 hci_dev_unlock(hdev);
/* Broadcast a New Settings event carrying the current settings bitmask,
 * skipping the socket that triggered the change. NOTE(review): the
 * `__le32 ev;` declaration line is omitted from this listing.
 */
1435 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1439 ev = cpu_to_le32(get_current_settings(hdev));
1441 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Public wrapper: broadcast New Settings to every mgmt socket. */
1444 int mgmt_new_settings(struct hci_dev *hdev)
1446 return new_settings(hdev, NULL);
1451 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer each pending command with the
 * current settings, unlink it, and remember the first socket seen in
 * match->sk (with a reference held) so the caller can skip it when
 * broadcasting New Settings.
 */
1455 static void settings_rsp(struct pending_cmd *cmd, void *data)
1457 	struct cmd_lookup *match = data;
1459 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1461 	list_del(&cmd->list);
1463 	if (match->sk == NULL) {
1464 		match->sk = cmd->sk;
/* Hold the socket; the caller is responsible for the matching sock_put(). */
1465 		sock_hold(match->sk);
1468 	mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail the pending command with the status
 * code passed via @data (a u8 pointer) and remove it from the list.
 */
1471 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1475 	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1476 	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: if the command registered a per-command
 * cmd_complete handler, invoke it with the status and remove the command;
 * otherwise fall back to a plain command-status response.
 */
1479 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1481 	if (cmd->cmd_complete) {
1484 		cmd->cmd_complete(cmd, *status);
1485 		mgmt_pending_remove(cmd);
/* No cmd_complete handler registered: reply with a bare status instead. */
1490 	cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's stored parameters back
 * as the Command Complete payload.
 */
1493 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1495 	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1496 			    cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: return only the leading
 * mgmt_addr_info portion of the stored parameters.
 */
1499 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1501 	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1502 			    sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is present but disabled,
 * SUCCESS otherwise.
 */
1505 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1507 	if (!lmp_bredr_capable(hdev))
1508 		return MGMT_STATUS_NOT_SUPPORTED;
1509 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1510 		return MGMT_STATUS_REJECTED;
1512 	return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status: NOT_SUPPORTED when the controller
 * lacks LE, REJECTED when LE is present but disabled, SUCCESS otherwise.
 */
1515 static u8 mgmt_le_support(struct hci_dev *hdev)
1517 	if (!lmp_le_capable(hdev))
1518 		return MGMT_STATUS_NOT_SUPPORTED;
1519 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1520 		return MGMT_STATUS_REJECTED;
1522 	return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable: sync the
 * HCI_DISCOVERABLE flag with the outcome, arm the discoverable timeout,
 * answer the pending mgmt command and broadcast New Settings.
 * NOTE(review): several lines (locking, success branch structure) are
 * elided in this extract.
 */
1525 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1528 	struct pending_cmd *cmd;
1529 	struct mgmt_mode *cp;
1530 	struct hci_request req;
1533 	BT_DBG("status 0x%02x", status);
1537 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* HCI failure: report the translated error and undo the limited flag. */
1542 		u8 mgmt_err = mgmt_status(status);
1543 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1544 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1550 		changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Non-zero timeout (stored in seconds): schedule delayed discoverable-off. */
1553 		if (hdev->discov_timeout > 0) {
1554 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1555 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1559 		changed = test_and_clear_bit(HCI_DISCOVERABLE,
1563 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1566 		new_settings(hdev, cmd->sk);
1568 	/* When the discoverable mode gets changed, make sure
1569 	 * that class of device has the limited discoverable
1570 	 * bit correctly set. Also update page scan based on whitelist
1573 	hci_req_init(&req, hdev);
1574 	__hci_update_page_scan(&req);
1576 	hci_req_run(&req, NULL);
1579 	mgmt_pending_remove(cmd);
1582 	hci_dev_unlock(hdev);
/* MGMT Set Discoverable command handler.  Values: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable (limited requires a timeout;
 * off forbids one).  Issues Write Current IAC LAP / Write Scan Enable for
 * BR/EDR, or just refreshes advertising data on LE-only controllers.
 * NOTE(review): many lines (locks, gotos, some branch bodies) are elided
 * in this extract.
 */
1585 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1588 	struct mgmt_cp_set_discoverable *cp = data;
1589 	struct pending_cmd *cmd;
1590 	struct hci_request req;
1595 	BT_DBG("request for %s", hdev->name);
/* Need at least one of BR/EDR or LE enabled to be discoverable at all. */
1597 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1598 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1599 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 				  MGMT_STATUS_REJECTED);
1602 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1603 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1604 				  MGMT_STATUS_INVALID_PARAMS);
1606 	timeout = __le16_to_cpu(cp->timeout);
1608 	/* Disabling discoverable requires that no timeout is set,
1609 	 * and enabling limited discoverable requires a timeout.
1611 	if ((cp->val == 0x00 && timeout > 0) ||
1612 	    (cp->val == 0x02 && timeout == 0))
1613 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1614 				  MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1618 	if (!hdev_is_powered(hdev) && timeout > 0) {
1619 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1620 				  MGMT_STATUS_NOT_POWERED);
/* Discoverable and connectable changes must not race each other. */
1624 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1625 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1626 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Being discoverable requires being connectable first. */
1631 	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1632 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1633 				  MGMT_STATUS_REJECTED);
/* Powered off: only toggle the flag; HCI traffic happens at power on. */
1637 	if (!hdev_is_powered(hdev)) {
1638 		bool changed = false;
1640 		/* Setting limited discoverable when powered off is
1641 		 * not a valid operation since it requires a timeout
1642 		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1644 		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1645 			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1649 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1654 			err = new_settings(hdev, sk);
1659 	/* If the current mode is the same, then just update the timeout
1660 	 * value with the new value. And if only the timeout gets updated,
1661 	 * then no need for any HCI transactions.
1663 	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1664 	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1665 					  &hdev->dev_flags)) {
1666 		cancel_delayed_work(&hdev->discov_off);
1667 		hdev->discov_timeout = timeout;
1669 		if (cp->val && hdev->discov_timeout > 0) {
1670 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1671 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1675 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1679 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1685 	/* Cancel any potential discoverable timeout that might be
1686 	 * still active and store new timeout value. The arming of
1687 	 * the timeout happens in the complete handler.
1689 	cancel_delayed_work(&hdev->discov_off);
1690 	hdev->discov_timeout = timeout;
1692 	/* Limited discoverable mode */
1693 	if (cp->val == 0x02)
1694 		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1696 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1698 	hci_req_init(&req, hdev);
1700 	/* The procedure for LE-only controllers is much simpler - just
1701 	 * update the advertising data.
1703 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1709 		struct hci_cp_write_current_iac_lap hci_cp;
1711 		if (cp->val == 0x02) {
1712 			/* Limited discoverable mode */
/* LIAC (0x9e8b00) and GIAC (0x9e8b33) LAPs, little-endian byte order. */
1713 			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1714 			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
1715 			hci_cp.iac_lap[1] = 0x8b;
1716 			hci_cp.iac_lap[2] = 0x9e;
1717 			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
1718 			hci_cp.iac_lap[4] = 0x8b;
1719 			hci_cp.iac_lap[5] = 0x9e;
1721 			/* General discoverable mode */
1723 			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
1724 			hci_cp.iac_lap[1] = 0x8b;
1725 			hci_cp.iac_lap[2] = 0x9e;
/* Parameter length is 1 byte for num_iac plus 3 bytes per LAP. */
1728 		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1729 			    (hci_cp.num_iac * 3) + 1, &hci_cp);
1731 		scan |= SCAN_INQUIRY;
1733 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1736 	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1739 		update_adv_data(&req);
1741 	err = hci_req_run(&req, set_discoverable_complete);
1743 		mgmt_pending_remove(cmd);
1746 	hci_dev_unlock(hdev);
/* Queue HCI commands that switch page scan between fast (interlaced scan,
 * 160 ms interval) and standard (1.28 s interval) parameters.  Skipped on
 * non-BR/EDR controllers and on pre-1.2 controllers, and each command is
 * only queued when it would actually change the current value.
 */
1750 static void write_fast_connectable(struct hci_request *req, bool enable)
1752 	struct hci_dev *hdev = req->hdev;
1753 	struct hci_cp_write_page_scan_activity acp;
1756 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Page scan type/activity commands require Bluetooth 1.2 or later. */
1759 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1763 		type = PAGE_SCAN_TYPE_INTERLACED;
1765 		/* 160 msec page scan interval */
1766 		acp.interval = cpu_to_le16(0x0100);
1768 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1770 		/* default 1.28 sec page scan */
1771 		acp.interval = cpu_to_le16(0x0800);
/* 11.25 ms scan window, shared by both modes. */
1774 	acp.window = cpu_to_le16(0x0012);
1776 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1777 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1778 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1781 	if (hdev->page_scan_type != type)
1782 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable: sync HCI_CONNECTABLE
 * (and, when disabling, HCI_DISCOVERABLE) with the outcome, answer the
 * pending command and broadcast New Settings if anything changed.
 * NOTE(review): locking and some branch structure are elided here.
 */
1785 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1788 	struct pending_cmd *cmd;
1789 	struct mgmt_mode *cp;
1790 	bool conn_changed, discov_changed;
1792 	BT_DBG("status 0x%02x", status);
1796 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1801 		u8 mgmt_err = mgmt_status(status);
1802 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1808 		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1810 		discov_changed = false;
/* Turning connectable off also drops discoverable. */
1812 		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1814 		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1818 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1820 	if (conn_changed || discov_changed) {
1821 		new_settings(hdev, cmd->sk);
1822 		hci_update_page_scan(hdev);
1824 			mgmt_update_adv_data(hdev);
1825 		hci_update_background_scan(hdev);
1829 	mgmt_pending_remove(cmd);
1832 	hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (no HCI traffic needed): toggle
 * HCI_CONNECTABLE, clear HCI_DISCOVERABLE when disabling, reply to the
 * caller and, on change, refresh scans and broadcast New Settings.
 */
1835 static int set_connectable_update_settings(struct hci_dev *hdev,
1836 					   struct sock *sk, u8 val)
1838 	bool changed = false;
1841 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1845 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1847 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1848 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1851 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1856 		hci_update_page_scan(hdev);
1857 		hci_update_background_scan(hdev);
1858 		return new_settings(hdev, sk);
/* MGMT Set Connectable command handler: adjusts page scan on BR/EDR
 * controllers and advertising flags on LE-only ones.  Falls back to the
 * flag-only settings update when the device is powered off or when running
 * the request produced no HCI commands (-ENODATA).
 * NOTE(review): locking, gotos and some statements are elided here.
 */
1864 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1867 	struct mgmt_mode *cp = data;
1868 	struct pending_cmd *cmd;
1869 	struct hci_request req;
1873 	BT_DBG("request for %s", hdev->name);
1875 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1876 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1877 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1878 				  MGMT_STATUS_REJECTED);
1880 	if (cp->val != 0x00 && cp->val != 0x01)
1881 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1882 				  MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flags only, no HCI traffic possible. */
1886 	if (!hdev_is_powered(hdev)) {
1887 		err = set_connectable_update_settings(hdev, sk, cp->val);
1891 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1892 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1893 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1898 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1904 	hci_req_init(&req, hdev);
1906 	/* If BR/EDR is not enabled and we disable advertising as a
1907 	 * by-product of disabling connectable, we need to update the
1908 	 * advertising flags.
1910 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1912 			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1913 			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1915 		update_adv_data(&req);
1916 	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1920 			/* If we don't have any whitelist entries just
1921 			 * disable all scanning. If there are entries
1922 			 * and we had both page and inquiry scanning
1923 			 * enabled then fall back to only page scanning.
1924 			 * Otherwise no changes are needed.
1926 			if (list_empty(&hdev->whitelist))
1927 				scan = SCAN_DISABLED;
1928 			else if (test_bit(HCI_ISCAN, &hdev->flags))
1931 				goto no_scan_update;
/* Dropping inquiry scan makes a pending discoverable timeout moot. */
1933 		if (test_bit(HCI_ISCAN, &hdev->flags) &&
1934 		    hdev->discov_timeout > 0)
1935 			cancel_delayed_work(&hdev->discov_off);
1938 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1942 	/* If we're going from non-connectable to connectable or
1943 	 * vice-versa when fast connectable is enabled ensure that fast
1944 	 * connectable gets disabled. write_fast_connectable won't do
1945 	 * anything if the page scan parameters are already what they
1948 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1949 		write_fast_connectable(&req, false);
1951 	/* Update the advertising parameters if necessary */
1952 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1953 		enable_advertising(&req);
1955 	err = hci_req_run(&req, set_connectable_complete);
1957 		mgmt_pending_remove(cmd);
/* ENODATA: the request queued nothing, so settle via flags alone. */
1958 		if (err == -ENODATA)
1959 			err = set_connectable_update_settings(hdev, sk,
1965 	hci_dev_unlock(hdev);
/* MGMT Set Bondable command handler: purely a host-side flag toggle
 * (HCI_BONDABLE) — no HCI commands are needed.  Broadcasts New Settings
 * when the flag actually changed.
 */
1969 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1972 	struct mgmt_mode *cp = data;
1976 	BT_DBG("request for %s", hdev->name);
1978 	if (cp->val != 0x00 && cp->val != 0x01)
1979 		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1980 				  MGMT_STATUS_INVALID_PARAMS);
1985 		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1987 		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1989 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1994 		err = new_settings(hdev, sk);
1997 	hci_dev_unlock(hdev);
/* MGMT Set Link Security command handler (BR/EDR authentication enable).
 * Powered off: toggle HCI_LINK_SECURITY flag only.  Powered on: send
 * HCI Write Auth Enable unless the controller already matches.
 * NOTE(review): braces/gotos and the val assignment are elided here.
 */
2001 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2004 	struct mgmt_mode *cp = data;
2005 	struct pending_cmd *cmd;
2009 	BT_DBG("request for %s", hdev->name);
2011 	status = mgmt_bredr_support(hdev);
2013 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2016 	if (cp->val != 0x00 && cp->val != 0x01)
2017 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2018 				  MGMT_STATUS_INVALID_PARAMS);
2022 	if (!hdev_is_powered(hdev)) {
2023 		bool changed = false;
2025 		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2026 					  &hdev->dev_flags)) {
2027 			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2031 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2036 			err = new_settings(hdev, sk);
2041 	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2042 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: just ack. */
2049 	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2050 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2054 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2060 	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2062 		mgmt_pending_remove(cmd);
2067 	hci_dev_unlock(hdev);
/* MGMT Set Secure Simple Pairing command handler.  Powered off: toggle
 * HCI_SSP_ENABLED (disabling SSP also drops High Speed).  Powered on:
 * send HCI Write SSP Mode, first clearing SSP debug mode when SSP is
 * being turned off while debug keys were in use.
 * NOTE(review): braces/else-paths are elided in this extract.
 */
2071 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2073 	struct mgmt_mode *cp = data;
2074 	struct pending_cmd *cmd;
2078 	BT_DBG("request for %s", hdev->name);
2080 	status = mgmt_bredr_support(hdev);
2082 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2084 	if (!lmp_ssp_capable(hdev))
2085 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2086 				  MGMT_STATUS_NOT_SUPPORTED);
2088 	if (cp->val != 0x00 && cp->val != 0x01)
2089 		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2090 				  MGMT_STATUS_INVALID_PARAMS);
2094 	if (!hdev_is_powered(hdev)) {
2098 			changed = !test_and_set_bit(HCI_SSP_ENABLED,
2101 			changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* HS depends on SSP, so disabling SSP also clears HS. */
2104 				changed = test_and_clear_bit(HCI_HS_ENABLED,
2107 				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2110 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2115 			err = new_settings(hdev, sk);
2120 	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2121 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2126 	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2127 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2131 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* cp->val is 0x00 here, conveniently also disabling debug mode. */
2137 	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2138 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2139 			     sizeof(cp->val), &cp->val);
2141 	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2143 		mgmt_pending_remove(cmd);
2148 	hci_dev_unlock(hdev);
/* MGMT Set High Speed command handler: host-side HCI_HS_ENABLED flag
 * toggle.  Requires BR/EDR, SSP capability and SSP enabled; disabling HS
 * while powered is rejected (per the visible branch).
 * NOTE(review): branch structure around lines 2186-2194 is elided.
 */
2152 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2154 	struct mgmt_mode *cp = data;
2159 	BT_DBG("request for %s", hdev->name);
2161 	status = mgmt_bredr_support(hdev);
2163 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2165 	if (!lmp_ssp_capable(hdev))
2166 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2167 				  MGMT_STATUS_NOT_SUPPORTED);
/* HS is layered on SSP: reject when SSP is off. */
2169 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2170 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2171 				  MGMT_STATUS_REJECTED);
2173 	if (cp->val != 0x00 && cp->val != 0x01)
2174 		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2175 				  MGMT_STATUS_INVALID_PARAMS);
/* Avoid racing with an in-flight Set SSP, which also touches HS state. */
2179 	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2180 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2186 		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2188 		if (hdev_is_powered(hdev)) {
2189 			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2190 					 MGMT_STATUS_REJECTED);
2194 		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2197 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2202 		err = new_settings(hdev, sk);
2205 	hci_dev_unlock(hdev);
/* HCI request completion handler for Set LE: answer all pending SET_LE
 * commands (error path via cmd_status_rsp, success via settings_rsp),
 * broadcast New Settings, and if LE ended up enabled refresh advertising
 * data, scan response data and background scanning.
 * NOTE(review): locking and error-branch structure are elided here.
 */
2209 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2211 	struct cmd_lookup match = { NULL, hdev };
2216 		u8 mgmt_err = mgmt_status(status);
2218 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2223 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2225 	new_settings(hdev, match.sk);
2230 	/* Make sure the controller has a good default for
2231 	 * advertising data. Restrict the update to when LE
2232 	 * has actually been enabled. During power on, the
2233 	 * update in powered_update_hci will take care of it.
2235 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2236 		struct hci_request req;
2238 		hci_req_init(&req, hdev);
2239 		update_adv_data(&req);
2240 		update_scan_rsp_data(&req);
2241 		__hci_update_background_scan(&req);
2242 		hci_req_run(&req, NULL);
2246 	hci_dev_unlock(hdev);
/* MGMT Set Low Energy command handler: toggles host LE support via HCI
 * Write LE Host Supported.  Dual-mode only — LE-only controllers cannot
 * turn LE off.  When powered off or when the controller already matches,
 * only the HCI_LE_ENABLED flag is toggled (and HCI_ADVERTISING cleared on
 * disable).  NOTE(review): braces, gotos and the hci_cp.le assignment are
 * elided in this extract.
 */
2249 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2251 	struct mgmt_mode *cp = data;
2252 	struct hci_cp_write_le_host_supported hci_cp;
2253 	struct pending_cmd *cmd;
2254 	struct hci_request req;
2258 	BT_DBG("request for %s", hdev->name);
2260 	if (!lmp_le_capable(hdev))
2261 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2262 				  MGMT_STATUS_NOT_SUPPORTED);
2264 	if (cp->val != 0x00 && cp->val != 0x01)
2265 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2266 				  MGMT_STATUS_INVALID_PARAMS);
2268 	/* LE-only devices do not allow toggling LE on/off */
2269 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2270 		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2271 				  MGMT_STATUS_REJECTED);
2276 	enabled = lmp_host_le_capable(hdev);
2278 	if (!hdev_is_powered(hdev) || val == enabled) {
2279 		bool changed = false;
2281 		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2282 			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* LE off implies advertising off as well. */
2286 		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2287 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2291 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2296 			err = new_settings(hdev, sk);
/* Set LE and Set Advertising both touch LE state; serialize them. */
2301 	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2302 	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2303 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2308 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2314 	hci_req_init(&req, hdev);
2316 	memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE + BR/EDR is deprecated; always report not supported. */
2320 		hci_cp.simul = 0x00;
2322 		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2323 			disable_advertising(&req);
2326 	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2329 	err = hci_req_run(&req, le_enable_complete);
2331 		mgmt_pending_remove(cmd);
2334 	hci_dev_unlock(hdev);
2338 /* This is a helper function to test for pending mgmt commands that can
2339  * cause CoD or EIR HCI commands. We can only allow one such pending
2340  * mgmt command at a time since otherwise we cannot easily track what
2341  * the current values are, will be, and based on that calculate if a new
2342  * HCI command needs to be sent and if yes with what value.
2344 static bool pending_eir_or_class(struct hci_dev *hdev)
2346 	struct pending_cmd *cmd;
/* Scan the whole pending list for any opcode that may touch EIR/CoD. */
2348 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2349 		switch (cmd->opcode) {
2350 		case MGMT_OP_ADD_UUID:
2351 		case MGMT_OP_REMOVE_UUID:
2352 		case MGMT_OP_SET_DEV_CLASS:
2353 		case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used to detect 16/32-bit short-form UUIDs.
 */
2361 static const u8 bluetooth_base_uuid[] = {
2362 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2363 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes differ from the Bluetooth
 * Base UUID it is a full 128-bit UUID; otherwise the remaining 32-bit
 * value decides between 16- and 32-bit short forms (return values for
 * those branches are elided in this extract).
 */
2366 static u8 get_uuid_size(const u8 *uuid)
2370 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2373 	val = get_unaligned_le32(&uuid[12]);
/* Common completion for class-of-device related commands (Add/Remove UUID,
 * Set Device Class): answer the pending @mgmt_op command with the current
 * 3-byte device class and remove it.
 */
2380 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2382 	struct pending_cmd *cmd;
2386 	cmd = mgmt_pending_find(mgmt_op, hdev);
2390 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2391 		     hdev->dev_class, 3);
2393 	mgmt_pending_remove(cmd);
2396 	hci_dev_unlock(hdev);
/* HCI request completion handler for Add UUID. */
2399 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2401 	BT_DBG("status 0x%02x", status);
2403 	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID command handler: append the UUID to hdev->uuids and run an
 * HCI request (CoD/EIR updates, queued in the elided lines) to publish it.
 * -ENODATA from hci_req_run means nothing needed sending, so complete
 * immediately with the current device class.
 */
2406 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2408 	struct mgmt_cp_add_uuid *cp = data;
2409 	struct pending_cmd *cmd;
2410 	struct hci_request req;
2411 	struct bt_uuid *uuid;
2414 	BT_DBG("request for %s", hdev->name);
/* Only one EIR/CoD-affecting command may be pending at a time. */
2418 	if (pending_eir_or_class(hdev)) {
2419 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2424 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2430 	memcpy(uuid->uuid, cp->uuid, 16);
2431 	uuid->svc_hint = cp->svc_hint;
2432 	uuid->size = get_uuid_size(cp->uuid);
2434 	list_add_tail(&uuid->list, &hdev->uuids);
2436 	hci_req_init(&req, hdev);
2441 	err = hci_req_run(&req, add_uuid_complete);
2443 		if (err != -ENODATA)
2446 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2447 				   hdev->dev_class, 3);
2451 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2460 	hci_dev_unlock(hdev);
/* Arm the service cache on a powered controller: set HCI_SERVICE_CACHE and
 * schedule the delayed flush work if it was not already set.  Return value
 * semantics depend on elided lines — presumably true when the cache state
 * allows deferring CoD/EIR updates; confirm against the full file.
 */
2464 static bool enable_service_cache(struct hci_dev *hdev)
2466 	if (!hdev_is_powered(hdev))
2469 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2470 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion handler for Remove UUID. */
2478 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2480 	BT_DBG("status 0x%02x", status);
2482 	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID command handler.  An all-zero UUID clears the whole
 * list (deferring CoD/EIR traffic via the service cache when possible);
 * otherwise every matching entry is unlinked.  INVALID_PARAMS when no
 * entry matched (the match-count check itself is elided).
 */
2485 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2488 	struct mgmt_cp_remove_uuid *cp = data;
2489 	struct pending_cmd *cmd;
2490 	struct bt_uuid *match, *tmp;
2491 	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2492 	struct hci_request req;
2495 	BT_DBG("request for %s", hdev->name);
2499 	if (pending_eir_or_class(hdev)) {
2500 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard (all-zero) UUID removes every registered UUID at once. */
2505 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2506 		hci_uuids_clear(hdev);
2508 		if (enable_service_cache(hdev)) {
2509 			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2510 					   0, hdev->dev_class, 3);
/* _safe variant: entries are deleted while walking the list. */
2519 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2520 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2523 		list_del(&match->list);
2529 		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2530 				 MGMT_STATUS_INVALID_PARAMS);
2535 	hci_req_init(&req, hdev);
2540 	err = hci_req_run(&req, remove_uuid_complete);
2542 		if (err != -ENODATA)
2545 		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2546 				   hdev->dev_class, 3);
2550 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2559 	hci_dev_unlock(hdev);
/* HCI request completion handler for Set Device Class. */
2563 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2565 	BT_DBG("status 0x%02x", status);
2567 	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class command handler: store major/minor class and run
 * an HCI request to push the new class (and EIR) to the controller.
 * Reserved bits (minor low 2, major high 3) must be zero.  If the service
 * cache was armed, it is flushed synchronously first.
 * NOTE(review): some lines (update_class/update_eir queuing) are elided.
 */
2570 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2573 	struct mgmt_cp_set_dev_class *cp = data;
2574 	struct pending_cmd *cmd;
2575 	struct hci_request req;
2578 	BT_DBG("request for %s", hdev->name);
2580 	if (!lmp_bredr_capable(hdev))
2581 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2582 				  MGMT_STATUS_NOT_SUPPORTED);
2586 	if (pending_eir_or_class(hdev)) {
2587 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reject values using reserved CoD bits. */
2592 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2593 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2594 				 MGMT_STATUS_INVALID_PARAMS);
2598 	hdev->major_class = cp->major;
2599 	hdev->minor_class = cp->minor;
/* Powered off: values take effect at next power on; just ack now. */
2601 	if (!hdev_is_powered(hdev)) {
2602 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2603 				   hdev->dev_class, 3);
2607 	hci_req_init(&req, hdev);
/* Drop the lock for cancel_delayed_work_sync: the work item itself may
 * need the lock, so holding it here could deadlock. */
2609 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2610 		hci_dev_unlock(hdev);
2611 		cancel_delayed_work_sync(&hdev->service_cache);
2618 	err = hci_req_run(&req, set_class_complete);
2620 		if (err != -ENODATA)
2623 		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2624 				   hdev->dev_class, 3);
2628 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2637 	hci_dev_unlock(hdev);
/* MGMT Load Link Keys command handler: validate the variable-length key
 * list, replace the stored BR/EDR link keys wholesale, and sync the
 * keep-debug-keys flag.  max_key_count bounds key_count so that
 * expected_len cannot overflow u16.
 * NOTE(review): braces and else-paths are elided in this extract.
 */
2641 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2644 	struct mgmt_cp_load_link_keys *cp = data;
2645 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2646 				   sizeof(struct mgmt_link_key_info));
2647 	u16 key_count, expected_len;
2651 	BT_DBG("request for %s", hdev->name);
2653 	if (!lmp_bredr_capable(hdev))
2654 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2655 				  MGMT_STATUS_NOT_SUPPORTED);
2657 	key_count = __le16_to_cpu(cp->key_count);
2658 	if (key_count > max_key_count) {
2659 		BT_ERR("load_link_keys: too big key_count value %u",
2661 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2662 				  MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared key count. */
2665 	expected_len = sizeof(*cp) + key_count *
2666 					sizeof(struct mgmt_link_key_info);
2667 	if (expected_len != len) {
2668 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2670 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2671 				  MGMT_STATUS_INVALID_PARAMS);
2674 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2675 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2676 				  MGMT_STATUS_INVALID_PARAMS);
2678 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the stored keys. */
2681 	for (i = 0; i < key_count; i++) {
2682 		struct mgmt_link_key_info *key = &cp->keys[i];
2684 		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2685 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2686 					  MGMT_STATUS_INVALID_PARAMS);
/* Loading replaces the entire key store. */
2691 	hci_link_keys_clear(hdev);
2694 		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2697 		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2701 		new_settings(hdev, NULL);
2703 	for (i = 0; i < key_count; i++) {
2704 		struct mgmt_link_key_info *key = &cp->keys[i];
2706 		/* Always ignore debug keys and require a new pairing if
2707 		 * the user wants to use them.
2709 		if (key->type == HCI_LK_DEBUG_COMBINATION)
2712 		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2713 				 key->type, key->pin_len, NULL);
2716 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2718 	hci_dev_unlock(hdev);
/* Emit an MGMT Device Unpaired event for the given address to all mgmt
 * sockets except @skip_sk.
 */
2723 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2724 			   u8 addr_type, struct sock *skip_sk)
2726 	struct mgmt_ev_device_unpaired ev;
2728 	bacpy(&ev.addr.bdaddr, bdaddr);
2729 	ev.addr.type = addr_type;
2731 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device command handler: delete stored keys (link key for
 * BR/EDR; IRK + LTK for LE) and, when cp->disconnect is set and the device
 * is connected, terminate the link before completing.
 * NOTE(review): braces and a few statements are elided in this extract.
 */
2735 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2738 	struct mgmt_cp_unpair_device *cp = data;
2739 	struct mgmt_rp_unpair_device rp;
2740 	struct hci_cp_disconnect dc;
2741 	struct pending_cmd *cmd;
2742 	struct hci_conn *conn;
/* The response always echoes the target address, even on failure. */
2745 	memset(&rp, 0, sizeof(rp));
2746 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2747 	rp.addr.type = cp->addr.type;
2749 	if (!bdaddr_type_is_valid(cp->addr.type))
2750 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2751 				    MGMT_STATUS_INVALID_PARAMS,
2754 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2755 		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2756 				    MGMT_STATUS_INVALID_PARAMS,
2761 	if (!hdev_is_powered(hdev)) {
2762 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2763 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2767 	if (cp->addr.type == BDADDR_BREDR) {
2768 		/* If disconnection is requested, then look up the
2769 		 * connection. If the remote device is connected, it
2770 		 * will be later used to terminate the link.
2772 		 * Setting it to NULL explicitly will cause no
2773 		 * termination of the link.
2776 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2781 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2785 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2788 			/* Defer clearing up the connection parameters
2789 			 * until closing to give a chance of keeping
2790 			 * them if a repairing happens.
2792 			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2794 			/* If disconnection is not requested, then
2795 			 * clear the connection variable so that the
2796 			 * link is not terminated.
2798 			if (!cp->disconnect)
2802 		if (cp->addr.type == BDADDR_LE_PUBLIC)
2803 			addr_type = ADDR_LE_DEV_PUBLIC;
2805 			addr_type = ADDR_LE_DEV_RANDOM;
2807 		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2809 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* Key removal failed: nothing was paired for this address. */
2813 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2814 				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2818 	/* If the connection variable is set, then termination of the
2819 	 * link is requested.
2822 		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2824 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2828 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2835 	cmd->cmd_complete = addr_cmd_complete;
2837 	dc.handle = cpu_to_le16(conn->handle);
2838 	dc.reason = 0x13; /* Remote User Terminated Connection */
2839 	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2841 		mgmt_pending_remove(cmd);
2844 	hci_dev_unlock(hdev);
/* MGMT Disconnect command handler: look up the ACL (BR/EDR) or LE
 * connection for the given address and terminate it with Remote User
 * Terminated; the pending command is completed from the disconnect event.
 * NOTE(review): braces/gotos are elided in this extract.
 */
2848 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2851 	struct mgmt_cp_disconnect *cp = data;
2852 	struct mgmt_rp_disconnect rp;
2853 	struct pending_cmd *cmd;
2854 	struct hci_conn *conn;
2859 	memset(&rp, 0, sizeof(rp));
2860 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2861 	rp.addr.type = cp->addr.type;
2863 	if (!bdaddr_type_is_valid(cp->addr.type))
2864 		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2865 				    MGMT_STATUS_INVALID_PARAMS,
2870 	if (!test_bit(HCI_UP, &hdev->flags)) {
2871 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2872 				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one Disconnect at a time. */
2876 	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2877 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2878 				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2882 	if (cp->addr.type == BDADDR_BREDR)
2883 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2886 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2888 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2889 		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2890 				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2894 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2900 	cmd->cmd_complete = generic_cmd_complete;
2902 	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2904 		mgmt_pending_remove(cmd);
2907 	hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type into the MGMT BDADDR_*
 * address-type encoding.  (The case labels for the link types are elided
 * in this extract.)
 */
2911 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2913 	switch (link_type) {
2915 		switch (addr_type) {
2916 		case ADDR_LE_DEV_PUBLIC:
2917 			return BDADDR_LE_PUBLIC;
2920 			/* Fallback to LE Random address type */
2921 			return BDADDR_LE_RANDOM;
2925 		/* Fallback to BR/EDR type */
2926 		return BDADDR_BREDR;
/* MGMT Get Connections command handler: build a response listing every
 * mgmt-visible connection, skipping SCO/eSCO links.  The list is walked
 * twice — once to size the allocation, once to fill it — and the final
 * length is recomputed since SCO entries get filtered during the fill.
 */
2930 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2933 	struct mgmt_rp_get_connections *rp;
2943 	if (!hdev_is_powered(hdev)) {
2944 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2945 				 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the allocation. */
2950 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2951 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2955 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2956 	rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, leaving out SCO/eSCO links. */
2963 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2964 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2966 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2967 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2968 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2973 	rp->conn_count = cpu_to_le16(i);
2975 	/* Recalculate length in case of filtered SCO connections, etc */
2976 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2978 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2984 	hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply: register the pending mgmt command and
 * send HCI PIN Code Negative Reply (only the bdaddr field of the params);
 * unregister the pending command if the HCI send fails.
 */
2988 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2989 				   struct mgmt_cp_pin_code_neg_reply *cp)
2991 	struct pending_cmd *cmd;
2994 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2999 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3000 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3002 		mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply command handler: forward the user-supplied PIN to
 * the controller.  If the connection is pending high security, a PIN
 * shorter than 16 bytes is converted into a negative reply instead.
 * NOTE(review): braces/gotos are elided in this extract.
 */
3007 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3010 	struct hci_conn *conn;
3011 	struct mgmt_cp_pin_code_reply *cp = data;
3012 	struct hci_cp_pin_code_reply reply;
3013 	struct pending_cmd *cmd;
3020 	if (!hdev_is_powered(hdev)) {
3021 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3022 				 MGMT_STATUS_NOT_POWERED);
3026 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3028 		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3029 				 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise reject pairing. */
3033 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3034 		struct mgmt_cp_pin_code_neg_reply ncp;
3036 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3038 		BT_ERR("PIN code is not 16 bytes long");
3040 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
3042 			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3043 					 MGMT_STATUS_INVALID_PARAMS);
3048 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3054 	cmd->cmd_complete = addr_cmd_complete;
3056 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3057 	reply.pin_len = cp->pin_len;
3058 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3060 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3062 		mgmt_pending_remove(cmd);
3065 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validates the requested IO
 * capability (must not exceed SMP_IO_KEYBOARD_DISPLAY) and stores it
 * in hdev->io_capability under the dev lock.
 * NOTE(review): lossy extract — some lines missing.
 */
3069 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3072 struct mgmt_cp_set_io_capability *cp = data;
3076 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3077 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3078 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3082 hdev->io_capability = cp->io_capability;
3084 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3085 hdev->io_capability);
3087 hci_dev_unlock(hdev);
3089 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Walk hdev->mgmt_pending looking for the MGMT_OP_PAIR_DEVICE command
 * whose user_data is this connection; presumably returns it (or NULL)
 * — the return statements are not visible in this extract.
 */
3093 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3095 struct hci_dev *hdev = conn->hdev;
3096 struct pending_cmd *cmd;
3098 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3099 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3102 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the mgmt_rp_pair_device reply
 * with the given status, detach all confirm callbacks from the
 * connection so no further events arrive for it, drop the connection
 * reference taken when pairing started, and keep the stored connection
 * parameters (the device is now paired).
 * NOTE(review): lossy extract — some lines missing.
 */
3111 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3113 struct mgmt_rp_pair_device rp;
3114 struct hci_conn *conn = cmd->user_data;
3117 bacpy(&rp.addr.bdaddr, &conn->dst);
3118 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3120 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3123 /* So we don't get further callbacks for this connection */
3124 conn->connect_cfm_cb = NULL;
3125 conn->security_cfm_cb = NULL;
3126 conn->disconn_cfm_cb = NULL;
3128 hci_conn_drop(conn);
3130 /* The device is paired so there is no need to remove
3131 * its connection parameters anymore.
3133 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* SMP/pairing completion notifiers: each looks up the pending
 * PAIR_DEVICE command for the connection via find_pairing(), runs its
 * cmd_complete callback with the mapped status, and removes the
 * pending entry. mgmt_smp_complete() maps the boolean `complete` to a
 * mgmt status; the two *_complete_cb variants are the BR/EDR and LE
 * connection callbacks respectively.
 * NOTE(review): lossy extract — guard branches around the NULL-cmd
 * case are not fully visible.
 */
3140 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3142 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3143 struct pending_cmd *cmd;
3145 cmd = find_pairing(conn);
3147 cmd->cmd_complete(cmd, status);
3148 mgmt_pending_remove(cmd);
3152 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3154 struct pending_cmd *cmd;
3156 BT_DBG("status %u", status);
3158 cmd = find_pairing(conn);
3160 BT_DBG("Unable to find a pending command");
3164 cmd->cmd_complete(cmd, mgmt_status(status));
3165 mgmt_pending_remove(cmd);
3168 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3170 struct pending_cmd *cmd;
3172 BT_DBG("status %u", status);
3177 cmd = find_pairing(conn);
3179 BT_DBG("Unable to find a pending command");
3183 cmd->cmd_complete(cmd, mgmt_status(status));
3184 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validates address type and io_cap,
 * initiates either an ACL (BR/EDR) or LE connection with
 * BT_SECURITY_MEDIUM / dedicated bonding, maps connect errors to mgmt
 * statuses, queues a pending command whose completion is
 * pairing_complete(), and wires the per-transport pairing callbacks.
 * For LE, connection parameters are stored up-front so the device is
 * remembered for future connections.
 * NOTE(review): lossy extract — labels, braces and some statements are
 * missing; comments describe only the visible flow.
 */
3187 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3190 struct mgmt_cp_pair_device *cp = data;
3191 struct mgmt_rp_pair_device rp;
3192 struct pending_cmd *cmd;
3193 u8 sec_level, auth_type;
3194 struct hci_conn *conn;
3199 memset(&rp, 0, sizeof(rp));
3200 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3201 rp.addr.type = cp->addr.type;
/* Parameter validation happens before taking the dev lock. */
3203 if (!bdaddr_type_is_valid(cp->addr.type))
3204 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3205 MGMT_STATUS_INVALID_PARAMS,
3208 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3209 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3210 MGMT_STATUS_INVALID_PARAMS,
3215 if (!hdev_is_powered(hdev)) {
3216 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3217 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3221 sec_level = BT_SECURITY_MEDIUM;
3222 auth_type = HCI_AT_DEDICATED_BONDING;
3224 if (cp->addr.type == BDADDR_BREDR) {
3225 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3230 /* Convert from L2CAP channel address type to HCI address type
3232 if (cp->addr.type == BDADDR_LE_PUBLIC)
3233 addr_type = ADDR_LE_DEV_PUBLIC;
3235 addr_type = ADDR_LE_DEV_RANDOM;
3237 /* When pairing a new device, it is expected to remember
3238 * this device for future connections. Adding the connection
3239 * parameter information ahead of time allows tracking
3240 * of the slave preferred values and will speed up any
3241 * further connection establishment.
3243 * If connection parameters already exist, then they
3244 * will be kept and this function does nothing.
3246 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3248 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3249 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map the connect-attempt errno onto a mgmt error status. */
3256 if (PTR_ERR(conn) == -EBUSY)
3257 status = MGMT_STATUS_BUSY;
3258 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3259 status = MGMT_STATUS_NOT_SUPPORTED;
3260 else if (PTR_ERR(conn) == -ECONNREFUSED)
3261 status = MGMT_STATUS_REJECTED;
3263 status = MGMT_STATUS_CONNECT_FAILED;
3265 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connection that already has a confirm callback is being paired
 * by someone else — report BUSY. */
3271 if (conn->connect_cfm_cb) {
3272 hci_conn_drop(conn);
3273 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3274 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3278 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3281 hci_conn_drop(conn);
3285 cmd->cmd_complete = pairing_complete;
3287 /* For LE, just connecting isn't a proof that the pairing finished */
3288 if (cp->addr.type == BDADDR_BREDR) {
3289 conn->connect_cfm_cb = pairing_complete_cb;
3290 conn->security_cfm_cb = pairing_complete_cb;
3291 conn->disconn_cfm_cb = pairing_complete_cb;
3293 conn->connect_cfm_cb = le_pairing_complete_cb;
3294 conn->security_cfm_cb = le_pairing_complete_cb;
3295 conn->disconn_cfm_cb = le_pairing_complete_cb;
3298 conn->io_capability = cp->io_cap;
3299 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure enough, complete immediately. */
3301 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3302 hci_conn_security(conn, sec_level, auth_type, true)) {
3303 cmd->cmd_complete(cmd, 0);
3304 mgmt_pending_remove(cmd);
3310 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: finds the pending PAIR_DEVICE
 * command, verifies the supplied address matches the connection being
 * paired, and completes it with MGMT_STATUS_CANCELLED before replying
 * success to the caller.
 * NOTE(review): lossy extract — unlock labels not visible.
 */
3314 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3317 struct mgmt_addr_info *addr = data;
3318 struct pending_cmd *cmd;
3319 struct hci_conn *conn;
3326 if (!hdev_is_powered(hdev)) {
3327 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3328 MGMT_STATUS_NOT_POWERED);
3332 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3334 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3335 MGMT_STATUS_INVALID_PARAMS);
3339 conn = cmd->user_data;
/* The address in the cancel request must match the ongoing pairing. */
3341 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3342 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3343 MGMT_STATUS_INVALID_PARAMS);
3347 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3348 mgmt_pending_remove(cmd);
3350 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3351 addr, sizeof(*addr));
3353 hci_dev_unlock(hdev);
/* Common helper behind all user confirm/passkey (neg-)reply mgmt
 * commands. Looks up the connection (ACL for BR/EDR, LE otherwise);
 * LE responses are routed through SMP via smp_user_confirm_reply(),
 * while BR/EDR responses are forwarded to the controller as the given
 * hci_op (with the passkey payload for HCI_OP_USER_PASSKEY_REPLY,
 * bare bdaddr otherwise) tracked by a pending command.
 * NOTE(review): lossy extract — some branches/labels missing.
 */
3357 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3358 struct mgmt_addr_info *addr, u16 mgmt_op,
3359 u16 hci_op, __le32 passkey)
3361 struct pending_cmd *cmd;
3362 struct hci_conn *conn;
3367 if (!hdev_is_powered(hdev)) {
3368 err = cmd_complete(sk, hdev->id, mgmt_op,
3369 MGMT_STATUS_NOT_POWERED, addr,
3374 if (addr->type == BDADDR_BREDR)
3375 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3377 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3380 err = cmd_complete(sk, hdev->id, mgmt_op,
3381 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP, not HCI. */
3386 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3387 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3389 err = cmd_complete(sk, hdev->id, mgmt_op,
3390 MGMT_STATUS_SUCCESS, addr,
3393 err = cmd_complete(sk, hdev->id, mgmt_op,
3394 MGMT_STATUS_FAILED, addr,
3400 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3406 cmd->cmd_complete = addr_cmd_complete;
3408 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all other ops just the bdaddr. */
3409 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3410 struct hci_cp_user_passkey_reply cp;
3412 bacpy(&cp.bdaddr, &addr->bdaddr);
3413 cp.passkey = passkey;
3414 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3416 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3420 mgmt_pending_remove(cmd);
3423 hci_dev_unlock(hdev);
/* Thin wrappers: each mgmt confirm/passkey (neg-)reply opcode simply
 * delegates to user_pairing_resp() with the matching mgmt and HCI
 * opcodes. Only user_confirm_reply has a visible length check (it is
 * the one with a fixed-size command payload to validate here).
 */
3427 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3428 void *data, u16 len)
3430 struct mgmt_cp_pin_code_neg_reply *cp = data;
3434 return user_pairing_resp(sk, hdev, &cp->addr,
3435 MGMT_OP_PIN_CODE_NEG_REPLY,
3436 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3439 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3442 struct mgmt_cp_user_confirm_reply *cp = data;
3446 if (len != sizeof(*cp))
3447 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3448 MGMT_STATUS_INVALID_PARAMS);
3450 return user_pairing_resp(sk, hdev, &cp->addr,
3451 MGMT_OP_USER_CONFIRM_REPLY,
3452 HCI_OP_USER_CONFIRM_REPLY, 0);
3455 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3456 void *data, u16 len)
3458 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3462 return user_pairing_resp(sk, hdev, &cp->addr,
3463 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3464 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3467 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3470 struct mgmt_cp_user_passkey_reply *cp = data;
3474 return user_pairing_resp(sk, hdev, &cp->addr,
3475 MGMT_OP_USER_PASSKEY_REPLY,
3476 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3479 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3480 void *data, u16 len)
3482 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3486 return user_pairing_resp(sk, hdev, &cp->addr,
3487 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3488 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command to the request, copying the
 * current hdev->dev_name into the command payload.
 */
3491 static void update_name(struct hci_request *req)
3493 struct hci_dev *hdev = req->hdev;
3494 struct hci_cp_write_local_name cp;
3496 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3498 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion for SET_LOCAL_NAME: finds the pending mgmt
 * command, reports either the mapped HCI error or success to its
 * socket, and removes the pending entry.
 * NOTE(review): lossy extract — the status branch structure is only
 * partially visible.
 */
3501 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3503 struct mgmt_cp_set_local_name *cp;
3504 struct pending_cmd *cmd;
3506 BT_DBG("status 0x%02x", status);
3510 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3517 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3518 mgmt_status(status));
3520 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3523 mgmt_pending_remove(cmd);
3526 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler. Short-circuits with success when
 * neither the name nor short name changed. When powered off, only the
 * stored names are updated and a LOCAL_NAME_CHANGED event is emitted.
 * Otherwise an HCI request is built (BR/EDR name write and, when LE
 * capable, a scan-response-data update) completed by
 * set_name_complete().
 * NOTE(review): lossy extract — some statements/labels missing.
 */
3529 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3532 struct mgmt_cp_set_local_name *cp = data;
3533 struct pending_cmd *cmd;
3534 struct hci_request req;
3541 /* If the old values are the same as the new ones just return a
3542 * direct command complete event.
3544 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3545 !memcmp(hdev->short_name, cp->short_name,
3546 sizeof(hdev->short_name))) {
3547 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3552 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3554 if (!hdev_is_powered(hdev)) {
3555 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3557 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3562 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3568 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3574 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3576 hci_req_init(&req, hdev);
3578 if (lmp_bredr_capable(hdev)) {
3583 /* The name is stored in the scan response data and so
3584 * no need to udpate the advertising data here.
3586 if (lmp_le_capable(hdev))
3587 update_scan_rsp_data(&req);
3589 err = hci_req_run(&req, set_name_complete);
3591 mgmt_pending_remove(cmd);
3594 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * adapter and no command of the same opcode already pending, then
 * issues either the extended (Secure Connections) or legacy HCI read
 * of local OOB data.
 * NOTE(review): lossy extract — unlock labels missing.
 */
3598 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3599 void *data, u16 data_len)
3601 struct pending_cmd *cmd;
3604 BT_DBG("%s", hdev->name);
3608 if (!hdev_is_powered(hdev)) {
3609 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3610 MGMT_STATUS_NOT_POWERED);
3614 if (!lmp_ssp_capable(hdev)) {
3615 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3616 MGMT_STATUS_NOT_SUPPORTED);
3620 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3621 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3626 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
/* Use the extended variant when BR/EDR Secure Connections is on. */
3632 if (bredr_sc_enabled(hdev))
3633 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3636 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3639 mgmt_pending_remove(cmd);
3642 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Accepts two payload sizes: the
 * legacy form (192-bit hash/rand, BR/EDR only) and the extended form
 * (192+256-bit values). For LE in the extended form the 192-bit
 * values must be zero (legacy SMP OOB unimplemented); zeroed halves
 * disable the corresponding P-192/P-256 data. Any other length is
 * rejected with INVALID_PARAMS.
 * NOTE(review): lossy extract — some assignments and labels missing.
 */
3646 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3647 void *data, u16 len)
3649 struct mgmt_addr_info *addr = data;
3652 BT_DBG("%s ", hdev->name);
3654 if (!bdaddr_type_is_valid(addr->type))
3655 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3656 MGMT_STATUS_INVALID_PARAMS, addr,
/* Legacy-sized payload: P-192 data only, BR/EDR addresses only. */
3661 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3662 struct mgmt_cp_add_remote_oob_data *cp = data;
3665 if (cp->addr.type != BDADDR_BREDR) {
3666 err = cmd_complete(sk, hdev->id,
3667 MGMT_OP_ADD_REMOTE_OOB_DATA,
3668 MGMT_STATUS_INVALID_PARAMS,
3669 &cp->addr, sizeof(cp->addr));
3673 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3674 cp->addr.type, cp->hash,
3675 cp->rand, NULL, NULL);
3677 status = MGMT_STATUS_FAILED;
3679 status = MGMT_STATUS_SUCCESS;
3681 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3682 status, &cp->addr, sizeof(cp->addr));
3683 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3684 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3685 u8 *rand192, *hash192, *rand256, *hash256;
3688 if (bdaddr_type_is_le(cp->addr.type)) {
3689 /* Enforce zero-valued 192-bit parameters as
3690 * long as legacy SMP OOB isn't implemented.
3692 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3693 memcmp(cp->hash192, ZERO_KEY, 16)) {
3694 err = cmd_complete(sk, hdev->id,
3695 MGMT_OP_ADD_REMOTE_OOB_DATA,
3696 MGMT_STATUS_INVALID_PARAMS,
3697 addr, sizeof(*addr));
3704 /* In case one of the P-192 values is set to zero,
3705 * then just disable OOB data for P-192.
3707 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3708 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3712 rand192 = cp->rand192;
3713 hash192 = cp->hash192;
3717 /* In case one of the P-256 values is set to zero, then just
3718 * disable OOB data for P-256.
3720 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3721 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3725 rand256 = cp->rand256;
3726 hash256 = cp->hash256;
3729 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3730 cp->addr.type, hash192, rand192,
3733 status = MGMT_STATUS_FAILED;
3735 status = MGMT_STATUS_SUCCESS;
3737 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3738 status, &cp->addr, sizeof(cp->addr));
3740 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3741 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3742 MGMT_STATUS_INVALID_PARAMS);
3746 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR addresses only.
 * BDADDR_ANY clears all stored remote OOB data; otherwise the single
 * entry is removed and a failed lookup maps to INVALID_PARAMS.
 */
3750 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3751 void *data, u16 len)
3753 struct mgmt_cp_remove_remote_oob_data *cp = data;
3757 BT_DBG("%s", hdev->name);
3759 if (cp->addr.type != BDADDR_BREDR)
3760 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3761 MGMT_STATUS_INVALID_PARAMS,
3762 &cp->addr, sizeof(cp->addr));
3766 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3767 hci_remote_oob_data_clear(hdev);
3768 status = MGMT_STATUS_SUCCESS;
3772 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3774 status = MGMT_STATUS_INVALID_PARAMS;
3776 status = MGMT_STATUS_SUCCESS;
3779 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3780 status, &cp->addr, sizeof(cp->addr));
3782 hci_dev_unlock(hdev);
/* Build the HCI commands for the requested discovery type onto `req`.
 * BR/EDR: flush the inquiry cache and queue an Inquiry with the GIAC
 * LAP (busy if an inquiry is already running). LE/interleaved: check
 * LE support (and BR/EDR enablement for interleaved), stop directed
 * advertising unless it would abort an outgoing connection attempt,
 * pause background scanning, then queue active-scan parameter and
 * enable commands using a (non-)resolvable private own address.
 * On failure *status holds the mgmt error; presumably returns false
 * then and true on success — returns not visible in this extract.
 */
3786 static bool trigger_discovery(struct hci_request *req, u8 *status)
3788 struct hci_dev *hdev = req->hdev;
3789 struct hci_cp_le_set_scan_param param_cp;
3790 struct hci_cp_le_set_scan_enable enable_cp;
3791 struct hci_cp_inquiry inq_cp;
3792 /* General inquiry access code (GIAC) */
3793 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3797 switch (hdev->discovery.type) {
3798 case DISCOV_TYPE_BREDR:
3799 *status = mgmt_bredr_support(hdev);
3803 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3804 *status = MGMT_STATUS_BUSY;
3808 hci_inquiry_cache_flush(hdev);
3810 memset(&inq_cp, 0, sizeof(inq_cp));
3811 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3812 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3813 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3816 case DISCOV_TYPE_LE:
3817 case DISCOV_TYPE_INTERLEAVED:
3818 *status = mgmt_le_support(hdev);
3822 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3823 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3824 *status = MGMT_STATUS_NOT_SUPPORTED;
3828 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3829 /* Don't let discovery abort an outgoing
3830 * connection attempt that's using directed
3833 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3835 *status = MGMT_STATUS_REJECTED;
3839 disable_advertising(req);
3842 /* If controller is scanning, it means the background scanning
3843 * is running. Thus, we should temporarily stop it in order to
3844 * set the discovery scanning parameters.
3846 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3847 hci_req_add_le_scan_disable(req);
3849 memset(&param_cp, 0, sizeof(param_cp));
3851 /* All active scans will be done with either a resolvable
3852 * private address (when privacy feature has been enabled)
3853 * or non-resolvable private address.
3855 err = hci_update_random_address(req, true, &own_addr_type);
3857 *status = MGMT_STATUS_FAILED;
3861 param_cp.type = LE_SCAN_ACTIVE;
3862 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3863 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3864 param_cp.own_address_type = own_addr_type;
3865 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3868 memset(&enable_cp, 0, sizeof(enable_cp));
3869 enable_cp.enable = LE_SCAN_ENABLE;
3870 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3871 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type falls through to INVALID_PARAMS. */
3876 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion for both START_DISCOVERY and
 * START_SERVICE_DISCOVERY. Completes the pending mgmt command; on HCI
 * error the discovery state goes back to STOPPED, otherwise to
 * FINDING. For LE/interleaved scans a le_scan_disable work item is
 * scheduled with a type-specific timeout; when the controller has a
 * strict duplicate filter and service filtering is active, the scan
 * start time and duration are recorded so scanning can be restarted.
 * NOTE(review): lossy extract — some branches and labels missing.
 */
3883 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3886 struct pending_cmd *cmd;
3887 unsigned long timeout;
3889 BT_DBG("status %d", status);
3893 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3895 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3898 cmd->cmd_complete(cmd, mgmt_status(status));
3899 mgmt_pending_remove(cmd);
3903 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3907 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3909 /* If the scan involves LE scan, pick proper timeout to schedule
3910 * hdev->le_scan_disable that will stop it.
3912 switch (hdev->discovery.type) {
3913 case DISCOV_TYPE_LE:
3914 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3916 case DISCOV_TYPE_INTERLEAVED:
3917 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3919 case DISCOV_TYPE_BREDR:
3923 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3929 /* When service discovery is used and the controller has
3930 * a strict duplicate filter, it is important to remember
3931 * the start and duration of the scan. This is required
3932 * for restarting scanning during the discovery phase.
3934 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
3936 (hdev->discovery.uuid_count > 0 ||
3937 hdev->discovery.rssi != HCI_RSSI_INVALID)) {
3938 hdev->discovery.scan_start = jiffies;
3939 hdev->discovery.scan_duration = timeout;
3942 queue_delayed_work(hdev->workqueue,
3943 &hdev->le_scan_disable, timeout);
3947 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: rejects when unpowered or when a
 * discovery/periodic inquiry is already active, clears any previous
 * discovery filter, records the requested type, and runs the HCI
 * request produced by trigger_discovery(); on success the discovery
 * state moves to STARTING pending start_discovery_complete().
 * NOTE(review): lossy extract — error labels/braces missing.
 */
3950 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3951 void *data, u16 len)
3953 struct mgmt_cp_start_discovery *cp = data;
3954 struct pending_cmd *cmd;
3955 struct hci_request req;
3959 BT_DBG("%s", hdev->name);
3963 if (!hdev_is_powered(hdev)) {
3964 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3965 MGMT_STATUS_NOT_POWERED,
3966 &cp->type, sizeof(cp->type));
3970 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3971 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3972 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3973 MGMT_STATUS_BUSY, &cp->type,
3978 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3984 cmd->cmd_complete = generic_cmd_complete;
3986 /* Clear the discovery filter first to free any previously
3987 * allocated memory for the UUID list.
3989 hci_discovery_filter_clear(hdev);
3991 hdev->discovery.type = cp->type;
3992 hdev->discovery.report_invalid_rssi = false;
3994 hci_req_init(&req, hdev);
3996 if (!trigger_discovery(&req, &status)) {
3997 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3998 status, &cp->type, sizeof(cp->type));
3999 mgmt_pending_remove(cmd);
4003 err = hci_req_run(&req, start_discovery_complete);
4005 mgmt_pending_remove(cmd);
4009 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4012 hci_dev_unlock(hdev);
/* cmd_complete callback for START_SERVICE_DISCOVERY pending commands;
 * replies with the command's opcode and (not visible here) presumably
 * echoes part of the stored command parameter.
 */
4016 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
4018 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with an RSSI threshold and a variable-length UUID filter list.
 * Validates uuid_count against both an overflow-safe maximum
 * (max_uuid_count keeps sizeof(*cp) + 16*count within U16_MAX) and the
 * actual payload length, duplicates the UUID list into
 * hdev->discovery.uuids, then triggers discovery as usual.
 * NOTE(review): lossy extract — error labels/braces missing.
 */
4022 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4023 void *data, u16 len)
4025 struct mgmt_cp_start_service_discovery *cp = data;
4026 struct pending_cmd *cmd;
4027 struct hci_request req;
4028 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4029 u16 uuid_count, expected_len;
4033 BT_DBG("%s", hdev->name);
4037 if (!hdev_is_powered(hdev)) {
4038 err = cmd_complete(sk, hdev->id,
4039 MGMT_OP_START_SERVICE_DISCOVERY,
4040 MGMT_STATUS_NOT_POWERED,
4041 &cp->type, sizeof(cp->type));
4045 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4046 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
4047 err = cmd_complete(sk, hdev->id,
4048 MGMT_OP_START_SERVICE_DISCOVERY,
4049 MGMT_STATUS_BUSY, &cp->type,
/* Bound the UUID count before computing expected_len (u16 math). */
4054 uuid_count = __le16_to_cpu(cp->uuid_count);
4055 if (uuid_count > max_uuid_count) {
4056 BT_ERR("service_discovery: too big uuid_count value %u",
4058 err = cmd_complete(sk, hdev->id,
4059 MGMT_OP_START_SERVICE_DISCOVERY,
4060 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4065 expected_len = sizeof(*cp) + uuid_count * 16;
4066 if (expected_len != len) {
4067 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4069 err = cmd_complete(sk, hdev->id,
4070 MGMT_OP_START_SERVICE_DISCOVERY,
4071 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4076 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4083 cmd->cmd_complete = service_discovery_cmd_complete;
4085 /* Clear the discovery filter first to free any previously
4086 * allocated memory for the UUID list.
4088 hci_discovery_filter_clear(hdev);
4090 hdev->discovery.type = cp->type;
4091 hdev->discovery.rssi = cp->rssi;
4092 hdev->discovery.uuid_count = uuid_count;
4094 if (uuid_count > 0) {
4095 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4097 if (!hdev->discovery.uuids) {
4098 err = cmd_complete(sk, hdev->id,
4099 MGMT_OP_START_SERVICE_DISCOVERY,
4101 &cp->type, sizeof(cp->type));
4102 mgmt_pending_remove(cmd);
4107 hci_req_init(&req, hdev);
4109 if (!trigger_discovery(&req, &status)) {
4110 err = cmd_complete(sk, hdev->id,
4111 MGMT_OP_START_SERVICE_DISCOVERY,
4112 status, &cp->type, sizeof(cp->type));
4113 mgmt_pending_remove(cmd);
4117 err = hci_req_run(&req, start_discovery_complete);
4119 mgmt_pending_remove(cmd);
4123 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4126 hci_dev_unlock(hdev);
/* HCI request completion for STOP_DISCOVERY: completes and removes the
 * pending mgmt command with the mapped status and, on success, sets
 * the discovery state to STOPPED.
 */
4130 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4132 struct pending_cmd *cmd;
4134 BT_DBG("status %d", status);
4138 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4140 cmd->cmd_complete(cmd, mgmt_status(status));
4141 mgmt_pending_remove(cmd);
4145 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4147 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: rejects when no discovery is active
 * or the requested type doesn't match the running one, otherwise
 * queues hci_stop_discovery() commands. If the request produced no HCI
 * traffic (-ENODATA) discovery is marked stopped and success is
 * returned directly; otherwise the state becomes STOPPING until
 * stop_discovery_complete() fires.
 * NOTE(review): lossy extract — error labels/braces missing.
 */
4150 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4153 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4154 struct pending_cmd *cmd;
4155 struct hci_request req;
4158 BT_DBG("%s", hdev->name);
4162 if (!hci_discovery_active(hdev)) {
4163 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4164 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4165 sizeof(mgmt_cp->type));
4169 if (hdev->discovery.type != mgmt_cp->type) {
4170 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4171 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4172 sizeof(mgmt_cp->type));
4176 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4182 cmd->cmd_complete = generic_cmd_complete;
4184 hci_req_init(&req, hdev);
4186 hci_stop_discovery(&req);
4188 err = hci_req_run(&req, stop_discovery_complete);
4190 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4194 mgmt_pending_remove(cmd);
4196 /* If no HCI commands were sent we're done */
4197 if (err == -ENODATA) {
4198 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4199 &mgmt_cp->type, sizeof(mgmt_cp->type));
4200 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4204 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: while discovery is active, marks an
 * inquiry-cache entry's name as known or needed (the latter re-queues
 * it for name resolution via hci_inquiry_cache_update_resolve()).
 */
4208 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4211 struct mgmt_cp_confirm_name *cp = data;
4212 struct inquiry_entry *e;
4215 BT_DBG("%s", hdev->name);
4219 if (!hci_discovery_active(hdev)) {
4220 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4221 MGMT_STATUS_FAILED, &cp->addr,
4226 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4228 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4229 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4234 if (cp->name_known) {
4235 e->name_state = NAME_KNOWN;
4238 e->name_state = NAME_NEEDED;
4239 hci_inquiry_cache_update_resolve(hdev, e);
4242 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4246 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: adds the address to hdev->blacklist
 * and, on success, emits MGMT_EV_DEVICE_BLOCKED to other sockets.
 */
4250 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4253 struct mgmt_cp_block_device *cp = data;
4257 BT_DBG("%s", hdev->name);
4259 if (!bdaddr_type_is_valid(cp->addr.type))
4260 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4261 MGMT_STATUS_INVALID_PARAMS,
4262 &cp->addr, sizeof(cp->addr));
4266 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4269 status = MGMT_STATUS_FAILED;
4273 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4275 status = MGMT_STATUS_SUCCESS;
4278 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4279 &cp->addr, sizeof(cp->addr));
4281 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device — removes the
 * address from the blacklist (a miss maps to INVALID_PARAMS) and emits
 * MGMT_EV_DEVICE_UNBLOCKED on success.
 */
4286 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4289 struct mgmt_cp_unblock_device *cp = data;
4293 BT_DBG("%s", hdev->name);
4295 if (!bdaddr_type_is_valid(cp->addr.type))
4296 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4297 MGMT_STATUS_INVALID_PARAMS,
4298 &cp->addr, sizeof(cp->addr));
4302 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4305 status = MGMT_STATUS_INVALID_PARAMS;
4309 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4311 status = MGMT_STATUS_SUCCESS;
4314 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4315 &cp->addr, sizeof(cp->addr));
4317 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: validates the DI source (0x0000 -
 * 0x0002), stores vendor/product/version in hdev, replies success and
 * runs an HCI request (presumably an EIR/class update — the queued
 * commands are not visible in this extract).
 */
4322 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4325 struct mgmt_cp_set_device_id *cp = data;
4326 struct hci_request req;
4330 BT_DBG("%s", hdev->name);
4332 source = __le16_to_cpu(cp->source);
4334 if (source > 0x0002)
4335 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4336 MGMT_STATUS_INVALID_PARAMS);
4340 hdev->devid_source = source;
4341 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4342 hdev->devid_product = __le16_to_cpu(cp->product);
4343 hdev->devid_version = __le16_to_cpu(cp->version);
4345 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4347 hci_req_init(&req, hdev);
4349 hci_req_run(&req, NULL);
4351 hci_dev_unlock(hdev);
/* HCI request completion for SET_ADVERTISING: on HCI error all pending
 * SET_ADVERTISING commands get the mapped error status; on success the
 * HCI_ADVERTISING flag is synced to the controller state (HCI_LE_ADV),
 * settings responses are sent to each pending socket and a
 * new_settings event is broadcast.
 */
4356 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4359 struct cmd_lookup match = { NULL, hdev };
4364 u8 mgmt_err = mgmt_status(status);
4366 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4367 cmd_status_rsp, &mgmt_err);
4371 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4372 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4374 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4376 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4379 new_settings(hdev, match.sk);
4385 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: requires LE support and a boolean
 * value. When no HCI traffic is needed (powered off, value unchanged,
 * LE links present, or an active LE scan is running) the flag is
 * toggled directly and settings are answered from here. Otherwise,
 * with no conflicting SET_ADVERTISING/SET_LE command pending, an HCI
 * request enabling or disabling advertising is run, completed by
 * set_advertising_complete().
 * NOTE(review): lossy extract — some branches and labels missing.
 */
4388 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4391 struct mgmt_mode *cp = data;
4392 struct pending_cmd *cmd;
4393 struct hci_request req;
4394 u8 val, enabled, status;
4397 BT_DBG("request for %s", hdev->name);
4399 status = mgmt_le_support(hdev);
4401 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4404 if (cp->val != 0x00 && cp->val != 0x01)
4405 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4406 MGMT_STATUS_INVALID_PARAMS);
4411 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4413 /* The following conditions are ones which mean that we should
4414 * not do any HCI communication but directly send a mgmt
4415 * response to user space (after toggling the flag if
4418 if (!hdev_is_powered(hdev) || val == enabled ||
4419 hci_conn_num(hdev, LE_LINK) > 0 ||
4420 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4421 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4422 bool changed = false;
4424 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4425 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4429 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4434 err = new_settings(hdev, sk);
4439 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4440 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4441 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4446 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4452 hci_req_init(&req, hdev);
4455 enable_advertising(&req);
4457 disable_advertising(&req);
4459 err = hci_req_run(&req, set_advertising_complete);
4461 mgmt_pending_remove(cmd);
4464 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed on an LE-capable,
 * powered-off adapter. A non-ANY address must not be BDADDR_NONE and
 * must have its two most significant bits set (static random address
 * format per the LE spec); the address is then stored in
 * hdev->static_addr.
 */
4468 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4469 void *data, u16 len)
4471 struct mgmt_cp_set_static_address *cp = data;
4474 BT_DBG("%s", hdev->name);
4476 if (!lmp_le_capable(hdev))
4477 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4478 MGMT_STATUS_NOT_SUPPORTED);
4480 if (hdev_is_powered(hdev))
4481 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4482 MGMT_STATUS_REJECTED);
4484 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4485 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4486 return cmd_status(sk, hdev->id,
4487 MGMT_OP_SET_STATIC_ADDRESS,
4488 MGMT_STATUS_INVALID_PARAMS);
4490 /* Two most significant bits shall be set */
4491 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4492 return cmd_status(sk, hdev->id,
4493 MGMT_OP_SET_STATIC_ADDRESS,
4494 MGMT_STATUS_INVALID_PARAMS);
4499 bacpy(&hdev->static_addr, &cp->bdaddr);
4501 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4503 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validates interval and window
 * against the HCI-allowed range 0x0004-0x4000 and window <= interval,
 * stores them, and restarts passive background scanning (disable +
 * re-enable) so the new parameters take effect — but only when a scan
 * is running and no discovery is in progress.
 */
4508 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4509 void *data, u16 len)
4511 struct mgmt_cp_set_scan_params *cp = data;
4512 __u16 interval, window;
4515 BT_DBG("%s", hdev->name);
4517 if (!lmp_le_capable(hdev))
4518 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4519 MGMT_STATUS_NOT_SUPPORTED);
4521 interval = __le16_to_cpu(cp->interval);
4523 if (interval < 0x0004 || interval > 0x4000)
4524 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4525 MGMT_STATUS_INVALID_PARAMS);
4527 window = __le16_to_cpu(cp->window);
4529 if (window < 0x0004 || window > 0x4000)
4530 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4531 MGMT_STATUS_INVALID_PARAMS);
4533 if (window > interval)
4534 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4535 MGMT_STATUS_INVALID_PARAMS);
4539 hdev->le_scan_interval = interval;
4540 hdev->le_scan_window = window;
4542 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4544 /* If background scan is running, restart it so new parameters are
4547 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4548 hdev->discovery.state == DISCOVERY_STOPPED) {
4549 struct hci_request req;
4551 hci_req_init(&req, hdev);
4553 hci_req_add_le_scan_disable(&req);
4554 hci_req_add_le_passive_scan(&req);
4556 hci_req_run(&req, NULL);
4559 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable.
 *
 * Looks up the pending MGMT command; on HCI failure replies with a
 * translated error status, otherwise updates HCI_FAST_CONNECTABLE to
 * match the requested mode and notifies the socket plus all other
 * management sockets of the new settings.
 *
 * NOTE(review): excerpt is missing interior lines (lock, branch
 * structure, early exit when no pending command) — hedge accordingly.
 */
4564 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4567 struct pending_cmd *cmd;
4569 BT_DBG("status 0x%02x", status);
4573 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
/* HCI failure path: report the mapped error to the requester. */
4578 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4579 mgmt_status(status));
4581 struct mgmt_mode *cp = cmd->param;
/* Success: mirror the requested value into the dev_flags bit. */
4584 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4586 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4588 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4589 new_settings(hdev, cmd->sk);
4592 mgmt_pending_remove(cmd);
4595 hci_dev_unlock(hdev);
/* MGMT Set Fast Connectable command handler.
 *
 * Requires BR/EDR enabled and controller version >= 1.2, value 0/1,
 * powered and connectable adapter. Rejects if a request is already
 * pending, short-circuits when the flag already matches, otherwise
 * queues a write_fast_connectable HCI request with
 * fast_connectable_complete as its callback.
 *
 * NOTE(review): excerpt is missing interior lines (lock, goto labels,
 * BUSY status argument, return) — comments cover visible logic only.
 */
4598 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4599 void *data, u16 len)
4601 struct mgmt_mode *cp = data;
4602 struct pending_cmd *cmd;
4603 struct hci_request req;
4606 BT_DBG("%s", hdev->name);
/* Fast connectable is a BR/EDR >= 1.2 feature. */
4608 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4609 hdev->hci_ver < BLUETOOTH_VER_1_2)
4610 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4611 MGMT_STATUS_NOT_SUPPORTED);
4613 if (cp->val != 0x00 && cp->val != 0x01)
4614 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4615 MGMT_STATUS_INVALID_PARAMS);
4617 if (!hdev_is_powered(hdev))
4618 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4619 MGMT_STATUS_NOT_POWERED);
4621 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4622 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4623 MGMT_STATUS_REJECTED);
/* Only one Set Fast Connectable may be in flight at a time. */
4627 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4628 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested value already matches current state. */
4633 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4634 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4639 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4646 hci_req_init(&req, hdev);
4648 write_fast_connectable(&req, cp->val);
4650 err = hci_req_run(&req, fast_connectable_complete);
/* Request could not be queued: fail and drop the pending entry. */
4652 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4653 MGMT_STATUS_FAILED);
4654 mgmt_pending_remove(cmd);
4658 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR.
 *
 * On HCI failure restores the speculatively-set HCI_BREDR_ENABLED
 * flag and reports the error; on success replies with the updated
 * settings and broadcasts them to other management sockets.
 *
 * NOTE(review): excerpt is missing interior lines (lock, early exit,
 * branch structure) — comments annotate visible logic only.
 */
4663 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4665 struct pending_cmd *cmd;
4667 BT_DBG("status 0x%02x", status);
4671 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4676 u8 mgmt_err = mgmt_status(status);
4678 /* We need to restore the flag if related HCI commands
/* set_bredr flipped the bit before running the request; undo it. */
4681 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4683 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4685 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4686 new_settings(hdev, cmd->sk);
4689 mgmt_pending_remove(cmd);
4692 hci_dev_unlock(hdev);
/* MGMT Set BR/EDR command handler (dual-mode controllers only).
 *
 * Enables or disables BR/EDR support. Requires both BR/EDR and LE
 * capability and LE currently enabled. When powered off the flag is
 * toggled directly (clearing BR/EDR-only flags on disable); when
 * powered on, disabling is rejected, re-enabling with a static
 * address or Secure Connections enabled is rejected, and otherwise
 * an HCI request is queued (page scan + advertising data update).
 *
 * NOTE(review): excerpt is missing interior lines (lock, goto labels,
 * if/else structure, returns) — comments cover visible logic only.
 */
4695 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4697 struct mgmt_mode *cp = data;
4698 struct pending_cmd *cmd;
4699 struct hci_request req;
4702 BT_DBG("request for %s", hdev->name);
/* Only meaningful on dual-mode (BR/EDR + LE) controllers. */
4704 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4705 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4706 MGMT_STATUS_NOT_SUPPORTED);
4708 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4709 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4710 MGMT_STATUS_REJECTED);
4712 if (cp->val != 0x00 && cp->val != 0x01)
4713 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4714 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: reply without touching HCI. */
4718 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4719 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: flip the flag directly, no HCI traffic needed. */
4723 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also invalidates all BR/EDR-only settings. */
4725 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4726 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4727 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4728 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4729 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4732 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4734 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4738 err = new_settings(hdev, sk);
4742 /* Reject disabling when powered on */
4744 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4745 MGMT_STATUS_REJECTED);
4748 /* When configuring a dual-mode controller to operate
4749 * with LE only and using a static address, then switching
4750 * BR/EDR back on is not allowed.
4752 * Dual-mode controllers shall operate with the public
4753 * address as its identity address for BR/EDR and LE. So
4754 * reject the attempt to create an invalid configuration.
4756 * The same restrictions applies when secure connections
4757 * has been enabled. For BR/EDR this is a controller feature
4758 * while for LE it is a host stack feature. This means that
4759 * switching BR/EDR back on when secure connections has been
4760 * enabled is not a supported transaction.
4762 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4763 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4764 test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
4765 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4766 MGMT_STATUS_REJECTED);
4771 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4772 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4777 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4783 /* We need to flip the bit already here so that update_adv_data
4784 * generates the correct flags.
4786 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4788 hci_req_init(&req, hdev);
4790 write_fast_connectable(&req, false);
4791 __hci_update_page_scan(&req);
4793 /* Since only the advertising data flags will change, there
4794 * is no need to update the scan response data.
4796 update_adv_data(&req);
/* set_bredr_complete undoes the flag if this request fails. */
4798 err = hci_req_run(&req, set_bredr_complete);
4800 mgmt_pending_remove(cmd);
4803 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Secure Connections.
 *
 * On failure replies with the mapped error; on success updates
 * HCI_SC_ENABLED/HCI_SC_ONLY according to the requested mode
 * (0x00 = off, 0x01 = SC enabled, 0x02 = SC only — inferred from
 * the visible bit combinations; TODO confirm against cmd->param)
 * and broadcasts the new settings.
 *
 * NOTE(review): excerpt is missing interior lines (lock, switch/case
 * or if/else structure) — comments annotate visible logic only.
 */
4807 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4809 struct pending_cmd *cmd;
4810 struct mgmt_mode *cp;
4812 BT_DBG("%s status %u", hdev->name, status);
4816 cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4821 cmd_status(cmd->sk, cmd->index, cmd->opcode,
4822 mgmt_status(status));
/* Mode off: both SC flags cleared. */
4830 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4831 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
/* SC enabled (mixed mode). */
4834 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4835 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
/* SC-only mode. */
4838 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4839 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4843 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4844 new_settings(hdev, cmd->sk);
4847 mgmt_pending_remove(cmd);
4849 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections command handler.
 *
 * Accepts values 0x00 (off), 0x01 (on) and 0x02 (SC only). When the
 * controller is unpowered, not SC capable, or BR/EDR is disabled the
 * flags are changed directly in dev_flags; otherwise an HCI
 * Write SC Support command is queued with sc_enable_complete as the
 * completion handler.
 *
 * NOTE(review): excerpt is missing interior lines (lock, `val`
 * computation, goto labels, returns) — comments cover visible logic.
 */
4852 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4853 void *data, u16 len)
4855 struct mgmt_mode *cp = data;
4856 struct pending_cmd *cmd;
4857 struct hci_request req;
4861 BT_DBG("request for %s", hdev->name);
/* Needs either controller SC support or LE enabled. */
4863 if (!lmp_sc_capable(hdev) &&
4864 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4865 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4866 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled first. */
4868 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4869 lmp_sc_capable(hdev) &&
4870 !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4871 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4872 MGMT_STATUS_REJECTED);
4874 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4875 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4876 MGMT_STATUS_INVALID_PARAMS);
/* No HCI traffic possible/needed: update flags directly. */
4880 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4881 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4885 changed = !test_and_set_bit(HCI_SC_ENABLED,
4887 if (cp->val == 0x02)
4888 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4890 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4892 changed = test_and_clear_bit(HCI_SC_ENABLED,
4894 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4897 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4902 err = new_settings(hdev, sk);
4907 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4908 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Requested state already active: reply without HCI round-trip. */
4915 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4916 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4917 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4921 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4927 hci_req_init(&req, hdev);
4928 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4929 err = hci_req_run(&req, sc_enable_complete);
4931 mgmt_pending_remove(cmd);
4936 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys command handler.
 *
 * Value 0x00 disables, 0x01 keeps debug keys, 0x02 additionally uses
 * them (HCI_USE_DEBUG_KEYS). If the use-flag changed while powered
 * with SSP enabled, the controller is told via Write SSP Debug Mode.
 * Replies with the settings and broadcasts them if anything changed.
 *
 * NOTE(review): excerpt is missing interior lines (lock, flag-word
 * arguments of test_and_*_bit, returns) — comments cover visible
 * logic only.
 */
4940 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4941 void *data, u16 len)
4943 struct mgmt_mode *cp = data;
4944 bool changed, use_changed;
4947 BT_DBG("request for %s", hdev->name);
4949 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4950 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4951 MGMT_STATUS_INVALID_PARAMS);
/* Any non-zero value means debug keys are to be kept. */
4956 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4959 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* 0x02 additionally enables active use of debug keys. */
4962 if (cp->val == 0x02)
4963 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4966 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Push the new debug mode to the controller when applicable. */
4969 if (hdev_is_powered(hdev) && use_changed &&
4970 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4971 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4972 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4973 sizeof(mode), &mode);
4976 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4981 err = new_settings(hdev, sk);
4984 hci_dev_unlock(hdev);
/* MGMT Set Privacy command handler.
 *
 * Enables or disables LE privacy. Only allowed on LE-capable
 * controllers while powered off. Enabling stores the supplied IRK
 * and marks the RPA as expired so a fresh one is generated;
 * disabling wipes the IRK. HCI_RPA_RESOLVING is always set because
 * userspace supporting this command is expected to handle IRKs.
 *
 * NOTE(review): excerpt is missing interior lines (lock, returns,
 * if/else structure) — comments cover visible logic only.
 */
4988 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4991 struct mgmt_cp_set_privacy *cp = cp_data;
4995 BT_DBG("request for %s", hdev->name);
4997 if (!lmp_le_capable(hdev))
4998 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4999 MGMT_STATUS_NOT_SUPPORTED);
5001 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5002 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5003 MGMT_STATUS_INVALID_PARAMS);
/* Privacy may only be reconfigured while powered off. */
5005 if (hdev_is_powered(hdev))
5006 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5007 MGMT_STATUS_REJECTED);
5011 /* If user space supports this command it is also expected to
5012 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5014 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
/* Enable path: remember IRK, force RPA regeneration. */
5017 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
5018 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5019 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Disable path: clear flag and wipe the stored IRK. */
5021 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
5022 memset(hdev->irk, 0, sizeof(hdev->irk));
5023 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5026 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5031 err = new_settings(hdev, sk);
5034 hci_dev_unlock(hdev);
/* Validate the address portion of an IRK entry supplied by userspace.
 * Public LE addresses are always acceptable; random addresses must be
 * static (two most significant bits set). Other address types are
 * rejected — presumably in code not visible in this excerpt.
 */
5038 static bool irk_is_valid(struct mgmt_irk_info *irk)
5040 switch (irk->addr.type) {
5041 case BDADDR_LE_PUBLIC:
5044 case BDADDR_LE_RANDOM:
5045 /* Two most significant bits shall be set */
5046 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs command handler.
 *
 * Validates the count against the maximum that fits in a u16 payload
 * and against the actual message length, validates each entry with
 * irk_is_valid(), then replaces the whole IRK store: clears existing
 * IRKs and adds the new set. Finally enables RPA resolving.
 *
 * NOTE(review): excerpt is missing interior lines (lock, loop index
 * declaration, some argument lines) — comments cover visible logic.
 */
5054 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5057 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound on entries representable in a u16-length payload;
 * guards the expected_len computation below against overflow. */
5058 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5059 sizeof(struct mgmt_irk_info));
5060 u16 irk_count, expected_len;
5063 BT_DBG("request for %s", hdev->name);
5065 if (!lmp_le_capable(hdev))
5066 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5067 MGMT_STATUS_NOT_SUPPORTED);
5069 irk_count = __le16_to_cpu(cp->irk_count);
5070 if (irk_count > max_irk_count) {
5071 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5072 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5073 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload size exactly. */
5076 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5077 if (expected_len != len) {
5078 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5080 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5081 MGMT_STATUS_INVALID_PARAMS);
5084 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate everything before touching the existing IRK store. */
5086 for (i = 0; i < irk_count; i++) {
5087 struct mgmt_irk_info *key = &cp->irks[i];
5089 if (!irk_is_valid(key))
5090 return cmd_status(sk, hdev->id,
5092 MGMT_STATUS_INVALID_PARAMS);
/* Full replace: drop old IRKs, then add the new set. */
5097 hci_smp_irks_clear(hdev);
5099 for (i = 0; i < irk_count; i++) {
5100 struct mgmt_irk_info *irk = &cp->irks[i];
5103 if (irk->addr.type == BDADDR_LE_PUBLIC)
5104 addr_type = ADDR_LE_DEV_PUBLIC;
5106 addr_type = ADDR_LE_DEV_RANDOM;
5108 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5112 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5114 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5116 hci_dev_unlock(hdev);
/* Validate an LTK entry supplied by userspace: master must be 0/1 and
 * the address must be LE public or a static random address (two most
 * significant bits set). Return statements for the accept/reject
 * paths are in lines not visible in this excerpt.
 */
5121 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5123 if (key->master != 0x00 && key->master != 0x01)
5126 switch (key->addr.type) {
5127 case BDADDR_LE_PUBLIC:
5130 case BDADDR_LE_RANDOM:
5131 /* Two most significant bits shall be set */
5132 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys command handler.
 *
 * Validates key count against the u16-payload maximum and the exact
 * message length, validates each entry via ltk_is_valid(), then
 * replaces the LTK store: clears existing SMP LTKs and adds the new
 * ones, mapping the MGMT key type to SMP key type + authentication
 * level.
 *
 * NOTE(review): excerpt is missing interior lines (lock, loop index,
 * break statements, default case, trailing hci_add_ltk arguments) —
 * comments cover visible logic only.
 */
5140 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5141 void *cp_data, u16 len)
5143 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Overflow guard for the expected_len computation below. */
5144 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5145 sizeof(struct mgmt_ltk_info));
5146 u16 key_count, expected_len;
5149 BT_DBG("request for %s", hdev->name);
5151 if (!lmp_le_capable(hdev))
5152 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5153 MGMT_STATUS_NOT_SUPPORTED);
5155 key_count = __le16_to_cpu(cp->key_count);
5156 if (key_count > max_key_count) {
5157 BT_ERR("load_ltks: too big key_count value %u", key_count);
5158 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5159 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload size exactly. */
5162 expected_len = sizeof(*cp) + key_count *
5163 sizeof(struct mgmt_ltk_info);
5164 if (expected_len != len) {
5165 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5167 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5168 MGMT_STATUS_INVALID_PARAMS);
5171 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all entries before clearing the existing store. */
5173 for (i = 0; i < key_count; i++) {
5174 struct mgmt_ltk_info *key = &cp->keys[i];
5176 if (!ltk_is_valid(key))
5177 return cmd_status(sk, hdev->id,
5178 MGMT_OP_LOAD_LONG_TERM_KEYS,
5179 MGMT_STATUS_INVALID_PARAMS);
5184 hci_smp_ltks_clear(hdev);
5186 for (i = 0; i < key_count; i++) {
5187 struct mgmt_ltk_info *key = &cp->keys[i];
5188 u8 type, addr_type, authenticated;
5190 if (key->addr.type == BDADDR_LE_PUBLIC)
5191 addr_type = ADDR_LE_DEV_PUBLIC;
5193 addr_type = ADDR_LE_DEV_RANDOM;
/* Map MGMT key type to SMP key type + authenticated flag. */
5195 switch (key->type) {
5196 case MGMT_LTK_UNAUTHENTICATED:
5197 authenticated = 0x00;
5198 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5200 case MGMT_LTK_AUTHENTICATED:
5201 authenticated = 0x01;
5202 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5204 case MGMT_LTK_P256_UNAUTH:
5205 authenticated = 0x00;
5206 type = SMP_LTK_P256;
5208 case MGMT_LTK_P256_AUTH:
5209 authenticated = 0x01;
5210 type = SMP_LTK_P256;
5212 case MGMT_LTK_P256_DEBUG:
5213 authenticated = 0x00;
5214 type = SMP_LTK_P256_DEBUG;
5219 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5220 authenticated, key->val, key->enc_size, key->ediv,
5224 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5227 hci_dev_unlock(hdev);
/* cmd_complete handler for Get Connection Information.
 *
 * Builds the response from the cached hci_conn values on success, or
 * with invalid-marker values on failure, sends it to the requesting
 * socket and drops the connection reference taken when the command
 * was queued. (A matching hci_conn_put presumably follows in lines
 * not visible here — TODO confirm.)
 */
5232 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5234 struct hci_conn *conn = cmd->user_data;
5235 struct mgmt_rp_get_conn_info rp;
/* Echo the address the caller asked about. */
5238 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5240 if (status == MGMT_STATUS_SUCCESS) {
5241 rp.rssi = conn->rssi;
5242 rp.tx_power = conn->tx_power;
5243 rp.max_tx_power = conn->max_tx_power;
/* Failure: report explicit "invalid" marker values. */
5245 rp.rssi = HCI_RSSI_INVALID;
5246 rp.tx_power = HCI_TX_POWER_INVALID;
5247 rp.max_tx_power = HCI_TX_POWER_INVALID;
5250 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
/* Release the hold taken in get_conn_info(). */
5253 hci_conn_drop(conn);
/* HCI completion callback for the Read RSSI / Read TX Power request
 * issued by get_conn_info().
 *
 * Recovers the connection handle from whichever command was sent
 * last (both share the handle as first parameter), looks up the
 * pending MGMT command keyed by that connection and finishes it.
 */
5259 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5262 struct hci_cp_read_rssi *cp;
5263 struct pending_cmd *cmd;
5264 struct hci_conn *conn;
5268 BT_DBG("status 0x%02x", hci_status);
5272 /* Commands sent in request are either Read RSSI or Read Transmit Power
5273 * Level so we check which one was last sent to retrieve connection
5274 * handle. Both commands have handle as first parameter so it's safe to
5275 * cast data on the same command struct.
5277 * First command sent is always Read RSSI and we fail only if it fails.
5278 * In other case we simply override error to indicate success as we
5279 * already remembered if TX power value is actually valid.
5281 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
/* Fall back to Read TX Power; a TX-power failure is non-fatal. */
5283 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5284 status = MGMT_STATUS_SUCCESS;
5286 status = mgmt_status(hci_status);
/* Neither command matches: should not happen for this callback. */
5290 BT_ERR("invalid sent_cmd in conn_info response");
5294 handle = __le16_to_cpu(cp->handle);
5295 conn = hci_conn_hash_lookup_handle(hdev, handle);
5297 BT_ERR("unknown handle (%d) in conn_info response", handle);
/* Match the pending command by its connection pointer. */
5301 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5305 cmd->cmd_complete(cmd, status);
5306 mgmt_pending_remove(cmd);
5309 hci_dev_unlock(hdev);
/* MGMT Get Connection Information command handler.
 *
 * Returns RSSI / TX power / max TX power for a connected device.
 * If the cached values are older than a randomized age window
 * (between hdev->conn_info_min_age and _max_age) or were never set,
 * a refresh is requested from the controller and the reply is
 * deferred to conn_info_refresh_complete(); otherwise the cached
 * values are returned immediately.
 *
 * NOTE(review): excerpt is missing interior lines (lock, goto
 * labels, error handling after hci_req_run / mgmt_pending_add) —
 * comments cover visible logic only.
 */
5312 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5315 struct mgmt_cp_get_conn_info *cp = data;
5316 struct mgmt_rp_get_conn_info rp;
5317 struct hci_conn *conn;
5318 unsigned long conn_info_age;
5321 BT_DBG("%s", hdev->name);
/* Pre-fill the reply with the queried address for all paths. */
5323 memset(&rp, 0, sizeof(rp));
5324 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5325 rp.addr.type = cp->addr.type;
5327 if (!bdaddr_type_is_valid(cp->addr.type))
5328 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5329 MGMT_STATUS_INVALID_PARAMS,
5334 if (!hdev_is_powered(hdev)) {
5335 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5336 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Resolve the connection from the address type (ACL vs LE). */
5340 if (cp->addr.type == BDADDR_BREDR)
5341 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5344 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5346 if (!conn || conn->state != BT_CONNECTED) {
5347 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5348 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Only one in-flight query per connection. */
5352 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5353 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5354 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5358 /* To avoid client trying to guess when to poll again for information we
5359 * calculate conn info age as random value between min/max set in hdev.
5361 conn_info_age = hdev->conn_info_min_age +
5362 prandom_u32_max(hdev->conn_info_max_age -
5363 hdev->conn_info_min_age);
5365 /* Query controller to refresh cached values if they are too old or were
5368 if (time_after(jiffies, conn->conn_info_timestamp +
5369 msecs_to_jiffies(conn_info_age)) ||
5370 !conn->conn_info_timestamp) {
5371 struct hci_request req;
5372 struct hci_cp_read_tx_power req_txp_cp;
5373 struct hci_cp_read_rssi req_rssi_cp;
5374 struct pending_cmd *cmd;
5376 hci_req_init(&req, hdev);
/* Read RSSI is always first (see conn_info_refresh_complete). */
5377 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5378 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5381 /* For LE links TX power does not change thus we don't need to
5382 * query for it once value is known.
5384 if (!bdaddr_type_is_le(cp->addr.type) ||
5385 conn->tx_power == HCI_TX_POWER_INVALID) {
5386 req_txp_cp.handle = cpu_to_le16(conn->handle);
5387 req_txp_cp.type = 0x00;
5388 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5389 sizeof(req_txp_cp), &req_txp_cp);
5392 /* Max TX power needs to be read only once per connection */
5393 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5394 req_txp_cp.handle = cpu_to_le16(conn->handle);
5395 req_txp_cp.type = 0x01;
5396 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5397 sizeof(req_txp_cp), &req_txp_cp);
5400 err = hci_req_run(&req, conn_info_refresh_complete);
5404 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep conn alive until the deferred completion runs. */
5411 hci_conn_hold(conn);
5412 cmd->user_data = hci_conn_get(conn);
5413 cmd->cmd_complete = conn_info_cmd_complete;
5415 conn->conn_info_timestamp = jiffies;
5417 /* Cache is valid, just reply with values cached in hci_conn */
5418 rp.rssi = conn->rssi;
5419 rp.tx_power = conn->tx_power;
5420 rp.max_tx_power = conn->max_tx_power;
5422 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5423 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5427 hci_dev_unlock(hdev);
/* cmd_complete handler for Get Clock Information.
 *
 * Builds the reply with the local clock (looked up via cmd->index)
 * and, when a connection was involved, its piconet clock and
 * accuracy; then drops the connection reference.
 *
 * NOTE(review): excerpt is missing interior lines (NULL checks,
 * hci_dev_put, return) — comments cover visible logic only.
 */
5431 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5433 struct hci_conn *conn = cmd->user_data;
5434 struct mgmt_rp_get_clock_info rp;
5435 struct hci_dev *hdev;
5438 memset(&rp, 0, sizeof(rp));
5439 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
/* Re-acquire the device from the command's index. */
5444 hdev = hci_dev_get(cmd->index);
5446 rp.local_clock = cpu_to_le32(hdev->clock);
/* Per-connection clock data only when a conn was captured. */
5451 rp.piconet_clock = cpu_to_le32(conn->clock);
5452 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5456 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
/* Release the hold taken in get_clock_info(). */
5460 hci_conn_drop(conn);
/* HCI completion callback for the Read Clock request issued by
 * get_clock_info().
 *
 * Recovers the connection (if the piconet clock was requested, i.e.
 * `which` != 0) from the sent command's handle, finds the matching
 * pending MGMT command and finishes it.
 */
5467 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5469 struct hci_cp_read_clock *hci_cp;
5470 struct pending_cmd *cmd;
5471 struct hci_conn *conn;
5473 BT_DBG("%s status %u", hdev->name, status);
5477 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a connection was read. */
5481 if (hci_cp->which) {
5482 u16 handle = __le16_to_cpu(hci_cp->handle);
5483 conn = hci_conn_hash_lookup_handle(hdev, handle);
5488 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5492 cmd->cmd_complete(cmd, mgmt_status(status));
5493 mgmt_pending_remove(cmd);
5496 hci_dev_unlock(hdev);
/* MGMT Get Clock Information command handler (BR/EDR only).
 *
 * Always reads the local clock; when a specific (non-ANY) connected
 * peer address is given, additionally reads that connection's
 * piconet clock. The reply is produced asynchronously by
 * clock_info_cmd_complete via get_clock_info_complete.
 *
 * NOTE(review): excerpt is missing interior lines (lock, conn = NULL
 * initialization, goto labels, returns) — comments cover visible
 * logic only.
 */
5499 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5502 struct mgmt_cp_get_clock_info *cp = data;
5503 struct mgmt_rp_get_clock_info rp;
5504 struct hci_cp_read_clock hci_cp;
5505 struct pending_cmd *cmd;
5506 struct hci_request req;
5507 struct hci_conn *conn;
5510 BT_DBG("%s", hdev->name);
/* Pre-fill the reply address for the error paths. */
5512 memset(&rp, 0, sizeof(rp));
5513 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5514 rp.addr.type = cp->addr.type;
/* Clock information is only defined for BR/EDR. */
5516 if (cp->addr.type != BDADDR_BREDR)
5517 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5518 MGMT_STATUS_INVALID_PARAMS,
5523 if (!hdev_is_powered(hdev)) {
5524 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5525 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-ANY address: the peer must be currently connected. */
5529 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5530 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5532 if (!conn || conn->state != BT_CONNECTED) {
5533 err = cmd_complete(sk, hdev->id,
5534 MGMT_OP_GET_CLOCK_INFO,
5535 MGMT_STATUS_NOT_CONNECTED,
5543 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5549 cmd->cmd_complete = clock_info_cmd_complete;
5551 hci_req_init(&req, hdev);
/* First read: local clock (which = 0 via memset). */
5553 memset(&hci_cp, 0, sizeof(hci_cp));
5554 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Keep conn alive until the deferred completion runs. */
5557 hci_conn_hold(conn);
5558 cmd->user_data = hci_conn_get(conn);
5560 hci_cp.handle = cpu_to_le16(conn->handle);
5561 hci_cp.which = 0x01; /* Piconet clock */
5562 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5565 err = hci_req_run(&req, get_clock_info_complete);
5567 mgmt_pending_remove(cmd);
5570 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists
 * and is in BT_CONNECTED state. The actual return statements are in
 * lines not visible in this excerpt.
 */
5574 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5576 struct hci_conn *conn;
5578 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5582 if (conn->dst_type != type)
5585 if (conn->state != BT_CONNECTED)
5591 /* This function requires the caller holds hdev->lock */
/* Create-or-update the connection parameters entry for the given LE
 * address and set its auto_connect policy. The entry is moved onto
 * the matching action list (pend_le_reports / pend_le_conns) and the
 * background scan is updated accordingly.
 *
 * NOTE(review): excerpt is missing interior lines (error return when
 * hci_conn_params_add fails, break statements, return value). The
 * "¶ms" below looks like mojibake for "&params" — original
 * source should read list_del_init(&params->action) etc.; verify
 * before building.
 */
5592 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5593 u8 addr_type, u8 auto_connect)
5595 struct hci_dev *hdev = req->hdev;
5596 struct hci_conn_params *params;
5598 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
5602 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on. */
5605 list_del_init(¶ms->action);
5607 switch (auto_connect) {
5608 case HCI_AUTO_CONN_DISABLED:
5609 case HCI_AUTO_CONN_LINK_LOSS:
5610 __hci_update_background_scan(req);
5612 case HCI_AUTO_CONN_REPORT:
5613 list_add(¶ms->action, &hdev->pend_le_reports);
5614 __hci_update_background_scan(req);
5616 case HCI_AUTO_CONN_DIRECT:
5617 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if not already connected. */
5618 if (!is_connected(hdev, addr, addr_type)) {
5619 list_add(¶ms->action, &hdev->pend_le_conns);
5620 __hci_update_background_scan(req);
5625 params->auto_connect = auto_connect;
5627 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit an MGMT Device Added event to all management sockets except
 * the one that issued the command (sk). The action field assignment
 * is in a line not visible in this excerpt.
 */
5633 static void device_added(struct sock *sk, struct hci_dev *hdev,
5634 bdaddr_t *bdaddr, u8 type, u8 action)
5636 struct mgmt_ev_device_added ev;
5638 bacpy(&ev.addr.bdaddr, bdaddr);
5639 ev.addr.type = type;
5642 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Add Device: finish and remove
 * the pending MGMT command with the translated status.
 */
5645 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5647 struct pending_cmd *cmd;
5649 BT_DBG("status 0x%02x", status);
5653 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5657 cmd->cmd_complete(cmd, mgmt_status(status));
5658 mgmt_pending_remove(cmd);
5661 hci_dev_unlock(hdev);
/* MGMT Add Device command handler.
 *
 * For BR/EDR addresses (action 0x01 only) the device is added to the
 * whitelist and page scanning updated. For LE addresses the action
 * maps to an auto-connect policy (0x02 always, 0x01 direct, else
 * report) applied via hci_conn_params_set(). A Device Added event is
 * broadcast and the HCI request run; -ENODATA (no HCI commands
 * needed, e.g. powered off) is treated as success.
 *
 * NOTE(review): excerpt is missing interior lines (lock, goto
 * labels, error handling after mgmt_pending_add / list add) —
 * comments cover visible logic only.
 */
5664 static int add_device(struct sock *sk, struct hci_dev *hdev,
5665 void *data, u16 len)
5667 struct mgmt_cp_add_device *cp = data;
5668 struct pending_cmd *cmd;
5669 struct hci_request req;
5670 u8 auto_conn, addr_type;
5673 BT_DBG("%s", hdev->name);
/* Address must be a valid type and not BDADDR_ANY. */
5675 if (!bdaddr_type_is_valid(cp->addr.type) ||
5676 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5677 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5678 MGMT_STATUS_INVALID_PARAMS,
5679 &cp->addr, sizeof(cp->addr));
5681 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5682 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5683 MGMT_STATUS_INVALID_PARAMS,
5684 &cp->addr, sizeof(cp->addr));
5686 hci_req_init(&req, hdev);
5690 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5696 cmd->cmd_complete = addr_cmd_complete;
5698 if (cp->addr.type == BDADDR_BREDR) {
5699 /* Only incoming connections action is supported for now */
5700 if (cp->action != 0x01) {
5701 err = cmd->cmd_complete(cmd,
5702 MGMT_STATUS_INVALID_PARAMS);
5703 mgmt_pending_remove(cmd);
5707 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5712 __hci_update_page_scan(&req);
/* LE path: translate MGMT address type to HCI address type. */
5717 if (cp->addr.type == BDADDR_LE_PUBLIC)
5718 addr_type = ADDR_LE_DEV_PUBLIC;
5720 addr_type = ADDR_LE_DEV_RANDOM;
/* Action → auto-connect policy mapping. */
5722 if (cp->action == 0x02)
5723 auto_conn = HCI_AUTO_CONN_ALWAYS;
5724 else if (cp->action == 0x01)
5725 auto_conn = HCI_AUTO_CONN_DIRECT;
5727 auto_conn = HCI_AUTO_CONN_REPORT;
5729 /* If the connection parameters don't exist for this device,
5730 * they will be created and configured with defaults.
5732 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5734 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5735 mgmt_pending_remove(cmd);
5740 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5742 err = hci_req_run(&req, add_device_complete);
5744 /* ENODATA means no HCI commands were needed (e.g. if
5745 * the adapter is powered off).
5747 if (err == -ENODATA)
5748 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5749 mgmt_pending_remove(cmd);
5753 hci_dev_unlock(hdev);
/* Emit an MGMT Device Removed event to all management sockets except
 * the one that issued the command (sk).
 */
5757 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5758 bdaddr_t *bdaddr, u8 type)
5760 struct mgmt_ev_device_removed ev;
5762 bacpy(&ev.addr.bdaddr, bdaddr);
5763 ev.addr.type = type;
5765 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Remove Device: finish and
 * remove the pending MGMT command with the translated status.
 */
5768 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5770 struct pending_cmd *cmd;
5772 BT_DBG("status 0x%02x", status);
5776 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5780 cmd->cmd_complete(cmd, mgmt_status(status));
5781 mgmt_pending_remove(cmd);
5784 hci_dev_unlock(hdev);
/* MGMT Remove Device command handler.
 *
 * With a specific address: removes a BR/EDR whitelist entry (plus
 * page scan update) or an LE conn-params entry (plus background
 * scan update), emitting a Device Removed event. With BDADDR_ANY
 * (address type must be 0): clears the whole whitelist and every
 * non-disabled LE connection parameter entry. -ENODATA from
 * hci_req_run is treated as success (no HCI commands needed).
 *
 * NOTE(review): excerpt is missing interior lines (lock, goto
 * labels, kfree of params, returns). The "¶ms"/"&p" mixture
 * suggests "¶ms" is mojibake for "&params" — verify before
 * building.
 */
5787 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5788 void *data, u16 len)
5790 struct mgmt_cp_remove_device *cp = data;
5791 struct pending_cmd *cmd;
5792 struct hci_request req;
5795 BT_DBG("%s", hdev->name);
5797 hci_req_init(&req, hdev);
5801 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5807 cmd->cmd_complete = addr_cmd_complete;
/* Specific-address path. */
5809 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5810 struct hci_conn_params *params;
5813 if (!bdaddr_type_is_valid(cp->addr.type)) {
5814 err = cmd->cmd_complete(cmd,
5815 MGMT_STATUS_INVALID_PARAMS);
5816 mgmt_pending_remove(cmd);
5820 if (cp->addr.type == BDADDR_BREDR) {
5821 err = hci_bdaddr_list_del(&hdev->whitelist,
/* Not on the whitelist: nothing to remove. */
5825 err = cmd->cmd_complete(cmd,
5826 MGMT_STATUS_INVALID_PARAMS);
5827 mgmt_pending_remove(cmd);
5831 __hci_update_page_scan(&req);
5833 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path: map MGMT address type to HCI address type. */
5838 if (cp->addr.type == BDADDR_LE_PUBLIC)
5839 addr_type = ADDR_LE_DEV_PUBLIC;
5841 addr_type = ADDR_LE_DEV_RANDOM;
5843 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5846 err = cmd->cmd_complete(cmd,
5847 MGMT_STATUS_INVALID_PARAMS);
5848 mgmt_pending_remove(cmd);
/* Disabled entries were never "added" via MGMT. */
5852 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5853 err = cmd->cmd_complete(cmd,
5854 MGMT_STATUS_INVALID_PARAMS);
5855 mgmt_pending_remove(cmd);
5859 list_del(¶ms->action);
5860 list_del(¶ms->list);
5862 __hci_update_background_scan(&req);
5864 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY path: remove everything. */
5866 struct hci_conn_params *p, *tmp;
5867 struct bdaddr_list *b, *btmp;
/* Wildcard removal only valid with address type 0. */
5869 if (cp->addr.type) {
5870 err = cmd->cmd_complete(cmd,
5871 MGMT_STATUS_INVALID_PARAMS);
5872 mgmt_pending_remove(cmd);
5876 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5877 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5882 __hci_update_page_scan(&req);
5884 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
/* Keep disabled entries; they are kernel-internal. */
5885 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5887 device_removed(sk, hdev, &p->addr, p->addr_type);
5888 list_del(&p->action);
5893 BT_DBG("All LE connection parameters were removed");
5895 __hci_update_background_scan(&req);
5899 err = hci_req_run(&req, remove_device_complete);
5901 /* ENODATA means no HCI commands were needed (e.g. if
5902 * the adapter is powered off).
5904 if (err == -ENODATA)
5905 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5906 mgmt_pending_remove(cmd);
5910 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters command handler.
 *
 * Validates the entry count against the u16-payload maximum and the
 * exact message length, clears previously-disabled parameter
 * entries, then adds each valid entry (invalid address types or
 * out-of-range values are logged and skipped, not rejected).
 *
 * NOTE(review): excerpt is missing interior lines (loop index,
 * hci_dev_lock, continue statements). The "¶m" strings look like
 * mojibake for "&param" — verify before building.
 */
5914 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5917 struct mgmt_cp_load_conn_param *cp = data;
/* Overflow guard for the expected_len computation below. */
5918 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5919 sizeof(struct mgmt_conn_param));
5920 u16 param_count, expected_len;
5923 if (!lmp_le_capable(hdev))
5924 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5925 MGMT_STATUS_NOT_SUPPORTED);
5927 param_count = __le16_to_cpu(cp->param_count);
5928 if (param_count > max_param_count) {
5929 BT_ERR("load_conn_param: too big param_count value %u",
5931 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5932 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload size exactly. */
5935 expected_len = sizeof(*cp) + param_count *
5936 sizeof(struct mgmt_conn_param);
5937 if (expected_len != len) {
5938 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5940 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5941 MGMT_STATUS_INVALID_PARAMS);
5944 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop stale disabled entries before loading the new set. */
5948 hci_conn_params_clear_disabled(hdev);
5950 for (i = 0; i < param_count; i++) {
5951 struct mgmt_conn_param *param = &cp->params[i];
5952 struct hci_conn_params *hci_param;
5953 u16 min, max, latency, timeout;
5956 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
5959 if (param->addr.type == BDADDR_LE_PUBLIC) {
5960 addr_type = ADDR_LE_DEV_PUBLIC;
5961 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5962 addr_type = ADDR_LE_DEV_RANDOM;
/* Invalid entries are skipped, not fatal. */
5964 BT_ERR("Ignoring invalid connection parameters");
5968 min = le16_to_cpu(param->min_interval);
5969 max = le16_to_cpu(param->max_interval);
5970 latency = le16_to_cpu(param->latency);
5971 timeout = le16_to_cpu(param->timeout);
5973 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5974 min, max, latency, timeout);
5976 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5977 BT_ERR("Ignoring invalid connection parameters");
5981 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
5984 BT_ERR("Failed to add connection parameters");
5988 hci_param->conn_min_interval = min;
5989 hci_param->conn_max_interval = max;
5990 hci_param->conn_latency = latency;
5991 hci_param->supervision_timeout = timeout;
5994 hci_dev_unlock(hdev);
5996 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* MGMT Set External Config command handler.
 *
 * Toggles the HCI_EXT_CONFIGURED dev_flag for controllers carrying the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk, replies with the current options and
 * re-announces the controller index if its configured state changed.
 *
 * NOTE(review): the embedded numbering jumps (6018->6023, 6029->6036, ...)
 * show that lines were dropped from this listing (locking, declarations,
 * 'changed' checks, else branches, labels) — confirm against upstream.
 */
5999 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6000 void *data, u16 len)
6002 struct mgmt_cp_set_external_config *cp = data;
6006 BT_DBG("%s", hdev->name);
/* The command is only valid while the controller is powered off. */
6008 if (hdev_is_powered(hdev))
6009 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6010 MGMT_STATUS_REJECTED);
/* 'config' is a boolean on the wire: only 0x00 and 0x01 are accepted. */
6012 if (cp->config != 0x00 && cp->config != 0x01)
6013 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6014 MGMT_STATUS_INVALID_PARAMS);
/* Only meaningful for controllers that advertise the quirk. */
6016 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6017 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6018 MGMT_STATUS_NOT_SUPPORTED);
/* Record whether the flag actually flipped (set vs. clear path). */
6023 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
6026 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
6029 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6036 err = new_options(hdev, sk);
/* If the configured state changed, remove and re-add the index so
 * userspace sees it move between the configured/unconfigured lists.
 */
6038 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
6039 mgmt_index_removed(hdev);
6041 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6042 set_bit(HCI_CONFIG, &hdev->dev_flags);
6043 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
/* Kick the power-on work so the config stage runs. */
6045 queue_work(hdev->req_workqueue, &hdev->power_on);
6047 set_bit(HCI_RAW, &hdev->flags);
6048 mgmt_index_added(hdev);
6053 hci_dev_unlock(hdev);
/* MGMT Set Public Address command handler.
 *
 * Stores a caller-supplied public bdaddr for controllers that start out
 * without one, provided the driver exposes a set_bdaddr callback. If
 * setting the address completes the configuration, the index is moved
 * from the unconfigured to the configured list and power-on is queued.
 *
 * NOTE(review): numbering gaps indicate dropped lines (locking, 'changed'
 * check, labels) — confirm against upstream before relying on this flow.
 */
6057 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6058 void *data, u16 len)
6060 struct mgmt_cp_set_public_address *cp = data;
6064 BT_DBG("%s", hdev->name);
/* Address can only be changed while powered off. */
6066 if (hdev_is_powered(hdev))
6067 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6068 MGMT_STATUS_REJECTED);
/* BDADDR_ANY (all zeros) is not a valid public address. */
6070 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6071 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6072 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook to program the address, the op is unsupported. */
6074 if (!hdev->set_bdaddr)
6075 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6076 MGMT_STATUS_NOT_SUPPORTED);
6080 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6081 bacpy(&hdev->public_addr, &cp->bdaddr);
6083 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
/* Unconfigured controllers get a New Options event for the change. */
6090 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6091 err = new_options(hdev, sk);
/* If this address completed configuration, re-announce the index and
 * schedule power-on to run the config stage.
 */
6093 if (is_configured(hdev)) {
6094 mgmt_index_removed(hdev);
6096 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
6098 set_bit(HCI_CONFIG, &hdev->dev_flags);
6099 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6101 queue_work(hdev->req_workqueue, &hdev->power_on);
6105 hci_dev_unlock(hdev);
/* Dispatch table for MGMT commands, indexed by opcode.
 *
 * Each entry pairs a handler function with its expected parameter size.
 * The boolean field marks variable-length commands: for those, the
 * declared size is a minimum rather than an exact length (see the
 * var_len check in mgmt_control()). Entry order must match the opcode
 * numbering in mgmt.h — index 0 is intentionally NULL.
 *
 * NOTE(review): struct field lines between 6110 and 6114 were dropped
 * from this listing (the var_len and data_len members).
 */
6109 static const struct mgmt_handler {
6110 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
6114 } mgmt_handlers[] = {
6115 { NULL }, /* 0x0000 (no command) */
6116 { read_version, false, MGMT_READ_VERSION_SIZE },
6117 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
6118 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
6119 { read_controller_info, false, MGMT_READ_INFO_SIZE },
6120 { set_powered, false, MGMT_SETTING_SIZE },
6121 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
6122 { set_connectable, false, MGMT_SETTING_SIZE },
6123 { set_fast_connectable, false, MGMT_SETTING_SIZE },
6124 { set_bondable, false, MGMT_SETTING_SIZE },
6125 { set_link_security, false, MGMT_SETTING_SIZE },
6126 { set_ssp, false, MGMT_SETTING_SIZE },
6127 { set_hs, false, MGMT_SETTING_SIZE },
6128 { set_le, false, MGMT_SETTING_SIZE },
6129 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
6130 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
6131 { add_uuid, false, MGMT_ADD_UUID_SIZE },
6132 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* Key-loading commands carry a variable number of trailing entries. */
6133 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
6134 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
6135 { disconnect, false, MGMT_DISCONNECT_SIZE },
6136 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
6137 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
6138 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6139 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
6140 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
6141 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6142 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
6143 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
6144 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6145 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
6146 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6147 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6148 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
6149 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6150 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
6151 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
6152 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
6153 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
6154 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
6155 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
6156 { set_advertising, false, MGMT_SETTING_SIZE },
6157 { set_bredr, false, MGMT_SETTING_SIZE },
6158 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
6159 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
6160 { set_secure_conn, false, MGMT_SETTING_SIZE },
6161 { set_debug_keys, false, MGMT_SETTING_SIZE },
6162 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
6163 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
6164 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
6165 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
6166 { add_device, false, MGMT_ADD_DEVICE_SIZE },
6167 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
6168 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
6169 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
6170 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
6171 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
6172 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
6173 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for MGMT commands arriving on the HCI control socket.
 *
 * Copies the message from userspace, validates the header (opcode,
 * index, declared length), resolves the target hci_dev, enforces the
 * index/opcode pairing rules and the per-command length rules from
 * mgmt_handlers[], then dispatches to the handler.
 *
 * NOTE(review): numbering gaps show dropped lines (error labels, kfree,
 * hci_dev_put, the trailing done/out path after 6272) — confirm against
 * upstream. Error paths below presumably jump to a cleanup label that
 * frees 'buf'.
 */
6176 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6180 struct mgmt_hdr *hdr;
6181 u16 opcode, index, len;
6182 struct hci_dev *hdev = NULL;
6183 const struct mgmt_handler *handler;
6186 BT_DBG("got %zu bytes", msglen);
/* Must at least contain a full mgmt_hdr. */
6188 if (msglen < sizeof(*hdr))
6191 buf = kmalloc(msglen, GFP_KERNEL);
6195 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields are little-endian on the wire. */
6201 opcode = __le16_to_cpu(hdr->opcode);
6202 index = __le16_to_cpu(hdr->index);
6203 len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received. */
6205 if (len != msglen - sizeof(*hdr)) {
6210 if (index != MGMT_INDEX_NONE) {
6211 hdev = hci_dev_get(index);
6213 err = cmd_status(sk, index, opcode,
6214 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup/config, or bound to a user channel,
 * are not addressable through the management interface.
 */
6218 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6219 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6220 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6221 err = cmd_status(sk, index, opcode,
6222 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers accept only the config-related opcodes. */
6226 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6227 opcode != MGMT_OP_READ_CONFIG_INFO &&
6228 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
6229 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
6230 err = cmd_status(sk, index, opcode,
6231 MGMT_STATUS_INVALID_INDEX);
6236 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
6237 mgmt_handlers[opcode].func == NULL) {
6238 BT_DBG("Unknown op %u", opcode);
6239 err = cmd_status(sk, index, opcode,
6240 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global commands (version/index lists) must NOT carry an index... */
6244 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6245 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6246 err = cmd_status(sk, index, opcode,
6247 MGMT_STATUS_INVALID_INDEX);
/* ...and all other commands MUST carry one. */
6251 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
6252 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6253 err = cmd_status(sk, index, opcode,
6254 MGMT_STATUS_INVALID_INDEX);
6258 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed-length
 * commands need exactly data_len bytes.
 */
6260 if ((handler->var_len && len < handler->data_len) ||
6261 (!handler->var_len && len != handler->data_len)) {
6262 err = cmd_status(sk, index, opcode,
6263 MGMT_STATUS_INVALID_PARAMS);
6268 mgmt_init_hdev(sk, hdev);
6270 cp = buf + sizeof(*hdr);
6272 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller index to userspace.
 * Raw-only and non-BR/EDR devices are never exposed; unconfigured
 * controllers get the UNCONF variant of the event.
 */
6286 void mgmt_index_added(struct hci_dev *hdev)
6288 if (hdev->dev_type != HCI_BREDR)
6291 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6294 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6295 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6297 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller index to userspace.
 * All pending commands for the device are completed with
 * MGMT_STATUS_INVALID_INDEX first, then the matching (unconfigured or
 * regular) removal event is sent.
 */
6300 void mgmt_index_removed(struct hci_dev *hdev)
6302 u8 status = MGMT_STATUS_INVALID_INDEX;
6304 if (hdev->dev_type != HCI_BREDR)
6307 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 matches every pending command for this hdev. */
6310 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6312 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6313 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6315 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6318 /* This function requires the caller holds hdev->lock */
/* Re-sort every stored LE connection parameter entry onto the correct
 * pending list (pend_le_conns for direct/always auto-connect,
 * pend_le_reports for report-only) and refresh background scanning.
 * Needed after power-on since an AUTO_OFF "power off" may not have
 * really powered the controller down.
 */
6319 static void restart_le_actions(struct hci_request *req)
6321 struct hci_dev *hdev = req->hdev;
6322 struct hci_conn_params *p;
6324 list_for_each_entry(p, &hdev->le_conn_params, list) {
6325 /* Needed for AUTO_OFF case where might not "really"
6326 * have been powered off.
/* Detach from any stale pending list before re-adding below. */
6328 list_del_init(&p->action);
6330 switch (p->auto_connect) {
6331 case HCI_AUTO_CONN_DIRECT:
6332 case HCI_AUTO_CONN_ALWAYS:
6333 list_add(&p->action, &hdev->pend_le_conns);
6335 case HCI_AUTO_CONN_REPORT:
6336 list_add(&p->action, &hdev->pend_le_reports);
/* NOTE(review): break statements and the default case are missing
 * from this listing — confirm against upstream.
 */
6343 __hci_update_background_scan(req);
/* Completion callback for the power-on HCI request: responds to all
 * pending Set Powered commands and emits New Settings.
 */
6346 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6348 struct cmd_lookup match = { NULL, hdev };
6350 BT_DBG("status 0x%02x", status);
6353 /* Register the available SMP channels (BR/EDR and LE) only
6354 * when successfully powering on the controller. This late
6355 * registration is required so that LE SMP can clearly
6356 * decide if the public address or static address is used.
6363 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6365 new_settings(hdev, match.sk);
6367 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings controller state in line
 * with the management settings after power-on: SSP/SC host support,
 * LE host support, advertising data, auth enable, fast connectable and
 * page scan. Returns the hci_req_run() result (0 if commands were
 * queued with powered_complete as callback).
 *
 * NOTE(review): numbering gaps show dropped lines (local declarations
 * such as 'link_sec', mode/support assignments, cp setup) — confirm
 * against upstream.
 */
6373 static int powered_update_hci(struct hci_dev *hdev)
6375 struct hci_request req;
6378 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the controller's
 * host feature bit is not yet set.
 */
6380 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6381 !lmp_host_ssp_capable(hdev)) {
6384 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6386 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6389 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6390 sizeof(support), &support);
6394 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6395 lmp_bredr_capable(hdev)) {
6396 struct hci_cp_write_le_host_supported cp;
6401 /* Check first if we already have the right
6402 * host state (host features set)
6404 if (cp.le != lmp_host_le_capable(hdev) ||
6405 cp.simul != lmp_host_le_br_capable(hdev))
6406 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6410 if (lmp_le_capable(hdev)) {
6411 /* Make sure the controller has a good default for
6412 * advertising data. This also applies to the case
6413 * where BR/EDR was toggled during the AUTO_OFF phase.
6415 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6416 update_adv_data(&req);
6417 update_scan_rsp_data(&req);
6420 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6421 enable_advertising(&req);
/* Re-populate the pending LE connect/report lists (see above). */
6423 restart_le_actions(&req);
/* Sync the controller's auth-enable with the link security setting. */
6426 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6427 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6428 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6429 sizeof(link_sec), &link_sec);
6431 if (lmp_bredr_capable(hdev)) {
6432 write_fast_connectable(&req, false);
6433 __hci_update_page_scan(&req);
6439 return hci_req_run(&req, powered_complete);
/* Notify the management interface of a power state change.
 * On power-on, defers the userspace notification to powered_complete()
 * when powered_update_hci() successfully queued a request. On power-off,
 * fails all pending commands with the appropriate status and clears the
 * advertised class of device if it was non-zero.
 *
 * NOTE(review): numbering gaps show dropped lines (early returns, the
 * powered branch structure, the final return) — confirm against upstream.
 */
6442 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6444 struct cmd_lookup match = { NULL, hdev };
6445 u8 status, zero_cod[] = { 0, 0, 0 };
/* Nothing to do unless mgmt has been initialized for this device. */
6448 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6452 if (powered_update_hci(hdev) == 0)
6455 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6460 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6462 /* If the power off is because of hdev unregistration let
6463 * use the appropriate INVALID_INDEX status. Otherwise use
6464 * NOT_POWERED. We cover both scenarios here since later in
6465 * mgmt_index_removed() any hci_conn callbacks will have already
6466 * been triggered, potentially causing misleading DISCONNECTED
6469 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6470 status = MGMT_STATUS_INVALID_INDEX;
6472 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 matches every remaining pending command. */
6474 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6476 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6477 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6478 zero_cod, sizeof(zero_cod), NULL);
6481 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command. -ERFKILL maps to the dedicated
 * RFKILLED status; anything else becomes a generic FAILED.
 */
6489 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6491 struct pending_cmd *cmd;
6494 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6498 if (err == -ERFKILL)
6499 status = MGMT_STATUS_RFKILLED;
6501 status = MGMT_STATUS_FAILED;
6503 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6505 mgmt_pending_remove(cmd);
/* Discoverable-timeout expiry: clear both discoverable flags, turn off
 * inquiry scan (keeping page scan) on BR/EDR, refresh LE advertising
 * data, and emit New Settings.
 */
6508 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6510 struct hci_request req;
6514 /* When discoverable timeout triggers, then just make sure
6515 * the limited discoverable flag is cleared. Even in the case
6516 * of a timeout triggered from general discoverable, it is
6517 * safe to unconditionally clear the flag.
6519 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6520 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6522 hci_req_init(&req, hdev);
6523 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE alone = connectable but no longer discoverable. */
6524 u8 scan = SCAN_PAGE;
6525 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6526 sizeof(scan), &scan);
/* Advertising data carries the discoverable flags for LE, so it
 * must be regenerated too.
 */
6529 update_adv_data(&req);
6530 hci_req_run(&req, NULL);
6532 hdev->discov_timeout = 0;
6534 new_settings(hdev, NULL);
6536 hci_dev_unlock(hdev);
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * 'persistent' becomes the store hint telling userspace whether to
 * persist the key.
 */
6539 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6542 struct mgmt_ev_new_link_key ev;
6544 memset(&ev, 0, sizeof(ev));
6546 ev.store_hint = persistent;
6547 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6548 ev.key.addr.type = BDADDR_BREDR;
6549 ev.key.type = key->type;
6550 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6551 ev.key.pin_len = key->pin_len;
6553 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's internal type/authenticated pair onto the MGMT
 * long-term-key type constants exposed to userspace.
 * NOTE(review): the case labels before 6561 and 6565 were dropped from
 * this listing — confirm which SMP_LTK types each branch covers.
 */
6556 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6558 switch (ltk->type) {
6561 if (ltk->authenticated)
6562 return MGMT_LTK_AUTHENTICATED;
6563 return MGMT_LTK_UNAUTHENTICATED;
6565 if (ltk->authenticated)
6566 return MGMT_LTK_P256_AUTH;
6567 return MGMT_LTK_P256_UNAUTH;
6568 case SMP_LTK_P256_DEBUG:
6569 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown types. */
6572 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. The store hint is forced to 0 for
 * non-identity random addresses (top two bits != 11b, i.e. not static
 * random) because such keys cannot be reused across address changes.
 */
6575 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6577 struct mgmt_ev_new_long_term_key ev;
6579 memset(&ev, 0, sizeof(ev));
6581 /* Devices using resolvable or non-resolvable random addresses
6582 * without providing an indentity resolving key don't require
6583 * to store long term keys. Their addresses will change the
6586 * Only when a remote device provides an identity address
6587 * make sure the long term key is stored. If the remote
6588 * identity is known, the long term keys are internally
6589 * mapped to the identity address. So allow static random
6590 * and public addresses here.
6592 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6593 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6594 ev.store_hint = 0x00;
6596 ev.store_hint = persistent;
6598 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6599 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6600 ev.key.type = mgmt_ltk_type(key);
6601 ev.key.enc_size = key->enc_size;
6602 ev.key.ediv = key->ediv;
6603 ev.key.rand = key->rand;
/* NOTE(review): the SMP_LTK (master) branch body at 6606-6607 is
 * missing from this listing.
 */
6605 if (key->type == SMP_LTK)
6608 memcpy(ev.key.val, key->val, sizeof(key->val));
6610 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event. The store hint is set only when the IRK came
 * with an actual RPA (irk->rpa != BDADDR_ANY), i.e. the peer really
 * uses resolvable private addresses.
 */
6613 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6615 struct mgmt_ev_new_irk ev;
6617 memset(&ev, 0, sizeof(ev));
6619 /* For identity resolving keys from devices that are already
6620 * using a public address or static random address, do not
6621 * ask for storing this key. The identity resolving key really
6622 * is only mandatory for devices using resovlable random
6625 * Storing all identity resolving keys has the downside that
6626 * they will be also loaded on next boot of they system. More
6627 * identity resolving keys, means more time during scanning is
6628 * needed to actually resolve these addresses.
6630 if (bacmp(&irk->rpa, BDADDR_ANY))
6631 ev.store_hint = 0x01;
6633 ev.store_hint = 0x00;
6635 bacpy(&ev.rpa, &irk->rpa);
6636 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6637 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6638 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6640 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event. Like LTKs, keys for
 * non-identity random addresses get a zero store hint since they become
 * useless once the peer's address changes.
 */
6643 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6646 struct mgmt_ev_new_csrk ev;
6648 memset(&ev, 0, sizeof(ev));
6650 /* Devices using resolvable or non-resolvable random addresses
6651 * without providing an indentity resolving key don't require
6652 * to store signature resolving keys. Their addresses will change
6653 * the next time around.
6655 * Only when a remote device provides an identity address
6656 * make sure the signature resolving key is stored. So allow
6657 * static random and public addresses here.
6659 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6660 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6661 ev.store_hint = 0x00;
6663 ev.store_hint = persistent;
6665 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6666 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6667 ev.key.type = csrk->type;
6668 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6670 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE peer, converting all
 * 16-bit fields to little-endian wire order. Silently ignored for
 * non-identity addresses since the parameters could not be reapplied.
 */
6673 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6674 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6675 u16 max_interval, u16 latency, u16 timeout)
6677 struct mgmt_ev_new_conn_param ev;
6679 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6682 memset(&ev, 0, sizeof(ev));
6683 bacpy(&ev.addr.bdaddr, bdaddr);
6684 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6685 ev.store_hint = store_hint;
6686 ev.min_interval = cpu_to_le16(min_interval);
6687 ev.max_interval = cpu_to_le16(max_interval);
6688 ev.latency = cpu_to_le16(latency);
6689 ev.timeout = cpu_to_le16(timeout);
6691 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) to the buffer
 * at offset eir_len and return the new length. The caller must ensure
 * the buffer has room for data_len + 2 more bytes.
 */
6694 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6697 eir[eir_len++] = sizeof(type) + data_len;
6698 eir[eir_len++] = type;
6699 memcpy(&eir[eir_len], data, data_len);
6700 eir_len += data_len;
/* Emit a Device Connected event. For LE connections that came with
 * advertising data, that data is forwarded verbatim as the EIR payload;
 * otherwise the remote name and (for BR/EDR) class of device are packed
 * into EIR fields.
 * NOTE(review): the 'buf'/'eir_len' declarations and parts of the
 * name/class branch structure are missing from this listing.
 */
6705 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6706 u32 flags, u8 *name, u8 name_len)
6709 struct mgmt_ev_device_connected *ev = (void *) buf;
6712 bacpy(&ev->addr.bdaddr, &conn->dst);
6713 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6715 ev->flags = __cpu_to_le32(flags);
6717 /* We must ensure that the EIR Data fields are ordered and
6718 * unique. Keep it simple for now and avoid the problem by not
6719 * adding any BR/EDR data to the LE adv.
6721 if (conn->le_adv_data_len > 0) {
6722 memcpy(&ev->eir[eir_len],
6723 conn->le_adv_data, conn->le_adv_data_len);
6724 eir_len = conn->le_adv_data_len;
6727 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* A class of "\0\0\0" means unknown and is not reported. */
6730 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6731 eir_len = eir_append_data(ev->eir, eir_len,
6733 conn->dev_class, 3);
6736 ev->eir_len = cpu_to_le16(eir_len);
6738 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6739 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending Disconnect command
 * with success and hand its socket back through 'data' (a struct sock
 * double pointer) so the caller can route the disconnect event to it.
 */
6742 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6744 struct sock **sk = data;
6746 cmd->cmd_complete(cmd, 0);
6751 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: finish a pending Unpair Device command
 * — send the Device Unpaired event, complete the command with success
 * and drop it from the pending list.
 */
6754 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6756 struct hci_dev *hdev = data;
6757 struct mgmt_cp_unpair_device *cp = cmd->param;
6759 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6761 cmd->cmd_complete(cmd, 0);
6762 mgmt_pending_remove(cmd);
/* Return true when a Set Powered(off) command is currently pending,
 * i.e. the controller is in the process of powering down.
 * NOTE(review): the check of cp->val and the return statements are
 * missing from this listing.
 */
6765 bool mgmt_powering_down(struct hci_dev *hdev)
6767 struct pending_cmd *cmd;
6768 struct mgmt_mode *cp;
6770 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event (ACL and LE links only). If this was
 * the last connection during a power-down, the queued power-off work is
 * expedited. Pending Disconnect commands are completed first so the
 * event can be routed to the socket that requested the disconnect, and
 * pending Unpair commands are flushed afterwards.
 */
6781 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6782 u8 link_type, u8 addr_type, u8 reason,
6783 bool mgmt_connected)
6785 struct mgmt_ev_device_disconnected ev;
6786 struct sock *sk = NULL;
6788 /* The connection is still in hci_conn_hash so test for 1
6789 * instead of 0 to know if this is the last one.
6791 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6792 cancel_delayed_work(&hdev->power_off);
6793 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* No event if mgmt never saw the connection as established. */
6796 if (!mgmt_connected)
6799 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Picks up the requester's socket into 'sk' (see disconnect_rsp). */
6802 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6804 bacpy(&ev.addr.bdaddr, bdaddr);
6805 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6808 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6813 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Handle a failed HCI Disconnect: flush pending Unpair commands, then
 * complete the pending Disconnect command for this exact address/type
 * with the translated failure status.
 */
6817 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6818 u8 link_type, u8 addr_type, u8 status)
6820 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6821 struct mgmt_cp_disconnect *cp;
6822 struct pending_cmd *cmd;
6824 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6827 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only respond if the pending command targets this same peer. */
6833 if (bacmp(bdaddr, &cp->addr.bdaddr))
6836 if (cp->addr.type != bdaddr_type)
6839 cmd->cmd_complete(cmd, mgmt_status(status));
6840 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the mgmt-translated HCI status.
 * As in mgmt_device_disconnected(), a last-connection failure during
 * power-down fast-tracks the queued power-off work.
 */
6843 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6844 u8 addr_type, u8 status)
6846 struct mgmt_ev_connect_failed ev;
6848 /* The connection is still in hci_conn_hash so test for 1
6849 * instead of 0 to know if this is the last one.
6851 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6852 cancel_delayed_work(&hdev->power_off);
6853 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6856 bacpy(&ev.addr.bdaddr, bdaddr);
6857 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6858 ev.status = mgmt_status(status);
6860 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR peer; 'secure' tells
 * userspace whether a 16-digit secure PIN is required.
 */
6863 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6865 struct mgmt_ev_pin_code_request ev;
6867 bacpy(&ev.addr.bdaddr, bdaddr);
6868 ev.addr.type = BDADDR_BREDR;
6871 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the translated HCI
 * status, if one is pending.
 */
6874 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6877 struct pending_cmd *cmd;
6879 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6883 cmd->cmd_complete(cmd, mgmt_status(status));
6884 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status, if one is pending.
 */
6887 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6890 struct pending_cmd *cmd;
6892 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6896 cmd->cmd_complete(cmd, mgmt_status(status));
6897 mgmt_pending_remove(cmd);
/* Emit a User Confirmation Request event carrying the 6-digit numeric
 * comparison value (little-endian) and the confirm hint.
 */
6900 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6901 u8 link_type, u8 addr_type, u32 value,
6904 struct mgmt_ev_user_confirm_request ev;
6906 BT_DBG("%s", hdev->name);
6908 bacpy(&ev.addr.bdaddr, bdaddr);
6909 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6910 ev.confirm_hint = confirm_hint;
6911 ev.value = cpu_to_le32(value);
6913 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace to collect a
 * passkey for the given peer.
 */
6917 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6918 u8 link_type, u8 addr_type)
6920 struct mgmt_ev_user_passkey_request ev;
6922 BT_DBG("%s", hdev->name);
6924 bacpy(&ev.addr.bdaddr, bdaddr);
6925 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6927 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey reply
 * variants: find the pending command for 'opcode' and complete it with
 * the translated HCI status.
 */
6931 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6932 u8 link_type, u8 addr_type, u8 status,
6935 struct pending_cmd *cmd;
6937 cmd = mgmt_pending_find(opcode, hdev);
6941 cmd->cmd_complete(cmd, mgmt_status(status));
6942 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply. */
6947 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6948 u8 link_type, u8 addr_type, u8 status)
6950 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6951 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Negative Reply. */
6954 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6955 u8 link_type, u8 addr_type, u8 status)
6957 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6959 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply. */
6962 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6963 u8 link_type, u8 addr_type, u8 status)
6965 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6966 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Negative Reply. */
6969 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6970 u8 link_type, u8 addr_type, u8 status)
6972 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6974 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey;
 * 'entered' reports how many digits the remote side has keyed in.
 */
6977 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6978 u8 link_type, u8 addr_type, u32 passkey,
6981 struct mgmt_ev_passkey_notify ev;
6983 BT_DBG("%s", hdev->name);
6985 bacpy(&ev.addr.bdaddr, bdaddr);
6986 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6987 ev.passkey = __cpu_to_le32(passkey);
6988 ev.entered = entered;
6990 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6993 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6995 struct mgmt_ev_auth_failed ev;
6996 struct pending_cmd *cmd;
6997 u8 status = mgmt_status(hci_status);
6999 bacpy(&ev.addr.bdaddr, &conn->dst);
7000 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7003 cmd = find_pairing(conn);
7005 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7006 cmd ? cmd->sk : NULL);
7009 cmd->cmd_complete(cmd, status);
7010 mgmt_pending_remove(cmd);
/* Completion of HCI Write Auth Enable: on error, fail all pending Set
 * Link Security commands; on success, sync the HCI_LINK_SECURITY flag
 * with the controller's HCI_AUTH state, respond to the pending commands
 * and emit New Settings if anything changed.
 */
7014 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7016 struct cmd_lookup match = { NULL, hdev };
7020 u8 mgmt_err = mgmt_status(status);
7021 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7022 cmd_status_rsp, &mgmt_err);
7026 if (test_bit(HCI_AUTH, &hdev->flags))
7027 changed = !test_and_set_bit(HCI_LINK_SECURITY,
7030 changed = test_and_clear_bit(HCI_LINK_SECURITY,
7033 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7037 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command with an all-zero payload to wipe the
 * extended inquiry response, clearing the cached copy first. No-op on
 * controllers without extended inquiry support.
 */
7043 static void clear_eir(struct hci_request *req)
7045 struct hci_dev *hdev = req->hdev;
7046 struct hci_cp_write_eir cp;
7048 if (!lmp_ext_inq_capable(hdev))
7051 memset(hdev->eir, 0, sizeof(hdev->eir));
7053 memset(&cp, 0, sizeof(cp));
7055 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of HCI Write SSP Mode: on error, roll back the flags set
 * optimistically and fail pending Set SSP commands; on success, sync
 * HCI_SSP_ENABLED (and HCI_HS_ENABLED on disable), respond to pending
 * commands, emit New Settings if changed, and refresh the debug-key
 * mode / EIR state accordingly.
 *
 * NOTE(review): numbering gaps show dropped lines (the 'if (status)'
 * frame, enable/disable branch structure, the else clear_eir() path).
 */
7058 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7060 struct cmd_lookup match = { NULL, hdev };
7061 struct hci_request req;
7062 bool changed = false;
7065 u8 mgmt_err = mgmt_status(status);
/* Failed enable: undo the optimistic flag set and drop HS too. */
7067 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
7068 &hdev->dev_flags)) {
7069 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
7070 new_settings(hdev, NULL);
7073 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7079 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
7081 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Disabling SSP implies disabling high speed as well. */
7083 changed = test_and_clear_bit(HCI_HS_ENABLED,
7086 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
7089 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7092 new_settings(hdev, match.sk);
7097 hci_req_init(&req, hdev);
7099 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
7100 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
7101 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7102 sizeof(enable), &enable);
7108 hci_req_run(&req, NULL);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket into the cmd_lookup match, taking a reference on it.
 */
7111 static void sk_lookup(struct pending_cmd *cmd, void *data)
7113 struct cmd_lookup *match = data;
7115 if (match->sk == NULL) {
7116 match->sk = cmd->sk;
7117 sock_hold(match->sk);
/* Completion of a class-of-device update: find the socket of whichever
 * command triggered it (Set Dev Class, Add UUID or Remove UUID) so the
 * Class Of Device Changed event can skip echoing to the originator.
 * NOTE(review): the status check guarding the event and the sock_put
 * are on lines missing from this listing.
 */
7121 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7124 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7126 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7127 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7128 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7131 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion of a local-name change: store the new name, and emit a
 * Local Name Changed event unless the write was part of a power-on
 * sequence (a pending Set Powered suppresses the signal). A pending
 * Set Local Name command means the change was user-initiated.
 */
7138 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7140 struct mgmt_cp_set_local_name ev;
7141 struct pending_cmd *cmd;
7146 memset(&ev, 0, sizeof(ev));
7147 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7148 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7150 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7152 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7154 /* If this is a HCI command related to powering on the
7155 * HCI dev don't send any mgmt signals.
7157 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7161 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7162 cmd ? cmd->sk : NULL);
/* Completion of HCI Read Local OOB Data: respond to the pending mgmt
 * command with the P-192 hash/randomizer, appending the P-256 values
 * only when BR/EDR secure connections is enabled and they are present
 * (otherwise the reply is truncated to exclude those fields).
 */
7165 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7166 u8 *rand192, u8 *hash256, u8 *rand256,
7169 struct pending_cmd *cmd;
7171 BT_DBG("%s status %u", hdev->name, status);
7173 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7178 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7179 mgmt_status(status));
7181 struct mgmt_rp_read_local_oob_data rp;
7182 size_t rp_size = sizeof(rp);
7184 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7185 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7187 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7188 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7189 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
/* No P-256 data: shrink the reply so those fields are omitted. */
7191 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7194 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7198 mgmt_pending_remove(cmd);
/* Return whether the 128-bit 'uuid' appears in the 'uuids' array of
 * 'uuid_count' 16-byte entries (byte-wise comparison).
 */
7201 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7205 for (i = 0; i < uuid_count; i++) {
7206 if (!memcmp(uuid, uuids[i], 16))
/* Scan an EIR blob for any 16-, 32- or 128-bit service UUID matching
 * the caller's filter list. Short UUIDs are expanded against the
 * Bluetooth base UUID (bytes 12-15 overridden, little-endian in EIR)
 * before comparison. Fields are walked via the length-prefixed EIR
 * framing; a truncated field terminates the scan.
 */
7213 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7217 while (parsed < eir_len) {
7218 u8 field_len = eir[0];
/* Bail out if the declared field length overruns the buffer. */
7225 if (eir_len - parsed < field_len + 1)
7229 case EIR_UUID16_ALL:
7230 case EIR_UUID16_SOME:
/* 16-bit UUIDs: splice the two bytes into the base UUID. */
7231 for (i = 0; i + 3 <= field_len; i += 2) {
7232 memcpy(uuid, bluetooth_base_uuid, 16);
7233 uuid[13] = eir[i + 3];
7234 uuid[12] = eir[i + 2];
7235 if (has_uuid(uuid, uuid_count, uuids))
7239 case EIR_UUID32_ALL:
7240 case EIR_UUID32_SOME:
/* 32-bit UUIDs: splice four bytes into the base UUID. */
7241 for (i = 0; i + 5 <= field_len; i += 4) {
7242 memcpy(uuid, bluetooth_base_uuid, 16);
7243 uuid[15] = eir[i + 5];
7244 uuid[14] = eir[i + 4];
7245 uuid[13] = eir[i + 3];
7246 uuid[12] = eir[i + 2];
7247 if (has_uuid(uuid, uuid_count, uuids))
7251 case EIR_UUID128_ALL:
7252 case EIR_UUID128_SOME:
/* 128-bit UUIDs are compared verbatim. */
7253 for (i = 0; i + 17 <= field_len; i += 16) {
7254 memcpy(uuid, eir + i + 2, 16);
7255 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: length byte plus field_len bytes. */
7261 parsed += field_len + 1;
7262 eir += field_len + 1;
/* Schedule a delayed LE scan restart (used with the strict duplicate
 * filter quirk), unless the controller isn't scanning or the restart
 * would land past the end of the current scan window.
 */
7268 static void restart_le_scan(struct hci_dev *hdev)
7270 /* If controller is not scanning we are done. */
7271 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
/* Don't bother restarting if the scan window ends before the
 * restart delay would elapse.
 */
7274 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7275 hdev->discovery.scan_start +
7276 hdev->discovery.scan_duration))
7279 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7280 DISCOV_LE_RESTART_DELAY);
/* Report a discovered remote device to management-interface listeners as
 * an MGMT_EV_DEVICE_FOUND event. Applies the discovery filters configured
 * on @hdev (RSSI threshold, UUID list) to the EIR/advertising data in
 * @eir and the scan response in @scan_rsp, and drops results that do not
 * match. @dev_class, when present and not already in the EIR, is appended
 * as an EIR_CLASS_OF_DEV field.
 * NOTE(review): the declarations of 'buf', 'ev_size' and 'match', the
 * RSSI assignments, and the 'return'/'goto' statements taken when a
 * result is dropped are elided from this excerpt — confirm control flow
 * against full source.
 */
7283 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7284 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7285 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
/* Event is built in a stack buffer and cast to the wire struct. */
7288 struct mgmt_ev_device_found *ev = (void *) buf;
7292 /* Don't send events for a non-kernel initiated discovery. With
7293 * LE one exception is if we have pend_le_reports > 0 in which
7294 * case we're doing passive scanning and want these events.
7296 if (!hci_discovery_active(hdev)) {
7297 if (link_type == ACL_LINK)
7299 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7303 /* When using service discovery with a RSSI threshold, then check
7304 * if such a RSSI threshold is specified. If a RSSI threshold has
7305 * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
7306 * then all results with a RSSI smaller than the RSSI threshold will be
7307 * dropped. If the quirk is set, let it through for further processing,
7308 * as we might need to restart the scan.
7310 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7311 * the results are also dropped.
7313 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7314 (rssi == HCI_RSSI_INVALID ||
7315 (rssi < hdev->discovery.rssi &&
7316 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7319 /* Make sure that the buffer is big enough. The 5 extra bytes
7320 * are for the potential CoD field.
7322 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7325 memset(buf, 0, sizeof(buf));
7327 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7328 * RSSI value was reported as 0 when not available. This behavior
7329 * is kept when using device discovery. This is required for full
7330 * backwards compatibility with the API.
7332 * However when using service discovery, the value 127 will be
7333 * returned when the RSSI is not available.
7335 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7336 link_type == ACL_LINK)
7339 bacpy(&ev->addr.bdaddr, bdaddr);
7340 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7342 ev->flags = cpu_to_le32(flags);
7345 /* When using service discovery and a list of UUID is
7346 * provided, results with no matching UUID should be
7347 * dropped. In case there is a match the result is
7348 * kept and checking possible scan response data
7351 if (hdev->discovery.uuid_count > 0) {
7352 match = eir_has_uuids(eir, eir_len,
7353 hdev->discovery.uuid_count,
7354 hdev->discovery.uuids);
7355 /* If duplicate filtering does not report RSSI changes,
7356 * then restart scanning to ensure updated result with
7357 * updated RSSI values.
7359 if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
7361 restart_le_scan(hdev);
/* No UUID match in EIR and no scan response to check either:
 * nothing left that could satisfy the filter.
 */
7366 if (!match && !scan_rsp_len)
7369 /* Copy EIR or advertising data into event */
7370 memcpy(ev->eir, eir, eir_len);
7372 /* When using service discovery and a list of UUID is
7373 * provided, results with empty EIR or advertising data
7374 * should be dropped since they do not match any UUID.
7376 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
7382 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7383 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7386 if (scan_rsp_len > 0) {
7387 /* When using service discovery and a list of UUID is
7388 * provided, results with no matching UUID should be
7389 * dropped if there is no previous match from the
7392 if (hdev->discovery.uuid_count > 0) {
7393 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7394 hdev->discovery.uuid_count,
7395 hdev->discovery.uuids))
7398 /* If duplicate filtering does not report RSSI changes,
7399 * then restart scanning to ensure updated result with
7400 * updated RSSI values.
7402 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
7404 restart_le_scan(hdev);
7407 /* Append scan response data to event */
7408 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7410 /* When using service discovery and a list of UUID is
7411 * provided, results with empty scan response and no
7412 * previous matched advertising data should be dropped.
7414 if (hdev->discovery.uuid_count > 0 && !match)
7418 /* Validate the reported RSSI value against the RSSI threshold once more
7419 * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
7422 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7423 rssi < hdev->discovery.rssi)
/* Total EIR payload = advertising data + scan response data. */
7426 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7427 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7429 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote device name to management-interface listeners.
 * The name is packed into the event's EIR area as an EIR_NAME_COMPLETE
 * field and sent as MGMT_EV_DEVICE_FOUND.
 * NOTE(review): the 'eir_len' declaration and the assignment of ev->rssi
 * from @rssi are elided from this excerpt.
 */
7432 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7433 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7435 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field header (length byte + type byte). */
7436 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7439 ev = (struct mgmt_ev_device_found *) buf;
7441 memset(buf, 0, sizeof(buf));
7443 bacpy(&ev->addr.bdaddr, bdaddr);
7444 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7447 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7450 ev->eir_len = cpu_to_le16(eir_len);
7452 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Notify management-interface listeners that discovery on @hdev has
 * started or stopped. @discovering is a boolean-like flag carried
 * verbatim in the MGMT_EV_DISCOVERING event together with the current
 * discovery type.
 */
7455 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7457 struct mgmt_ev_discovering ev;
7459 BT_DBG("%s discovering %u", hdev->name, discovering);
7461 memset(&ev, 0, sizeof(ev));
7462 ev.type = hdev->discovery.type;
7463 ev.discovering = discovering;
7465 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* HCI request completion callback for re-enabling advertising: nothing
 * to unwind, so the status is only logged for debugging.
 */
7468 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7470 BT_DBG("%s status %u", hdev->name, status);
7473 void mgmt_reenable_advertising(struct hci_dev *hdev)
7475 struct hci_request req;
7477 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7480 hci_req_init(&req, hdev);
7481 enable_advertising(&req);
7482 hci_req_run(&req, adv_enable_complete);