/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
41 static const u16 mgmt_commands[] = {
42 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_SET_DISCOVERABLE,
46 MGMT_OP_SET_CONNECTABLE,
47 MGMT_OP_SET_FAST_CONNECTABLE,
49 MGMT_OP_SET_LINK_SECURITY,
53 MGMT_OP_SET_DEV_CLASS,
54 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_LOAD_LINK_KEYS,
58 MGMT_OP_LOAD_LONG_TERM_KEYS,
60 MGMT_OP_GET_CONNECTIONS,
61 MGMT_OP_PIN_CODE_REPLY,
62 MGMT_OP_PIN_CODE_NEG_REPLY,
63 MGMT_OP_SET_IO_CAPABILITY,
65 MGMT_OP_CANCEL_PAIR_DEVICE,
66 MGMT_OP_UNPAIR_DEVICE,
67 MGMT_OP_USER_CONFIRM_REPLY,
68 MGMT_OP_USER_CONFIRM_NEG_REPLY,
69 MGMT_OP_USER_PASSKEY_REPLY,
70 MGMT_OP_USER_PASSKEY_NEG_REPLY,
71 MGMT_OP_READ_LOCAL_OOB_DATA,
72 MGMT_OP_ADD_REMOTE_OOB_DATA,
73 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
74 MGMT_OP_START_DISCOVERY,
75 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_UNBLOCK_DEVICE,
79 MGMT_OP_SET_DEVICE_ID,
80 MGMT_OP_SET_ADVERTISING,
82 MGMT_OP_SET_STATIC_ADDRESS,
83 MGMT_OP_SET_SCAN_PARAMS,
84 MGMT_OP_SET_SECURE_CONN,
85 MGMT_OP_SET_DEBUG_KEYS,
88 MGMT_OP_GET_CONN_INFO,
89 MGMT_OP_GET_CLOCK_INFO,
91 MGMT_OP_REMOVE_DEVICE,
92 MGMT_OP_LOAD_CONN_PARAM,
93 MGMT_OP_READ_UNCONF_INDEX_LIST,
94 MGMT_OP_READ_CONFIG_INFO,
95 MGMT_OP_SET_EXTERNAL_CONFIG,
96 MGMT_OP_SET_PUBLIC_ADDRESS,
97 MGMT_OP_START_SERVICE_DISCOVERY,
100 static const u16 mgmt_events[] = {
101 MGMT_EV_CONTROLLER_ERROR,
103 MGMT_EV_INDEX_REMOVED,
104 MGMT_EV_NEW_SETTINGS,
105 MGMT_EV_CLASS_OF_DEV_CHANGED,
106 MGMT_EV_LOCAL_NAME_CHANGED,
107 MGMT_EV_NEW_LINK_KEY,
108 MGMT_EV_NEW_LONG_TERM_KEY,
109 MGMT_EV_DEVICE_CONNECTED,
110 MGMT_EV_DEVICE_DISCONNECTED,
111 MGMT_EV_CONNECT_FAILED,
112 MGMT_EV_PIN_CODE_REQUEST,
113 MGMT_EV_USER_CONFIRM_REQUEST,
114 MGMT_EV_USER_PASSKEY_REQUEST,
116 MGMT_EV_DEVICE_FOUND,
118 MGMT_EV_DEVICE_BLOCKED,
119 MGMT_EV_DEVICE_UNBLOCKED,
120 MGMT_EV_DEVICE_UNPAIRED,
121 MGMT_EV_PASSKEY_NOTIFY,
124 MGMT_EV_DEVICE_ADDED,
125 MGMT_EV_DEVICE_REMOVED,
126 MGMT_EV_NEW_CONN_PARAM,
127 MGMT_EV_UNCONF_INDEX_ADDED,
128 MGMT_EV_UNCONF_INDEX_REMOVED,
129 MGMT_EV_NEW_CONFIG_OPTIONS,
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
135 struct list_head list;
142 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
145 /* HCI to MGMT error code conversion table */
146 static u8 mgmt_status_table[] = {
148 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
149 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
150 MGMT_STATUS_FAILED, /* Hardware Failure */
151 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
152 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
153 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
154 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
155 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
157 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
158 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
159 MGMT_STATUS_BUSY, /* Command Disallowed */
160 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
161 MGMT_STATUS_REJECTED, /* Rejected Security */
162 MGMT_STATUS_REJECTED, /* Rejected Personal */
163 MGMT_STATUS_TIMEOUT, /* Host Timeout */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
165 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
166 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
167 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
168 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
169 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
170 MGMT_STATUS_BUSY, /* Repeated Attempts */
171 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
172 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
173 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
174 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
175 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
176 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
177 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
178 MGMT_STATUS_FAILED, /* Unspecified Error */
179 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
180 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
181 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
182 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
183 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
184 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
185 MGMT_STATUS_FAILED, /* Unit Link Key Used */
186 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
187 MGMT_STATUS_TIMEOUT, /* Instant Passed */
188 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
189 MGMT_STATUS_FAILED, /* Transaction Collision */
190 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
191 MGMT_STATUS_REJECTED, /* QoS Rejected */
192 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
193 MGMT_STATUS_REJECTED, /* Insufficient Security */
194 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
195 MGMT_STATUS_BUSY, /* Role Switch Pending */
196 MGMT_STATUS_FAILED, /* Slot Violation */
197 MGMT_STATUS_FAILED, /* Role Switch Failed */
198 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
199 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
200 MGMT_STATUS_BUSY, /* Host Busy Pairing */
201 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
202 MGMT_STATUS_BUSY, /* Controller Busy */
203 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
204 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
205 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
206 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
207 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
210 static u8 mgmt_status(u8 hci_status)
212 if (hci_status < ARRAY_SIZE(mgmt_status_table))
213 return mgmt_status_table[hci_status];
215 return MGMT_STATUS_FAILED;
218 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
219 struct sock *skip_sk)
222 struct mgmt_hdr *hdr;
224 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
228 hdr = (void *) skb_put(skb, sizeof(*hdr));
229 hdr->opcode = cpu_to_le16(event);
231 hdr->index = cpu_to_le16(hdev->id);
233 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
234 hdr->len = cpu_to_le16(data_len);
237 memcpy(skb_put(skb, data_len), data, data_len);
240 __net_timestamp(skb);
242 hci_send_to_control(skb, skip_sk);
248 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
251 struct mgmt_hdr *hdr;
252 struct mgmt_ev_cmd_status *ev;
255 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
257 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
261 hdr = (void *) skb_put(skb, sizeof(*hdr));
263 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
264 hdr->index = cpu_to_le16(index);
265 hdr->len = cpu_to_le16(sizeof(*ev));
267 ev = (void *) skb_put(skb, sizeof(*ev));
269 ev->opcode = cpu_to_le16(cmd);
271 err = sock_queue_rcv_skb(sk, skb);
278 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
279 void *rp, size_t rp_len)
282 struct mgmt_hdr *hdr;
283 struct mgmt_ev_cmd_complete *ev;
286 BT_DBG("sock %p", sk);
288 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
292 hdr = (void *) skb_put(skb, sizeof(*hdr));
294 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
295 hdr->index = cpu_to_le16(index);
296 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
298 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
299 ev->opcode = cpu_to_le16(cmd);
303 memcpy(ev->data, rp, rp_len);
305 err = sock_queue_rcv_skb(sk, skb);
312 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
315 struct mgmt_rp_read_version rp;
317 BT_DBG("sock %p", sk);
319 rp.version = MGMT_VERSION;
320 rp.revision = cpu_to_le16(MGMT_REVISION);
322 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
326 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_commands *rp;
330 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
331 const u16 num_events = ARRAY_SIZE(mgmt_events);
336 BT_DBG("sock %p", sk);
338 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
340 rp = kmalloc(rp_size, GFP_KERNEL);
344 rp->num_commands = cpu_to_le16(num_commands);
345 rp->num_events = cpu_to_le16(num_events);
347 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
348 put_unaligned_le16(mgmt_commands[i], opcode);
350 for (i = 0; i < num_events; i++, opcode++)
351 put_unaligned_le16(mgmt_events[i], opcode);
353 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
360 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
363 struct mgmt_rp_read_index_list *rp;
369 BT_DBG("sock %p", sk);
371 read_lock(&hci_dev_list_lock);
374 list_for_each_entry(d, &hci_dev_list, list) {
375 if (d->dev_type == HCI_BREDR &&
376 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
380 rp_len = sizeof(*rp) + (2 * count);
381 rp = kmalloc(rp_len, GFP_ATOMIC);
383 read_unlock(&hci_dev_list_lock);
388 list_for_each_entry(d, &hci_dev_list, list) {
389 if (test_bit(HCI_SETUP, &d->dev_flags) ||
390 test_bit(HCI_CONFIG, &d->dev_flags) ||
391 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
394 /* Devices marked as raw-only are neither configured
395 * nor unconfigured controllers.
397 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
400 if (d->dev_type == HCI_BREDR &&
401 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
402 rp->index[count++] = cpu_to_le16(d->id);
403 BT_DBG("Added hci%u", d->id);
407 rp->num_controllers = cpu_to_le16(count);
408 rp_len = sizeof(*rp) + (2 * count);
410 read_unlock(&hci_dev_list_lock);
412 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
420 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
421 void *data, u16 data_len)
423 struct mgmt_rp_read_unconf_index_list *rp;
429 BT_DBG("sock %p", sk);
431 read_lock(&hci_dev_list_lock);
434 list_for_each_entry(d, &hci_dev_list, list) {
435 if (d->dev_type == HCI_BREDR &&
436 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
440 rp_len = sizeof(*rp) + (2 * count);
441 rp = kmalloc(rp_len, GFP_ATOMIC);
443 read_unlock(&hci_dev_list_lock);
448 list_for_each_entry(d, &hci_dev_list, list) {
449 if (test_bit(HCI_SETUP, &d->dev_flags) ||
450 test_bit(HCI_CONFIG, &d->dev_flags) ||
451 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
454 /* Devices marked as raw-only are neither configured
455 * nor unconfigured controllers.
457 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
460 if (d->dev_type == HCI_BREDR &&
461 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
462 rp->index[count++] = cpu_to_le16(d->id);
463 BT_DBG("Added hci%u", d->id);
467 rp->num_controllers = cpu_to_le16(count);
468 rp_len = sizeof(*rp) + (2 * count);
470 read_unlock(&hci_dev_list_lock);
472 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
480 static bool is_configured(struct hci_dev *hdev)
482 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
483 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
486 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
487 !bacmp(&hdev->public_addr, BDADDR_ANY))
493 static __le32 get_missing_options(struct hci_dev *hdev)
497 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
498 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
499 options |= MGMT_OPTION_EXTERNAL_CONFIG;
501 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
502 !bacmp(&hdev->public_addr, BDADDR_ANY))
503 options |= MGMT_OPTION_PUBLIC_ADDRESS;
505 return cpu_to_le32(options);
508 static int new_options(struct hci_dev *hdev, struct sock *skip)
510 __le32 options = get_missing_options(hdev);
512 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
513 sizeof(options), skip);
516 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
518 __le32 options = get_missing_options(hdev);
520 return cmd_complete(sk, hdev->id, opcode, 0, &options,
524 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
525 void *data, u16 data_len)
527 struct mgmt_rp_read_config_info rp;
530 BT_DBG("sock %p %s", sk, hdev->name);
534 memset(&rp, 0, sizeof(rp));
535 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
537 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
538 options |= MGMT_OPTION_EXTERNAL_CONFIG;
540 if (hdev->set_bdaddr)
541 options |= MGMT_OPTION_PUBLIC_ADDRESS;
543 rp.supported_options = cpu_to_le32(options);
544 rp.missing_options = get_missing_options(hdev);
546 hci_dev_unlock(hdev);
548 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
552 static u32 get_supported_settings(struct hci_dev *hdev)
556 settings |= MGMT_SETTING_POWERED;
557 settings |= MGMT_SETTING_BONDABLE;
558 settings |= MGMT_SETTING_DEBUG_KEYS;
559 settings |= MGMT_SETTING_CONNECTABLE;
560 settings |= MGMT_SETTING_DISCOVERABLE;
562 if (lmp_bredr_capable(hdev)) {
563 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
564 settings |= MGMT_SETTING_FAST_CONNECTABLE;
565 settings |= MGMT_SETTING_BREDR;
566 settings |= MGMT_SETTING_LINK_SECURITY;
568 if (lmp_ssp_capable(hdev)) {
569 settings |= MGMT_SETTING_SSP;
570 settings |= MGMT_SETTING_HS;
573 if (lmp_sc_capable(hdev) ||
574 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
575 settings |= MGMT_SETTING_SECURE_CONN;
578 if (lmp_le_capable(hdev)) {
579 settings |= MGMT_SETTING_LE;
580 settings |= MGMT_SETTING_ADVERTISING;
581 settings |= MGMT_SETTING_SECURE_CONN;
582 settings |= MGMT_SETTING_PRIVACY;
585 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
587 settings |= MGMT_SETTING_CONFIGURATION;
592 static u32 get_current_settings(struct hci_dev *hdev)
596 if (hdev_is_powered(hdev))
597 settings |= MGMT_SETTING_POWERED;
599 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
600 settings |= MGMT_SETTING_CONNECTABLE;
602 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
603 settings |= MGMT_SETTING_FAST_CONNECTABLE;
605 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
606 settings |= MGMT_SETTING_DISCOVERABLE;
608 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
609 settings |= MGMT_SETTING_BONDABLE;
611 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
612 settings |= MGMT_SETTING_BREDR;
614 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
615 settings |= MGMT_SETTING_LE;
617 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
618 settings |= MGMT_SETTING_LINK_SECURITY;
620 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
621 settings |= MGMT_SETTING_SSP;
623 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
624 settings |= MGMT_SETTING_HS;
626 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
627 settings |= MGMT_SETTING_ADVERTISING;
629 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
630 settings |= MGMT_SETTING_SECURE_CONN;
632 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
633 settings |= MGMT_SETTING_DEBUG_KEYS;
635 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
636 settings |= MGMT_SETTING_PRIVACY;
641 #define PNP_INFO_SVCLASS_ID 0x1200
643 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
645 u8 *ptr = data, *uuids_start = NULL;
646 struct bt_uuid *uuid;
651 list_for_each_entry(uuid, &hdev->uuids, list) {
654 if (uuid->size != 16)
657 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
661 if (uuid16 == PNP_INFO_SVCLASS_ID)
667 uuids_start[1] = EIR_UUID16_ALL;
671 /* Stop if not enough space to put next UUID */
672 if ((ptr - data) + sizeof(u16) > len) {
673 uuids_start[1] = EIR_UUID16_SOME;
677 *ptr++ = (uuid16 & 0x00ff);
678 *ptr++ = (uuid16 & 0xff00) >> 8;
679 uuids_start[0] += sizeof(uuid16);
685 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
687 u8 *ptr = data, *uuids_start = NULL;
688 struct bt_uuid *uuid;
693 list_for_each_entry(uuid, &hdev->uuids, list) {
694 if (uuid->size != 32)
700 uuids_start[1] = EIR_UUID32_ALL;
704 /* Stop if not enough space to put next UUID */
705 if ((ptr - data) + sizeof(u32) > len) {
706 uuids_start[1] = EIR_UUID32_SOME;
710 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
712 uuids_start[0] += sizeof(u32);
718 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
720 u8 *ptr = data, *uuids_start = NULL;
721 struct bt_uuid *uuid;
726 list_for_each_entry(uuid, &hdev->uuids, list) {
727 if (uuid->size != 128)
733 uuids_start[1] = EIR_UUID128_ALL;
737 /* Stop if not enough space to put next UUID */
738 if ((ptr - data) + 16 > len) {
739 uuids_start[1] = EIR_UUID128_SOME;
743 memcpy(ptr, uuid->uuid, 16);
745 uuids_start[0] += 16;
751 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
753 struct pending_cmd *cmd;
755 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
756 if (cmd->opcode == opcode)
763 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
764 struct hci_dev *hdev,
767 struct pending_cmd *cmd;
769 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
770 if (cmd->user_data != data)
772 if (cmd->opcode == opcode)
779 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
784 name_len = strlen(hdev->dev_name);
786 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
788 if (name_len > max_len) {
790 ptr[1] = EIR_NAME_SHORT;
792 ptr[1] = EIR_NAME_COMPLETE;
794 ptr[0] = name_len + 1;
796 memcpy(ptr + 2, hdev->dev_name, name_len);
798 ad_len += (name_len + 2);
799 ptr += (name_len + 2);
805 static void update_scan_rsp_data(struct hci_request *req)
807 struct hci_dev *hdev = req->hdev;
808 struct hci_cp_le_set_scan_rsp_data cp;
811 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
814 memset(&cp, 0, sizeof(cp));
816 len = create_scan_rsp_data(hdev, cp.data);
818 if (hdev->scan_rsp_data_len == len &&
819 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
822 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
823 hdev->scan_rsp_data_len = len;
827 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
830 static u8 get_adv_discov_flags(struct hci_dev *hdev)
832 struct pending_cmd *cmd;
834 /* If there's a pending mgmt command the flags will not yet have
835 * their final values, so check for this first.
837 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
839 struct mgmt_mode *cp = cmd->param;
841 return LE_AD_GENERAL;
842 else if (cp->val == 0x02)
843 return LE_AD_LIMITED;
845 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_LIMITED;
847 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
848 return LE_AD_GENERAL;
854 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
856 u8 ad_len = 0, flags = 0;
858 flags |= get_adv_discov_flags(hdev);
860 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
861 flags |= LE_AD_NO_BREDR;
864 BT_DBG("adv flags 0x%02x", flags);
874 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
876 ptr[1] = EIR_TX_POWER;
877 ptr[2] = (u8) hdev->adv_tx_power;
886 static void update_adv_data(struct hci_request *req)
888 struct hci_dev *hdev = req->hdev;
889 struct hci_cp_le_set_adv_data cp;
892 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
895 memset(&cp, 0, sizeof(cp));
897 len = create_adv_data(hdev, cp.data);
899 if (hdev->adv_data_len == len &&
900 memcmp(cp.data, hdev->adv_data, len) == 0)
903 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
904 hdev->adv_data_len = len;
908 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
911 int mgmt_update_adv_data(struct hci_dev *hdev)
913 struct hci_request req;
915 hci_req_init(&req, hdev);
916 update_adv_data(&req);
918 return hci_req_run(&req, NULL);
921 static void create_eir(struct hci_dev *hdev, u8 *data)
926 name_len = strlen(hdev->dev_name);
932 ptr[1] = EIR_NAME_SHORT;
934 ptr[1] = EIR_NAME_COMPLETE;
936 /* EIR Data length */
937 ptr[0] = name_len + 1;
939 memcpy(ptr + 2, hdev->dev_name, name_len);
941 ptr += (name_len + 2);
944 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
946 ptr[1] = EIR_TX_POWER;
947 ptr[2] = (u8) hdev->inq_tx_power;
952 if (hdev->devid_source > 0) {
954 ptr[1] = EIR_DEVICE_ID;
956 put_unaligned_le16(hdev->devid_source, ptr + 2);
957 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
958 put_unaligned_le16(hdev->devid_product, ptr + 6);
959 put_unaligned_le16(hdev->devid_version, ptr + 8);
964 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
966 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
969 static void update_eir(struct hci_request *req)
971 struct hci_dev *hdev = req->hdev;
972 struct hci_cp_write_eir cp;
974 if (!hdev_is_powered(hdev))
977 if (!lmp_ext_inq_capable(hdev))
980 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
983 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
986 memset(&cp, 0, sizeof(cp));
988 create_eir(hdev, cp.data);
990 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
993 memcpy(hdev->eir, cp.data, sizeof(cp.data));
995 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
998 static u8 get_service_classes(struct hci_dev *hdev)
1000 struct bt_uuid *uuid;
1003 list_for_each_entry(uuid, &hdev->uuids, list)
1004 val |= uuid->svc_hint;
1009 static void update_class(struct hci_request *req)
1011 struct hci_dev *hdev = req->hdev;
1014 BT_DBG("%s", hdev->name);
1016 if (!hdev_is_powered(hdev))
1019 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1022 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1025 cod[0] = hdev->minor_class;
1026 cod[1] = hdev->major_class;
1027 cod[2] = get_service_classes(hdev);
1029 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1032 if (memcmp(cod, hdev->dev_class, 3) == 0)
1035 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1038 static bool get_connectable(struct hci_dev *hdev)
1040 struct pending_cmd *cmd;
1042 /* If there's a pending mgmt command the flag will not yet have
1043 * it's final value, so check for this first.
1045 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1047 struct mgmt_mode *cp = cmd->param;
1051 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1054 static void disable_advertising(struct hci_request *req)
1058 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1061 static void enable_advertising(struct hci_request *req)
1063 struct hci_dev *hdev = req->hdev;
1064 struct hci_cp_le_set_adv_param cp;
1065 u8 own_addr_type, enable = 0x01;
1068 if (hci_conn_num(hdev, LE_LINK) > 0)
1071 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1072 disable_advertising(req);
1074 /* Clear the HCI_LE_ADV bit temporarily so that the
1075 * hci_update_random_address knows that it's safe to go ahead
1076 * and write a new random address. The flag will be set back on
1077 * as soon as the SET_ADV_ENABLE HCI command completes.
1079 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1081 connectable = get_connectable(hdev);
1083 /* Set require_privacy to true only when non-connectable
1084 * advertising is used. In that case it is fine to use a
1085 * non-resolvable private address.
1087 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1090 memset(&cp, 0, sizeof(cp));
1091 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1092 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1093 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1094 cp.own_address_type = own_addr_type;
1095 cp.channel_map = hdev->le_adv_channel_map;
1097 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1099 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1102 static void service_cache_off(struct work_struct *work)
1104 struct hci_dev *hdev = container_of(work, struct hci_dev,
1105 service_cache.work);
1106 struct hci_request req;
1108 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1111 hci_req_init(&req, hdev);
1118 hci_dev_unlock(hdev);
1120 hci_req_run(&req, NULL);
1123 static void rpa_expired(struct work_struct *work)
1125 struct hci_dev *hdev = container_of(work, struct hci_dev,
1127 struct hci_request req;
1131 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1133 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1136 /* The generation of a new RPA and programming it into the
1137 * controller happens in the enable_advertising() function.
1139 hci_req_init(&req, hdev);
1140 enable_advertising(&req);
1141 hci_req_run(&req, NULL);
1144 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1146 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1149 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1150 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1152 /* Non-mgmt controlled devices get this bit set
1153 * implicitly so that pairing works for them, however
1154 * for mgmt we require user-space to explicitly enable
1157 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1160 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1161 void *data, u16 data_len)
1163 struct mgmt_rp_read_info rp;
1165 BT_DBG("sock %p %s", sk, hdev->name);
1169 memset(&rp, 0, sizeof(rp));
1171 bacpy(&rp.bdaddr, &hdev->bdaddr);
1173 rp.version = hdev->hci_ver;
1174 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1176 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1177 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1179 memcpy(rp.dev_class, hdev->dev_class, 3);
1181 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1182 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1184 hci_dev_unlock(hdev);
1186 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1190 static void mgmt_pending_free(struct pending_cmd *cmd)
1197 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1198 struct hci_dev *hdev, void *data,
1201 struct pending_cmd *cmd;
1203 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1207 cmd->opcode = opcode;
1208 cmd->index = hdev->id;
1210 cmd->param = kmemdup(data, len, GFP_KERNEL);
1216 cmd->param_len = len;
1221 list_add(&cmd->list, &hdev->mgmt_pending);
1226 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1227 void (*cb)(struct pending_cmd *cmd,
1231 struct pending_cmd *cmd, *tmp;
1233 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1234 if (opcode > 0 && cmd->opcode != opcode)
1241 static void mgmt_pending_remove(struct pending_cmd *cmd)
1243 list_del(&cmd->list);
1244 mgmt_pending_free(cmd);
1247 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1249 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1251 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1255 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1257 BT_DBG("%s status 0x%02x", hdev->name, status);
1259 if (hci_conn_count(hdev) == 0) {
1260 cancel_delayed_work(&hdev->power_off);
1261 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1265 static bool hci_stop_discovery(struct hci_request *req)
1267 struct hci_dev *hdev = req->hdev;
1268 struct hci_cp_remote_name_req_cancel cp;
1269 struct inquiry_entry *e;
1271 switch (hdev->discovery.state) {
1272 case DISCOVERY_FINDING:
1273 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1274 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1276 cancel_delayed_work(&hdev->le_scan_disable);
1277 hci_req_add_le_scan_disable(req);
1282 case DISCOVERY_RESOLVING:
1283 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1288 bacpy(&cp.bdaddr, &e->data.bdaddr);
1289 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1295 /* Passive scanning */
1296 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1297 hci_req_add_le_scan_disable(req);
1307 static int clean_up_hci_state(struct hci_dev *hdev)
1309 struct hci_request req;
1310 struct hci_conn *conn;
1311 bool discov_stopped;
1314 hci_req_init(&req, hdev);
1316 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1317 test_bit(HCI_PSCAN, &hdev->flags)) {
1319 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1322 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1323 disable_advertising(&req);
1325 discov_stopped = hci_stop_discovery(&req);
1327 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1328 struct hci_cp_disconnect dc;
1329 struct hci_cp_reject_conn_req rej;
1331 switch (conn->state) {
1334 dc.handle = cpu_to_le16(conn->handle);
1335 dc.reason = 0x15; /* Terminated due to Power Off */
1336 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1339 if (conn->type == LE_LINK)
1340 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1342 else if (conn->type == ACL_LINK)
1343 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1347 bacpy(&rej.bdaddr, &conn->dst);
1348 rej.reason = 0x15; /* Terminated due to Power Off */
1349 if (conn->type == ACL_LINK)
1350 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1352 else if (conn->type == SCO_LINK)
1353 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1359 err = hci_req_run(&req, clean_up_hci_complete);
1360 if (!err && discov_stopped)
1361 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1366 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1369 struct mgmt_mode *cp = data;
1370 struct pending_cmd *cmd;
1373 BT_DBG("request for %s", hdev->name);
1375 if (cp->val != 0x00 && cp->val != 0x01)
1376 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1377 MGMT_STATUS_INVALID_PARAMS);
1381 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1382 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1387 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1388 cancel_delayed_work(&hdev->power_off);
1391 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1393 err = mgmt_powered(hdev, 1);
1398 if (!!cp->val == hdev_is_powered(hdev)) {
1399 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1403 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1410 queue_work(hdev->req_workqueue, &hdev->power_on);
1413 /* Disconnect connections, stop scans, etc */
1414 err = clean_up_hci_state(hdev);
1416 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1417 HCI_POWER_OFF_TIMEOUT);
1419 /* ENODATA means there were no HCI commands queued */
1420 if (err == -ENODATA) {
1421 cancel_delayed_work(&hdev->power_off);
1422 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1428 hci_dev_unlock(hdev);
1432 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1436 ev = cpu_to_le32(get_current_settings(hdev));
1438 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1441 int mgmt_new_settings(struct hci_dev *hdev)
1443 return new_settings(hdev, NULL);
1448 struct hci_dev *hdev;
1452 static void settings_rsp(struct pending_cmd *cmd, void *data)
1454 struct cmd_lookup *match = data;
1456 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1458 list_del(&cmd->list);
1460 if (match->sk == NULL) {
1461 match->sk = cmd->sk;
1462 sock_hold(match->sk);
1465 mgmt_pending_free(cmd);
1468 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1472 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1473 mgmt_pending_remove(cmd);
1476 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1478 if (cmd->cmd_complete) {
1481 cmd->cmd_complete(cmd, *status);
1482 mgmt_pending_remove(cmd);
1487 cmd_status_rsp(cmd, data);
1490 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1492 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1493 cmd->param, cmd->param_len);
1496 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1498 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1499 sizeof(struct mgmt_addr_info));
1502 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1504 if (!lmp_bredr_capable(hdev))
1505 return MGMT_STATUS_NOT_SUPPORTED;
1506 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1507 return MGMT_STATUS_REJECTED;
1509 return MGMT_STATUS_SUCCESS;
1512 static u8 mgmt_le_support(struct hci_dev *hdev)
1514 if (!lmp_le_capable(hdev))
1515 return MGMT_STATUS_NOT_SUPPORTED;
1516 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1517 return MGMT_STATUS_REJECTED;
1519 return MGMT_STATUS_SUCCESS;
/* HCI request completion callback for Set Discoverable.
 * NOTE(review): extraction dropped interleaved lines (locking, !cmd
 * check, goto labels, flag-argument continuations); comments annotate
 * visible code only.
 */
1522 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1524 struct pending_cmd *cmd;
1525 struct mgmt_mode *cp;
1526 struct hci_request req;
1529 BT_DBG("status 0x%02x", status);
1533 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* On HCI failure: report the translated error and make sure the
 * limited-discoverable flag does not stay stale.
 */
1538 u8 mgmt_err = mgmt_status(status);
1539 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1540 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1546 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the discoverable timeout now that the mode change completed. */
1549 if (hdev->discov_timeout > 0) {
1550 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1551 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1555 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1559 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
/* Only broadcast New Settings when a flag actually changed. */
1562 new_settings(hdev, cmd->sk);
1564 /* When the discoverable mode gets changed, make sure
1565 * that class of device has the limited discoverable
1566 * bit correctly set. Also update page scan based on whitelist
1569 hci_req_init(&req, hdev);
1570 __hci_update_page_scan(&req);
1572 hci_req_run(&req, NULL);
1575 mgmt_pending_remove(cmd);
1578 hci_dev_unlock(hdev);
/* MGMT Set Discoverable command handler. val: 0x00 = off, 0x01 =
 * general discoverable, 0x02 = limited discoverable (requires timeout).
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
1581 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1584 struct mgmt_cp_set_discoverable *cp = data;
1585 struct pending_cmd *cmd;
1586 struct hci_request req;
1591 BT_DBG("request for %s", hdev->name);
/* At least one transport must be enabled for discoverability. */
1593 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1594 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1595 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1596 MGMT_STATUS_REJECTED);
1598 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1599 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_INVALID_PARAMS);
1602 timeout = __le16_to_cpu(cp->timeout);
1604 /* Disabling discoverable requires that no timeout is set,
1605 * and enabling limited discoverable requires a timeout.
1607 if ((cp->val == 0x00 && timeout > 0) ||
1608 (cp->val == 0x02 && timeout == 0))
1609 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1610 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1614 if (!hdev_is_powered(hdev) && timeout > 0) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1616 MGMT_STATUS_NOT_POWERED);
/* Reject while a conflicting discoverable/connectable op is pending. */
1620 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1621 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1622 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; refuse if not connectable. */
1627 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1628 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1629 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the flag, no HCI traffic needed. */
1633 if (!hdev_is_powered(hdev)) {
1634 bool changed = false;
1636 /* Setting limited discoverable when powered off is
1637 * not a valid operation since it requires a timeout
1638 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1640 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1641 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1645 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1650 err = new_settings(hdev, sk);
1655 /* If the current mode is the same, then just update the timeout
1656 * value with the new value. And if only the timeout gets updated,
1657 * then no need for any HCI transactions.
1659 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1660 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1661 &hdev->dev_flags)) {
1662 cancel_delayed_work(&hdev->discov_off);
1663 hdev->discov_timeout = timeout;
/* Re-arm the timeout with the freshly stored value. */
1665 if (cp->val && hdev->discov_timeout > 0) {
1666 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1667 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1671 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1675 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1681 /* Cancel any potential discoverable timeout that might be
1682 * still active and store new timeout value. The arming of
1683 * the timeout happens in the complete handler.
1685 cancel_delayed_work(&hdev->discov_off);
1686 hdev->discov_timeout = timeout;
1688 /* Limited discoverable mode */
1689 if (cp->val == 0x02)
1690 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1692 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1694 hci_req_init(&req, hdev);
1696 /* The procedure for LE-only controllers is much simpler - just
1697 * update the advertising data.
1699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1705 struct hci_cp_write_current_iac_lap hci_cp;
1707 if (cp->val == 0x02) {
1708 /* Limited discoverable mode */
1709 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9e8b00 and GIAC 0x9e8b33, little-endian byte order. */
1710 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1711 hci_cp.iac_lap[1] = 0x8b;
1712 hci_cp.iac_lap[2] = 0x9e;
1713 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1714 hci_cp.iac_lap[4] = 0x8b;
1715 hci_cp.iac_lap[5] = 0x9e;
1717 /* General discoverable mode */
1719 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1720 hci_cp.iac_lap[1] = 0x8b;
1721 hci_cp.iac_lap[2] = 0x9e;
/* Payload is num_iac byte plus 3 bytes per IAC LAP. */
1724 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1725 (hci_cp.num_iac * 3) + 1, &hci_cp);
1727 scan |= SCAN_INQUIRY;
1729 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1732 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1735 update_adv_data(&req);
1737 err = hci_req_run(&req, set_discoverable_complete);
1739 mgmt_pending_remove(cmd);
1742 hci_dev_unlock(hdev);
1746 static void write_fast_connectable(struct hci_request *req, bool enable)
1748 struct hci_dev *hdev = req->hdev;
1749 struct hci_cp_write_page_scan_activity acp;
1752 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1755 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1759 type = PAGE_SCAN_TYPE_INTERLACED;
1761 /* 160 msec page scan interval */
1762 acp.interval = cpu_to_le16(0x0100);
1764 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1766 /* default 1.28 sec page scan */
1767 acp.interval = cpu_to_le16(0x0800);
1770 acp.window = cpu_to_le16(0x0012);
1772 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1773 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1774 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1777 if (hdev->page_scan_type != type)
1778 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion callback for Set Connectable.
 * NOTE(review): extraction dropped interleaved lines (locking, !cmd
 * check, cp assignment, branch braces, goto labels); comments annotate
 * visible code only.
 */
1781 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1783 struct pending_cmd *cmd;
1784 struct mgmt_mode *cp;
1785 bool conn_changed, discov_changed;
1787 BT_DBG("status 0x%02x", status);
1791 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1796 u8 mgmt_err = mgmt_status(status);
1797 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1803 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1805 discov_changed = false;
/* Turning connectable off also forces discoverable off. */
1807 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1809 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1813 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
/* Only touch scanning state when a flag actually changed. */
1815 if (conn_changed || discov_changed) {
1816 new_settings(hdev, cmd->sk);
1817 hci_update_page_scan(hdev);
1819 mgmt_update_adv_data(hdev);
1820 hci_update_background_scan(hdev);
1824 mgmt_pending_remove(cmd);
1827 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable: used when no HCI traffic is
 * needed (powered off, or request produced no HCI commands).
 * NOTE(review): extraction dropped interleaved lines (err declaration,
 * branch braces, early returns); comments annotate visible code only.
 */
1830 static int set_connectable_update_settings(struct hci_dev *hdev,
1831 struct sock *sk, u8 val)
1833 bool changed = false;
1836 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1840 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Clearing connectable also clears discoverable. */
1842 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1843 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1846 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
/* On an actual change, refresh scanning and notify listeners. */
1851 hci_update_page_scan(hdev);
1852 hci_update_background_scan(hdev);
1853 return new_settings(hdev, sk);
/* MGMT Set Connectable command handler.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
1859 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1862 struct mgmt_mode *cp = data;
1863 struct pending_cmd *cmd;
1864 struct hci_request req;
1868 BT_DBG("request for %s", hdev->name);
/* Needs at least one enabled transport. */
1870 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1871 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1872 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1873 MGMT_STATUS_REJECTED);
1875 if (cp->val != 0x00 && cp->val != 0x01)
1876 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1877 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update, no HCI commands required. */
1881 if (!hdev_is_powered(hdev)) {
1882 err = set_connectable_update_settings(hdev, sk, cp->val);
1886 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1887 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1888 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1893 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1899 hci_req_init(&req, hdev);
1901 /* If BR/EDR is not enabled and we disable advertising as a
1902 * by-product of disabling connectable, we need to update the
1903 * advertising flags.
1905 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1907 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1908 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1910 update_adv_data(&req);
1911 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1915 /* If we don't have any whitelist entries just
1916 * disable all scanning. If there are entries
1917 * and we had both page and inquiry scanning
1918 * enabled then fall back to only page scanning.
1919 * Otherwise no changes are needed.
1921 if (list_empty(&hdev->whitelist))
1922 scan = SCAN_DISABLED;
1923 else if (test_bit(HCI_ISCAN, &hdev->flags))
1926 goto no_scan_update;
/* Stop a running discoverable timeout before disabling scans. */
1928 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1929 hdev->discov_timeout > 0)
1930 cancel_delayed_work(&hdev->discov_off);
1933 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1937 /* If we're going from non-connectable to connectable or
1938 * vice-versa when fast connectable is enabled ensure that fast
1939 * connectable gets disabled. write_fast_connectable won't do
1940 * anything if the page scan parameters are already what they
1943 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1944 write_fast_connectable(&req, false);
1946 /* Update the advertising parameters if necessary */
1947 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1948 enable_advertising(&req);
1950 err = hci_req_run(&req, set_connectable_complete);
1952 mgmt_pending_remove(cmd);
/* ENODATA: nothing was queued, fall back to the flag-only path. */
1953 if (err == -ENODATA)
1954 err = set_connectable_update_settings(hdev, sk,
1960 hci_dev_unlock(hdev);
/* MGMT Set Bondable command handler: pure flag toggle, no HCI traffic.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * locking, branch braces); comments annotate visible code only.
 */
1964 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1967 struct mgmt_mode *cp = data;
1971 BT_DBG("request for %s", hdev->name);
1973 if (cp->val != 0x00 && cp->val != 0x01)
1974 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1975 MGMT_STATUS_INVALID_PARAMS);
/* test_and_* returns the previous bit, so 'changed' tracks whether the
 * flag actually flipped.
 */
1980 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1982 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1984 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1989 err = new_settings(hdev, sk);
1992 hci_dev_unlock(hdev);
/* MGMT Set Link Security command handler (BR/EDR authentication).
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
1996 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1999 struct mgmt_mode *cp = data;
2000 struct pending_cmd *cmd;
2004 BT_DBG("request for %s", hdev->name);
/* Link security is a BR/EDR-only feature. */
2006 status = mgmt_bredr_support(hdev);
2008 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2011 if (cp->val != 0x00 && cp->val != 0x01)
2012 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2013 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only toggle. */
2017 if (!hdev_is_powered(hdev)) {
2018 bool changed = false;
2020 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2021 &hdev->dev_flags)) {
2022 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2026 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2031 err = new_settings(hdev, sk);
2036 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2037 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: just respond. */
2044 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2045 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2049 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2055 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2057 mgmt_pending_remove(cmd);
2062 hci_dev_unlock(hdev);
/* MGMT Set Secure Simple Pairing command handler.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
2066 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2068 struct mgmt_mode *cp = data;
2069 struct pending_cmd *cmd;
2073 BT_DBG("request for %s", hdev->name);
2075 status = mgmt_bredr_support(hdev);
2077 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2079 if (!lmp_ssp_capable(hdev))
2080 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2081 MGMT_STATUS_NOT_SUPPORTED);
2083 if (cp->val != 0x00 && cp->val != 0x01)
2084 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2085 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only toggle; disabling SSP also drops HS. */
2089 if (!hdev_is_powered(hdev)) {
2093 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2096 changed = test_and_clear_bit(HCI_SSP_ENABLED,
2099 changed = test_and_clear_bit(HCI_HS_ENABLED,
2102 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2105 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2110 err = new_settings(hdev, sk);
2115 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2116 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2117 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2122 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2123 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2127 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP while debug keys are in use also turns off the
 * controller's SSP debug mode (best effort, result unchecked).
 */
2133 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2134 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2135 sizeof(cp->val), &cp->val);
2137 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2139 mgmt_pending_remove(cmd);
2144 hci_dev_unlock(hdev);
/* MGMT Set High Speed command handler: flag-only toggle that requires
 * SSP to be enabled first.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
2148 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2150 struct mgmt_mode *cp = data;
2155 BT_DBG("request for %s", hdev->name);
2157 status = mgmt_bredr_support(hdev);
2159 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2161 if (!lmp_ssp_capable(hdev))
2162 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2163 MGMT_STATUS_NOT_SUPPORTED);
/* HS depends on SSP being enabled. */
2165 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2166 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2167 MGMT_STATUS_REJECTED);
2169 if (cp->val != 0x00 && cp->val != 0x01)
2170 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2171 MGMT_STATUS_INVALID_PARAMS);
2176 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
/* NOTE(review): this branch rejects while powered; presumably it
 * guards disabling HS at runtime -- the enclosing condition is among
 * the lines missing from this listing, so confirm against the full
 * source.
 */
2178 if (hdev_is_powered(hdev)) {
2179 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2180 MGMT_STATUS_REJECTED);
2184 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2187 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2192 err = new_settings(hdev, sk);
2195 hci_dev_unlock(hdev);
/* HCI request completion callback for Set LE.
 * NOTE(review): extraction dropped interleaved lines (locking, status
 * branch braces); comments annotate visible code only.
 */
2199 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2201 struct cmd_lookup match = { NULL, hdev };
/* Failure: fail every pending Set LE command with the HCI error. */
2206 u8 mgmt_err = mgmt_status(status);
2208 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2213 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2215 new_settings(hdev, match.sk);
2220 /* Make sure the controller has a good default for
2221 * advertising data. Restrict the update to when LE
2222 * has actually been enabled. During power on, the
2223 * update in powered_update_hci will take care of it.
2225 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2226 struct hci_request req;
2228 hci_req_init(&req, hdev);
2229 update_adv_data(&req);
2230 update_scan_rsp_data(&req);
2231 __hci_update_background_scan(&req);
2232 hci_req_run(&req, NULL);
2236 hci_dev_unlock(hdev);
/* MGMT Set Low Energy command handler.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
2239 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2241 struct mgmt_mode *cp = data;
2242 struct hci_cp_write_le_host_supported hci_cp;
2243 struct pending_cmd *cmd;
2244 struct hci_request req;
2248 BT_DBG("request for %s", hdev->name);
2250 if (!lmp_le_capable(hdev))
2251 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2252 MGMT_STATUS_NOT_SUPPORTED);
2254 if (cp->val != 0x00 && cp->val != 0x01)
2255 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2256 MGMT_STATUS_INVALID_PARAMS);
2258 /* LE-only devices do not allow toggling LE on/off */
2259 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2260 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2261 MGMT_STATUS_REJECTED);
2266 enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or host LE support already matches. */
2268 if (!hdev_is_powered(hdev) || val == enabled) {
2269 bool changed = false;
2271 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2272 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE also drops the advertising flag. */
2276 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2277 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2281 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2286 err = new_settings(hdev, sk);
2291 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2292 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2293 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2298 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2304 hci_req_init(&req, hdev);
2306 memset(&hci_cp, 0, sizeof(hci_cp));
2310 hci_cp.simul = 0x00;
/* Stop active advertising before turning LE support off. */
2312 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2313 disable_advertising(&req);
2316 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2319 err = hci_req_run(&req, le_enable_complete);
2321 mgmt_pending_remove(cmd);
2324 hci_dev_unlock(hdev);
2328 /* This is a helper function to test for pending mgmt commands that can
2329 * cause CoD or EIR HCI commands. We can only allow one such pending
2330 * mgmt command at a time since otherwise we cannot easily track what
2331 * the current values are, will be, and based on that calculate if a new
2332 * HCI command needs to be sent and if yes with what value.
2334 static bool pending_eir_or_class(struct hci_dev *hdev)
2336 struct pending_cmd *cmd;
2338 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2339 switch (cmd->opcode) {
2340 case MGMT_OP_ADD_UUID:
2341 case MGMT_OP_REMOVE_UUID:
2342 case MGMT_OP_SET_DEV_CLASS:
2343 case MGMT_OP_SET_POWERED:
2351 static const u8 bluetooth_base_uuid[] = {
2352 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2353 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2356 static u8 get_uuid_size(const u8 *uuid)
2360 if (memcmp(uuid, bluetooth_base_uuid, 12))
2363 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the class/EIR changing commands
 * (Add/Remove UUID, Set Device Class): answers the pending command with
 * the current class of device.
 * NOTE(review): extraction dropped interleaved lines (locking, !cmd
 * check, labels); comments annotate visible code only.
 */
2370 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2372 struct pending_cmd *cmd;
2376 cmd = mgmt_pending_find(mgmt_op, hdev);
/* Reply always carries the 3-byte class of device. */
2380 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2381 hdev->dev_class, 3);
2383 mgmt_pending_remove(cmd);
2386 hci_dev_unlock(hdev);
2389 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2391 BT_DBG("status 0x%02x", status);
2393 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID command handler: records the UUID and refreshes class
 * of device / EIR via an HCI request.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking, !uuid check); comments annotate visible
 * code only.
 */
2396 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2398 struct mgmt_cp_add_uuid *cp = data;
2399 struct pending_cmd *cmd;
2400 struct hci_request req;
2401 struct bt_uuid *uuid;
2404 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR changing command may be pending at a time. */
2408 if (pending_eir_or_class(hdev)) {
2409 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2414 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2420 memcpy(uuid->uuid, cp->uuid, 16);
2421 uuid->svc_hint = cp->svc_hint;
2422 uuid->size = get_uuid_size(cp->uuid);
2424 list_add_tail(&uuid->list, &hdev->uuids);
2426 hci_req_init(&req, hdev);
2431 err = hci_req_run(&req, add_uuid_complete);
2433 if (err != -ENODATA)
/* ENODATA: no HCI work was needed, reply immediately. */
2436 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2437 hdev->dev_class, 3);
2441 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2450 hci_dev_unlock(hdev);
2454 static bool enable_service_cache(struct hci_dev *hdev)
2456 if (!hdev_is_powered(hdev))
2459 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2460 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2468 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2470 BT_DBG("status 0x%02x", status);
2472 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID command handler: an all-zero UUID clears the whole
 * list, otherwise every matching entry is removed.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking, found counter); comments annotate
 * visible code only.
 */
2475 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2478 struct mgmt_cp_remove_uuid *cp = data;
2479 struct pending_cmd *cmd;
2480 struct bt_uuid *match, *tmp;
2481 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2482 struct hci_request req;
2485 BT_DBG("request for %s", hdev->name);
2489 if (pending_eir_or_class(hdev)) {
2490 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard (all-zero) UUID: drop everything. */
2495 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2496 hci_uuids_clear(hdev);
/* Service cache armed: the delayed work will refresh EIR later. */
2498 if (enable_service_cache(hdev)) {
2499 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2500 0, hdev->dev_class, 3);
/* Safe iteration: entries may be deleted while walking. */
2509 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2510 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2513 list_del(&match->list);
/* No entry matched the given UUID. */
2519 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2520 MGMT_STATUS_INVALID_PARAMS);
2525 hci_req_init(&req, hdev);
2530 err = hci_req_run(&req, remove_uuid_complete);
2532 if (err != -ENODATA)
2535 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2536 hdev->dev_class, 3);
2540 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2549 hci_dev_unlock(hdev);
2553 static void set_class_complete(struct hci_dev *hdev, u8 status)
2555 BT_DBG("status 0x%02x", status);
2557 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class command handler (BR/EDR only).
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
2560 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2563 struct mgmt_cp_set_dev_class *cp = data;
2564 struct pending_cmd *cmd;
2565 struct hci_request req;
2568 BT_DBG("request for %s", hdev->name);
2570 if (!lmp_bredr_capable(hdev))
2571 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2572 MGMT_STATUS_NOT_SUPPORTED);
2576 if (pending_eir_or_class(hdev)) {
2577 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* The low two minor bits and high three major bits are reserved. */
2582 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2583 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2584 MGMT_STATUS_INVALID_PARAMS);
2588 hdev->major_class = cp->major;
2589 hdev->minor_class = cp->minor;
/* Powered off: remember the class, reply with the stored value. */
2591 if (!hdev_is_powered(hdev)) {
2592 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2593 hdev->dev_class, 3);
2597 hci_req_init(&req, hdev);
/* Service cache active: flush it synchronously (lock dropped while
 * waiting for the delayed work to cancel).
 */
2599 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2600 hci_dev_unlock(hdev);
2601 cancel_delayed_work_sync(&hdev->service_cache);
2608 err = hci_req_run(&req, set_class_complete);
2610 if (err != -ENODATA)
2613 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2614 hdev->dev_class, 3);
2618 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2627 hci_dev_unlock(hdev);
/* MGMT Load Link Keys command handler: replaces the stored BR/EDR link
 * keys with the supplied list after strict validation.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, locking); comments annotate visible code only.
 */
2631 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2634 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
2635 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2636 sizeof(struct mgmt_link_key_info));
2637 u16 key_count, expected_len;
2641 BT_DBG("request for %s", hdev->name);
2643 if (!lmp_bredr_capable(hdev))
2644 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2645 MGMT_STATUS_NOT_SUPPORTED);
2647 key_count = __le16_to_cpu(cp->key_count);
2648 if (key_count > max_key_count) {
2649 BT_ERR("load_link_keys: too big key_count value %u",
2651 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2652 MGMT_STATUS_INVALID_PARAMS);
/* The message length must exactly match the declared key count. */
2655 expected_len = sizeof(*cp) + key_count *
2656 sizeof(struct mgmt_link_key_info);
2657 if (expected_len != len) {
2658 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2660 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2661 MGMT_STATUS_INVALID_PARAMS);
2664 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2665 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2666 MGMT_STATUS_INVALID_PARAMS);
2668 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the stored keys. */
2671 for (i = 0; i < key_count; i++) {
2672 struct mgmt_link_key_info *key = &cp->keys[i];
2674 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2675 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2676 MGMT_STATUS_INVALID_PARAMS);
2681 hci_link_keys_clear(hdev);
2684 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2687 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2691 new_settings(hdev, NULL);
2693 for (i = 0; i < key_count; i++) {
2694 struct mgmt_link_key_info *key = &cp->keys[i];
2696 /* Always ignore debug keys and require a new pairing if
2697 * the user wants to use them.
2699 if (key->type == HCI_LK_DEBUG_COMBINATION)
2702 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2703 key->type, key->pin_len, NULL);
2706 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2708 hci_dev_unlock(hdev);
2713 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2714 u8 addr_type, struct sock *skip_sk)
2716 struct mgmt_ev_device_unpaired ev;
2718 bacpy(&ev.addr.bdaddr, bdaddr);
2719 ev.addr.type = addr_type;
2721 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device command handler: removes stored keys for the given
 * address and optionally disconnects the link.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking, addr_type assignment context); comments
 * annotate visible code only.
 */
2725 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2728 struct mgmt_cp_unpair_device *cp = data;
2729 struct mgmt_rp_unpair_device rp;
2730 struct hci_cp_disconnect dc;
2731 struct pending_cmd *cmd;
2732 struct hci_conn *conn;
2735 memset(&rp, 0, sizeof(rp));
2736 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2737 rp.addr.type = cp->addr.type;
2739 if (!bdaddr_type_is_valid(cp->addr.type))
2740 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2741 MGMT_STATUS_INVALID_PARAMS,
2744 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2745 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2746 MGMT_STATUS_INVALID_PARAMS,
2751 if (!hdev_is_powered(hdev)) {
2752 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2753 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2757 if (cp->addr.type == BDADDR_BREDR) {
2758 /* If disconnection is requested, then look up the
2759 * connection. If the remote device is connected, it
2760 * will be later used to terminate the link.
2762 * Setting it to NULL explicitly will cause no
2763 * termination of the link.
2766 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2771 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE branch: keys are indexed by the LE address type. */
2775 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2778 /* Defer clearing up the connection parameters
2779 * until closing to give a chance of keeping
2780 * them if a repairing happens.
2782 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2784 /* If disconnection is not requested, then
2785 * clear the connection variable so that the
2786 * link is not terminated.
2788 if (!cp->disconnect)
2792 if (cp->addr.type == BDADDR_LE_PUBLIC)
2793 addr_type = ADDR_LE_DEV_PUBLIC;
2795 addr_type = ADDR_LE_DEV_RANDOM;
2797 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2799 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No key was stored for this address. */
2803 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2804 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2808 /* If the connection variable is set, then termination of the
2809 * link is requested.
2812 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2814 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2818 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2825 cmd->cmd_complete = addr_cmd_complete;
2827 dc.handle = cpu_to_le16(conn->handle);
2828 dc.reason = 0x13; /* Remote User Terminated Connection */
2829 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2831 mgmt_pending_remove(cmd);
2834 hci_dev_unlock(hdev);
/* MGMT Disconnect command handler.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
2838 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2841 struct mgmt_cp_disconnect *cp = data;
2842 struct mgmt_rp_disconnect rp;
2843 struct pending_cmd *cmd;
2844 struct hci_conn *conn;
2849 memset(&rp, 0, sizeof(rp));
2850 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2851 rp.addr.type = cp->addr.type;
2853 if (!bdaddr_type_is_valid(cp->addr.type))
2854 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2855 MGMT_STATUS_INVALID_PARAMS,
2860 if (!test_bit(HCI_UP, &hdev->flags)) {
2861 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2862 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be in flight at a time. */
2866 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2867 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2868 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2872 if (cp->addr.type == BDADDR_BREDR)
2873 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2876 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2878 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2879 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2880 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2884 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2890 cmd->cmd_complete = generic_cmd_complete;
2892 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2894 mgmt_pending_remove(cmd);
2897 hci_dev_unlock(hdev);
2901 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2903 switch (link_type) {
2905 switch (addr_type) {
2906 case ADDR_LE_DEV_PUBLIC:
2907 return BDADDR_LE_PUBLIC;
2910 /* Fallback to LE Random address type */
2911 return BDADDR_LE_RANDOM;
2915 /* Fallback to BR/EDR type */
2916 return BDADDR_BREDR;
/* MGMT Get Connections command handler: returns the addresses of all
 * mgmt-visible (non-SCO) connections.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking, i reset/increment); comments annotate
 * visible code only.
 */
2920 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2923 struct mgmt_rp_get_connections *rp;
2933 if (!hdev_is_powered(hdev)) {
2934 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2935 MGMT_STATUS_NOT_POWERED);
/* First pass: count mgmt-connected links to size the response. */
2940 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2941 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2945 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2946 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2953 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2954 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2956 bacpy(&rp->addr[i].bdaddr, &c->dst);
2957 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2958 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2963 rp->conn_count = cpu_to_le16(i);
2965 /* Recalculate length in case of filtered SCO connections, etc */
2966 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2968 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2974 hci_dev_unlock(hdev);
/* Helper: queue a PIN Code Negative Reply HCI command with a matching
 * pending mgmt command.
 * NOTE(review): extraction dropped interleaved lines (err declaration,
 * !cmd check, return); comments annotate visible code only.
 */
2978 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2979 struct mgmt_cp_pin_code_neg_reply *cp)
2981 struct pending_cmd *cmd;
2984 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
/* The HCI command only carries the peer address. */
2989 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2990 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2992 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply command handler.
 * NOTE(review): extraction dropped interleaved lines (declarations,
 * braces, goto labels, locking); comments annotate visible code only.
 */
2997 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3000 struct hci_conn *conn;
3001 struct mgmt_cp_pin_code_reply *cp = data;
3002 struct hci_cp_pin_code_reply reply;
3003 struct pending_cmd *cmd;
3010 if (!hdev_is_powered(hdev)) {
3011 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3012 MGMT_STATUS_NOT_POWERED);
3016 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3018 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3019 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise convert the
 * reply into a negative reply toward the controller.
 */
3023 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3024 struct mgmt_cp_pin_code_neg_reply ncp;
3026 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3028 BT_ERR("PIN code is not 16 bytes long");
3030 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3032 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3033 MGMT_STATUS_INVALID_PARAMS);
3038 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3044 cmd->cmd_complete = addr_cmd_complete;
3046 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3047 reply.pin_len = cp->pin_len;
3048 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3050 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3052 mgmt_pending_remove(cmd);
3055 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the requested IO capability
 * on the adapter after range-checking it against SMP's maximum value.
 */
3059 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3062 struct mgmt_cp_set_io_capability *cp = data;
/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability value. */
3066 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3067 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3068 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3072 hdev->io_capability = cp->io_capability;
3074 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3075 hdev->io_capability);
3077 hci_dev_unlock(hdev);
3079 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Walk the adapter's pending mgmt commands and return the
 * MGMT_OP_PAIR_DEVICE entry whose user_data is this connection
 * (the return statements are elided from this view).
 */
3083 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3085 struct hci_dev *hdev = conn->hdev;
3086 struct pending_cmd *cmd;
3088 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3089 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3092 if (cmd->user_data != conn)
/* Complete a pending PAIR_DEVICE command with @status, detach the
 * pairing callbacks from the connection and drop the reference taken
 * when pairing started.
 */
3101 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3103 struct mgmt_rp_pair_device rp;
3104 struct hci_conn *conn = cmd->user_data;
3107 bacpy(&rp.addr.bdaddr, &conn->dst);
3108 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3110 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3113 /* So we don't get further callbacks for this connection */
3114 conn->connect_cfm_cb = NULL;
3115 conn->security_cfm_cb = NULL;
3116 conn->disconn_cfm_cb = NULL;
3118 hci_conn_drop(conn);
3120 /* The device is paired so there is no need to remove
3121 * its connection parameters anymore.
3123 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called when SMP pairing finishes; map @complete to a mgmt status and
 * finish any pending PAIR_DEVICE command for this connection.
 */
3130 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3132 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3133 struct pending_cmd *cmd;
3135 cmd = find_pairing(conn);
/* Presumably guarded by a !cmd check elided from this view. */
3137 cmd->cmd_complete(cmd, status);
3138 mgmt_pending_remove(cmd);
/* BR/EDR connect/security/disconnect callback: translate the HCI status
 * and complete the pending pairing command, if one exists.
 */
3142 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3144 struct pending_cmd *cmd;
3146 BT_DBG("status %u", status);
3148 cmd = find_pairing(conn);
3150 BT_DBG("Unable to find a pending command");
3154 cmd->cmd_complete(cmd, mgmt_status(status));
3155 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb; extra conditions between the
 * BT_DBG and find_pairing() are elided from this view (success on LE
 * connect alone does not imply pairing finished).
 */
3158 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3160 struct pending_cmd *cmd;
3162 BT_DBG("status %u", status);
3167 cmd = find_pairing(conn);
3169 BT_DBG("Unable to find a pending command");
3173 cmd->cmd_complete(cmd, mgmt_status(status));
3174 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the address/IO-cap parameters,
 * establish an ACL (BR/EDR) or LE connection, hook the pairing
 * callbacks onto it and register a pending command that completes when
 * pairing finishes. NOTE(review): goto labels, IS_ERR(conn) guard and
 * several early-exit lines are elided from this listing.
 */
3177 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3180 struct mgmt_cp_pair_device *cp = data;
3181 struct mgmt_rp_pair_device rp;
3182 struct pending_cmd *cmd;
3183 u8 sec_level, auth_type;
3184 struct hci_conn *conn;
/* Echo the target address back in every response. */
3189 memset(&rp, 0, sizeof(rp));
3190 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3191 rp.addr.type = cp->addr.type;
3193 if (!bdaddr_type_is_valid(cp->addr.type))
3194 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3195 MGMT_STATUS_INVALID_PARAMS,
3198 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3199 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3200 MGMT_STATUS_INVALID_PARAMS,
3205 if (!hdev_is_powered(hdev)) {
3206 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3207 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Dedicated bonding at medium security is the default for pairing. */
3211 sec_level = BT_SECURITY_MEDIUM;
3212 auth_type = HCI_AT_DEDICATED_BONDING;
3214 if (cp->addr.type == BDADDR_BREDR) {
3215 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3220 /* Convert from L2CAP channel address type to HCI address type
3222 if (cp->addr.type == BDADDR_LE_PUBLIC)
3223 addr_type = ADDR_LE_DEV_PUBLIC;
3225 addr_type = ADDR_LE_DEV_RANDOM;
3227 /* When pairing a new device, it is expected to remember
3228 * this device for future connections. Adding the connection
3229 * parameter information ahead of time allows tracking
3230 * of the slave preferred values and will speed up any
3231 * further connection establishment.
3233 * If connection parameters already exist, then they
3234 * will be kept and this function does nothing.
3236 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3238 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3239 sec_level, HCI_LE_CONN_TIMEOUT,
/* Error path for a failed connect attempt (IS_ERR guard elided). */
3246 if (PTR_ERR(conn) == -EBUSY)
3247 status = MGMT_STATUS_BUSY;
3249 status = MGMT_STATUS_CONNECT_FAILED;
3251 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A non-NULL connect_cfm_cb means pairing is already in progress. */
3257 if (conn->connect_cfm_cb) {
3258 hci_conn_drop(conn);
3259 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3260 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3264 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3267 hci_conn_drop(conn);
3271 cmd->cmd_complete = pairing_complete;
3273 /* For LE, just connecting isn't a proof that the pairing finished */
3274 if (cp->addr.type == BDADDR_BREDR) {
3275 conn->connect_cfm_cb = pairing_complete_cb;
3276 conn->security_cfm_cb = pairing_complete_cb;
3277 conn->disconn_cfm_cb = pairing_complete_cb;
3279 conn->connect_cfm_cb = le_pairing_complete_cb;
3280 conn->security_cfm_cb = le_pairing_complete_cb;
3281 conn->disconn_cfm_cb = le_pairing_complete_cb;
3284 conn->io_capability = cp->io_cap;
/* Hold a reference for the lifetime of the pending command. */
3285 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure, finish immediately. */
3287 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3288 hci_conn_security(conn, sec_level, auth_type, true)) {
3289 cmd->cmd_complete(cmd, 0);
3290 mgmt_pending_remove(cmd);
3296 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: find the pending PAIR_DEVICE
 * command, verify it targets the supplied address, then complete it
 * with MGMT_STATUS_CANCELLED.
 */
3300 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3303 struct mgmt_addr_info *addr = data;
3304 struct pending_cmd *cmd;
3305 struct hci_conn *conn;
3312 if (!hdev_is_powered(hdev)) {
3313 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3314 MGMT_STATUS_NOT_POWERED);
3318 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
/* Presumably runs when no pairing is pending (!cmd check elided). */
3320 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3321 MGMT_STATUS_INVALID_PARAMS);
3325 conn = cmd->user_data;
/* The cancel request must name the device being paired with. */
3327 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3328 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3329 MGMT_STATUS_INVALID_PARAMS);
3333 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3334 mgmt_pending_remove(cmd);
3336 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3337 addr, sizeof(*addr));
3339 hci_dev_unlock(hdev);
/* Common helper for the user confirm/passkey (neg-)reply handlers:
 * route LE responses to SMP and BR/EDR responses to the controller via
 * @hci_op, tracking the latter as a pending mgmt command.
 * @mgmt_op: mgmt opcode to report results against.
 * @passkey: only meaningful for HCI_OP_USER_PASSKEY_REPLY.
 */
3343 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3344 struct mgmt_addr_info *addr, u16 mgmt_op,
3345 u16 hci_op, __le32 passkey)
3347 struct pending_cmd *cmd;
3348 struct hci_conn *conn;
3353 if (!hdev_is_powered(hdev)) {
3354 err = cmd_complete(sk, hdev->id, mgmt_op,
3355 MGMT_STATUS_NOT_POWERED, addr,
/* Pick the connection on the transport matching the address type. */
3360 if (addr->type == BDADDR_BREDR)
3361 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3363 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3366 err = cmd_complete(sk, hdev->id, mgmt_op,
3367 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go to SMP, not to the controller. */
3372 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3373 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3375 err = cmd_complete(sk, hdev->id, mgmt_op,
3376 MGMT_STATUS_SUCCESS, addr,
3379 err = cmd_complete(sk, hdev->id, mgmt_op,
3380 MGMT_STATUS_FAILED, addr,
3386 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3392 cmd->cmd_complete = addr_cmd_complete;
3394 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all other ops send just bdaddr. */
3395 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3396 struct hci_cp_user_passkey_reply cp;
3398 bacpy(&cp.bdaddr, &addr->bdaddr);
3399 cp.passkey = passkey;
3400 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3402 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Presumably only on hci_send_cmd() failure — TODO confirm. */
3406 mgmt_pending_remove(cmd);
3409 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3413 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3414 void *data, u16 len)
3416 struct mgmt_cp_pin_code_neg_reply *cp = data;
3420 return user_pairing_resp(sk, hdev, &cp->addr,
3421 MGMT_OP_PIN_CODE_NEG_REPLY,
3422 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: strict length check (this
 * command carries no variable payload), then delegate to
 * user_pairing_resp().
 */
3425 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3428 struct mgmt_cp_user_confirm_reply *cp = data;
3432 if (len != sizeof(*cp))
3433 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3434 MGMT_STATUS_INVALID_PARAMS);
3436 return user_pairing_resp(sk, hdev, &cp->addr,
3437 MGMT_OP_USER_CONFIRM_REPLY,
3438 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative-reply HCI opcode.
 */
3441 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3442 void *data, u16 len)
3444 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3448 return user_pairing_resp(sk, hdev, &cp->addr,
3449 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3450 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered
 * passkey through user_pairing_resp().
 */
3453 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3456 struct mgmt_cp_user_passkey_reply *cp = data;
3460 return user_pairing_resp(sk, hdev, &cp->addr,
3461 MGMT_OP_USER_PASSKEY_REPLY,
3462 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject the passkey request
 * via user_pairing_resp() (no passkey payload).
 */
3465 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3466 void *data, u16 len)
3468 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3472 return user_pairing_resp(sk, hdev, &cp->addr,
3473 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3474 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request.
 */
3477 static void update_name(struct hci_request *req)
3479 struct hci_dev *hdev = req->hdev;
3480 struct hci_cp_write_local_name cp;
3482 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3484 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Completion callback for the SET_LOCAL_NAME request: report the HCI
 * status (failure) or the stored name (success) to the waiting socket.
 */
3487 static void set_name_complete(struct hci_dev *hdev, u8 status)
3489 struct mgmt_cp_set_local_name *cp;
3490 struct pending_cmd *cmd;
3492 BT_DBG("status 0x%02x", status);
3496 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Failure branch (the status check itself is elided from this view). */
3503 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3504 mgmt_status(status));
3506 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3509 mgmt_pending_remove(cmd);
3512 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the short and full local
 * names, short-circuiting when nothing changed or the adapter is off,
 * otherwise pushing the name to the controller via an HCI request.
 */
3515 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3518 struct mgmt_cp_set_local_name *cp = data;
3519 struct pending_cmd *cmd;
3520 struct hci_request req;
3527 /* If the old values are the same as the new ones just return a
3528 * direct command complete event.
3530 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3531 !memcmp(hdev->short_name, cp->short_name,
3532 sizeof(hdev->short_name))) {
3533 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name is host-side only, so store it unconditionally. */
3538 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: just store the name and notify; no HCI traffic. */
3540 if (!hdev_is_powered(hdev)) {
3541 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3543 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3548 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3554 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3560 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3562 hci_req_init(&req, hdev);
/* BR/EDR write-local-name path (body elided from this view). */
3564 if (lmp_bredr_capable(hdev)) {
3569 /* The name is stored in the scan response data and so
3570 * no need to udpate the advertising data here.
3572 if (lmp_le_capable(hdev))
3573 update_scan_rsp_data(&req);
3575 err = hci_req_run(&req, set_name_complete);
/* Presumably only on hci_req_run() failure — TODO confirm. */
3577 mgmt_pending_remove(cmd);
3580 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: validate power/SSP support and
 * uniqueness of the request, then ask the controller for OOB data
 * (extended variant when Secure Connections is enabled for BR/EDR).
 */
3584 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3585 void *data, u16 data_len)
3587 struct pending_cmd *cmd;
3590 BT_DBG("%s", hdev->name);
3594 if (!hdev_is_powered(hdev)) {
3595 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3596 MGMT_STATUS_NOT_POWERED);
3600 if (!lmp_ssp_capable(hdev)) {
3601 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3602 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding OOB read per adapter. */
3606 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3607 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3612 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3618 if (bredr_sc_enabled(hdev))
3619 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3622 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
/* Presumably only on hci_send_cmd() failure — TODO confirm. */
3625 mgmt_pending_remove(cmd);
3628 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing data.
 * Two command sizes are accepted — the legacy 192-bit-only form and
 * the extended form that additionally carries 256-bit values. Only
 * BDADDR_BREDR targets are accepted in either form.
 */
3632 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3633 void *data, u16 len)
3637 BT_DBG("%s ", hdev->name);
3641 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3642 struct mgmt_cp_add_remote_oob_data *cp = data;
3645 if (cp->addr.type != BDADDR_BREDR) {
3646 err = cmd_complete(sk, hdev->id,
3647 MGMT_OP_ADD_REMOTE_OOB_DATA,
3648 MGMT_STATUS_INVALID_PARAMS,
3649 &cp->addr, sizeof(cp->addr));
/* Legacy form: 192-bit hash/rand only, no 256-bit values. */
3653 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3654 cp->addr.type, cp->hash,
3655 cp->rand, NULL, NULL);
3657 status = MGMT_STATUS_FAILED;
3659 status = MGMT_STATUS_SUCCESS;
3661 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3662 status, &cp->addr, sizeof(cp->addr));
3663 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3664 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3665 u8 *rand192, *hash192;
3668 if (cp->addr.type != BDADDR_BREDR) {
3669 err = cmd_complete(sk, hdev->id,
3670 MGMT_OP_ADD_REMOTE_OOB_DATA,
3671 MGMT_STATUS_INVALID_PARAMS,
3672 &cp->addr, sizeof(cp->addr));
/* LE branch elided here; otherwise use the supplied 192-bit data. */
3676 if (bdaddr_type_is_le(cp->addr.type)) {
3680 rand192 = cp->rand192;
3681 hash192 = cp->hash192;
3684 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3685 cp->addr.type, hash192, rand192,
3686 cp->hash256, cp->rand256);
3688 status = MGMT_STATUS_FAILED;
3690 status = MGMT_STATUS_SUCCESS;
3692 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3693 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command. */
3695 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3696 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3697 MGMT_STATUS_INVALID_PARAMS);
3701 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored remote OOB data
 * for one BR/EDR address, or all of it when BDADDR_ANY is given.
 */
3705 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3706 void *data, u16 len)
3708 struct mgmt_cp_remove_remote_oob_data *cp = data;
3712 BT_DBG("%s", hdev->name);
3714 if (cp->addr.type != BDADDR_BREDR)
3715 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3716 MGMT_STATUS_INVALID_PARAMS,
3717 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: clear everything. */
3721 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3722 hci_remote_oob_data_clear(hdev);
3723 status = MGMT_STATUS_SUCCESS;
3727 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
/* Removal failure maps to INVALID_PARAMS (no such entry). */
3729 status = MGMT_STATUS_INVALID_PARAMS;
3731 status = MGMT_STATUS_SUCCESS;
3734 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3735 status, &cp->addr, sizeof(cp->addr));
3737 hci_dev_unlock(hdev);
/* Build the HCI commands needed to start the currently configured
 * discovery (inquiry for BR/EDR, active LE scan for LE/interleaved)
 * onto @req. Returns false with *status set on any precondition
 * failure. NOTE(review): several guard/return lines are elided from
 * this listing.
 */
3741 static bool trigger_discovery(struct hci_request *req, u8 *status)
3743 struct hci_dev *hdev = req->hdev;
3744 struct hci_cp_le_set_scan_param param_cp;
3745 struct hci_cp_le_set_scan_enable enable_cp;
3746 struct hci_cp_inquiry inq_cp;
3747 /* General inquiry access code (GIAC) */
3748 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3752 switch (hdev->discovery.type) {
3753 case DISCOV_TYPE_BREDR:
3754 *status = mgmt_bredr_support(hdev);
/* Reject if an inquiry is already running on the controller. */
3758 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3759 *status = MGMT_STATUS_BUSY;
3763 hci_inquiry_cache_flush(hdev);
3765 memset(&inq_cp, 0, sizeof(inq_cp));
3766 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3767 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3768 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3771 case DISCOV_TYPE_LE:
3772 case DISCOV_TYPE_INTERLEAVED:
3773 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR enabled as well as LE. */
3777 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3778 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3779 *status = MGMT_STATUS_NOT_SUPPORTED;
3783 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3784 /* Don't let discovery abort an outgoing
3785 * connection attempt that's using directed
3788 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3790 *status = MGMT_STATUS_REJECTED;
3794 disable_advertising(req);
3797 /* If controller is scanning, it means the background scanning
3798 * is running. Thus, we should temporarily stop it in order to
3799 * set the discovery scanning parameters.
3801 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3802 hci_req_add_le_scan_disable(req);
3804 memset(&param_cp, 0, sizeof(param_cp));
3806 /* All active scans will be done with either a resolvable
3807 * private address (when privacy feature has been enabled)
3808 * or non-resolvable private address.
3810 err = hci_update_random_address(req, true, &own_addr_type);
3812 *status = MGMT_STATUS_FAILED;
3816 param_cp.type = LE_SCAN_ACTIVE;
3817 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3818 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3819 param_cp.own_address_type = own_addr_type;
3820 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3823 memset(&enable_cp, 0, sizeof(enable_cp));
3824 enable_cp.enable = LE_SCAN_ENABLE;
3825 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3826 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type falls through to the default case. */
3831 *status = MGMT_STATUS_INVALID_PARAMS;
/* Completion callback shared by START_DISCOVERY and
 * START_SERVICE_DISCOVERY: report status, move discovery state to
 * FINDING and, for LE scans, arm the delayed scan-disable work.
 */
3838 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3840 struct pending_cmd *cmd;
3841 unsigned long timeout;
3843 BT_DBG("status %d", status);
3847 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
/* Fall back to the service-discovery variant of the command. */
3849 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3852 cmd->cmd_complete(cmd, mgmt_status(status));
3853 mgmt_pending_remove(cmd);
/* Failure path: discovery never started. */
3857 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3861 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
/* LE-based discovery is time-limited; BR/EDR inquiry self-terminates. */
3863 switch (hdev->discovery.type) {
3864 case DISCOV_TYPE_LE:
3865 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3867 case DISCOV_TYPE_INTERLEAVED:
3868 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3870 case DISCOV_TYPE_BREDR:
3874 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3880 queue_delayed_work(hdev->workqueue,
3881 &hdev->le_scan_disable, timeout);
3884 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validate state, reset any previous
 * discovery filter, build the discovery HCI request via
 * trigger_discovery() and run it.
 */
3887 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3888 void *data, u16 len)
3890 struct mgmt_cp_start_discovery *cp = data;
3891 struct pending_cmd *cmd;
3892 struct hci_request req;
3896 BT_DBG("%s", hdev->name);
3900 if (!hdev_is_powered(hdev)) {
3901 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3902 MGMT_STATUS_NOT_POWERED,
3903 &cp->type, sizeof(cp->type));
/* Busy if discovery or periodic inquiry is already running. */
3907 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3908 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3909 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3910 MGMT_STATUS_BUSY, &cp->type,
3915 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3921 cmd->cmd_complete = generic_cmd_complete;
3923 /* Clear the discovery filter first to free any previously
3924 * allocated memory for the UUID list.
3926 hci_discovery_filter_clear(hdev);
3928 hdev->discovery.type = cp->type;
3929 hdev->discovery.report_invalid_rssi = false;
3931 hci_req_init(&req, hdev);
3933 if (!trigger_discovery(&req, &status)) {
3934 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3935 status, &cp->type, sizeof(cp->type));
3936 mgmt_pending_remove(cmd);
3940 err = hci_req_run(&req, start_discovery_complete);
/* Presumably only on hci_req_run() failure — TODO confirm. */
3942 mgmt_pending_remove(cmd);
3946 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3949 hci_dev_unlock(hdev);
/* cmd_complete hook for service discovery: reply with the command's
 * own stored parameters (truncated in this elided listing).
 */
3953 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3955 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery() but
 * additionally installs an RSSI threshold and a UUID filter. The
 * variable-length UUID list is validated against both an overflow
 * bound (max_uuid_count) and the exact command length.
 */
3959 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3960 void *data, u16 len)
3962 struct mgmt_cp_start_service_discovery *cp = data;
3963 struct pending_cmd *cmd;
3964 struct hci_request req;
/* Upper bound so uuid_count * 16 cannot overflow a u16 length. */
3965 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3966 u16 uuid_count, expected_len;
3970 BT_DBG("%s", hdev->name);
3974 if (!hdev_is_powered(hdev)) {
3975 err = cmd_complete(sk, hdev->id,
3976 MGMT_OP_START_SERVICE_DISCOVERY,
3977 MGMT_STATUS_NOT_POWERED,
3978 &cp->type, sizeof(cp->type));
3982 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3983 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3984 err = cmd_complete(sk, hdev->id,
3985 MGMT_OP_START_SERVICE_DISCOVERY,
3986 MGMT_STATUS_BUSY, &cp->type,
3991 uuid_count = __le16_to_cpu(cp->uuid_count);
3992 if (uuid_count > max_uuid_count) {
3993 BT_ERR("service_discovery: too big uuid_count value %u",
3995 err = cmd_complete(sk, hdev->id,
3996 MGMT_OP_START_SERVICE_DISCOVERY,
3997 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The command must be exactly header + 16 bytes per UUID. */
4002 expected_len = sizeof(*cp) + uuid_count * 16;
4003 if (expected_len != len) {
4004 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4006 err = cmd_complete(sk, hdev->id,
4007 MGMT_OP_START_SERVICE_DISCOVERY,
4008 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4013 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4020 cmd->cmd_complete = service_discovery_cmd_complete;
4022 /* Clear the discovery filter first to free any previously
4023 * allocated memory for the UUID list.
4025 hci_discovery_filter_clear(hdev);
4027 hdev->discovery.type = cp->type;
4028 hdev->discovery.rssi = cp->rssi;
4029 hdev->discovery.uuid_count = uuid_count;
/* Copy the caller's UUID filter; freed by the next filter_clear(). */
4031 if (uuid_count > 0) {
4032 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4034 if (!hdev->discovery.uuids) {
4035 err = cmd_complete(sk, hdev->id,
4036 MGMT_OP_START_SERVICE_DISCOVERY,
4038 &cp->type, sizeof(cp->type));
4039 mgmt_pending_remove(cmd);
4044 hci_req_init(&req, hdev);
4046 if (!trigger_discovery(&req, &status)) {
4047 err = cmd_complete(sk, hdev->id,
4048 MGMT_OP_START_SERVICE_DISCOVERY,
4049 status, &cp->type, sizeof(cp->type));
4050 mgmt_pending_remove(cmd);
4054 err = hci_req_run(&req, start_discovery_complete);
/* Presumably only on hci_req_run() failure — TODO confirm. */
4056 mgmt_pending_remove(cmd);
4060 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4063 hci_dev_unlock(hdev);
/* Completion callback for STOP_DISCOVERY: report the translated HCI
 * status and mark discovery as stopped.
 */
4067 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
4069 struct pending_cmd *cmd;
4071 BT_DBG("status %d", status);
4075 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4077 cmd->cmd_complete(cmd, mgmt_status(status));
4078 mgmt_pending_remove(cmd);
4082 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4084 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: verify a matching discovery is
 * active, then queue hci_stop_discovery(). -ENODATA from hci_req_run()
 * means no HCI commands were needed, so complete synchronously.
 */
4087 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4090 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4091 struct pending_cmd *cmd;
4092 struct hci_request req;
4095 BT_DBG("%s", hdev->name);
4099 if (!hci_discovery_active(hdev)) {
4100 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4101 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4102 sizeof(mgmt_cp->type));
/* The requested type must match the discovery that is running. */
4106 if (hdev->discovery.type != mgmt_cp->type) {
4107 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4108 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4109 sizeof(mgmt_cp->type));
4113 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4119 cmd->cmd_complete = generic_cmd_complete;
4121 hci_req_init(&req, hdev);
4123 hci_stop_discovery(&req);
4125 err = hci_req_run(&req, stop_discovery_complete);
/* Success: completion happens asynchronously in the callback. */
4127 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4131 mgmt_pending_remove(cmd);
4133 /* If no HCI commands were sent we're done */
4134 if (err == -ENODATA) {
4135 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4136 &mgmt_cp->type, sizeof(mgmt_cp->type));
4137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4141 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: record whether the name of an inquiry
 * cache entry is known; unknown names are queued for resolution.
 */
4145 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4148 struct mgmt_cp_confirm_name *cp = data;
4149 struct inquiry_entry *e;
4152 BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery session is active. */
4156 if (!hci_discovery_active(hdev)) {
4157 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4158 MGMT_STATUS_FAILED, &cp->addr,
4163 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4165 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4166 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4171 if (cp->name_known) {
4172 e->name_state = NAME_KNOWN;
4175 e->name_state = NAME_NEEDED;
4176 hci_inquiry_cache_update_resolve(hdev, e);
4179 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4183 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the adapter's
 * blacklist and emit DEVICE_BLOCKED to other mgmt sockets on success.
 */
4187 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4190 struct mgmt_cp_block_device *cp = data;
4194 BT_DBG("%s", hdev->name);
4196 if (!bdaddr_type_is_valid(cp->addr.type))
4197 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4198 MGMT_STATUS_INVALID_PARAMS,
4199 &cp->addr, sizeof(cp->addr));
4203 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4206 status = MGMT_STATUS_FAILED;
/* Success: notify everyone except the requesting socket (sk). */
4210 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4212 status = MGMT_STATUS_SUCCESS;
4215 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4216 &cp->addr, sizeof(cp->addr));
4218 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device(); removing a
 * non-listed address yields MGMT_STATUS_INVALID_PARAMS.
 */
4223 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4226 struct mgmt_cp_unblock_device *cp = data;
4230 BT_DBG("%s", hdev->name);
4232 if (!bdaddr_type_is_valid(cp->addr.type))
4233 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4234 MGMT_STATUS_INVALID_PARAMS,
4235 &cp->addr, sizeof(cp->addr));
4239 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4242 status = MGMT_STATUS_INVALID_PARAMS;
/* Success: notify everyone except the requesting socket (sk). */
4246 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4248 status = MGMT_STATUS_SUCCESS;
4251 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4252 &cp->addr, sizeof(cp->addr));
4254 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID quadruple and run
 * an HCI request (presumably to refresh EIR — the request body is
 * elided from this listing). Source 0x00-0x02 per the DI profile.
 */
4259 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4262 struct mgmt_cp_set_device_id *cp = data;
4263 struct hci_request req;
4267 BT_DBG("%s", hdev->name);
4269 source = __le16_to_cpu(cp->source);
4271 if (source > 0x0002)
4272 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4273 MGMT_STATUS_INVALID_PARAMS);
4277 hdev->devid_source = source;
4278 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4279 hdev->devid_product = __le16_to_cpu(cp->product);
4280 hdev->devid_version = __le16_to_cpu(cp->version);
4282 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4284 hci_req_init(&req, hdev);
4286 hci_req_run(&req, NULL);
4288 hci_dev_unlock(hdev);
/* Completion callback for SET_ADVERTISING: on failure report the error
 * to all pending commands; on success sync HCI_ADVERTISING with the
 * controller state and broadcast new settings.
 */
4293 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4295 struct cmd_lookup match = { NULL, hdev };
4300 u8 mgmt_err = mgmt_status(status);
4302 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4303 cmd_status_rsp, &mgmt_err);
/* Mirror the actual controller advertising state into the flag. */
4307 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4308 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4310 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4312 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4315 new_settings(hdev, match.sk);
4321 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: toggle LE advertising. In several
 * situations only the flag is toggled and no HCI traffic happens
 * (powered off, no change, LE links up, or an active LE scan running).
 */
4324 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4327 struct mgmt_mode *cp = data;
4328 struct pending_cmd *cmd;
4329 struct hci_request req;
4330 u8 val, enabled, status;
4333 BT_DBG("request for %s", hdev->name);
4335 status = mgmt_le_support(hdev);
4337 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4340 if (cp->val != 0x00 && cp->val != 0x01)
4341 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4342 MGMT_STATUS_INVALID_PARAMS);
4347 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4349 /* The following conditions are ones which mean that we should
4350 * not do any HCI communication but directly send a mgmt
4351 * response to user space (after toggling the flag if
4354 if (!hdev_is_powered(hdev) || val == enabled ||
4355 hci_conn_num(hdev, LE_LINK) > 0 ||
4356 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4357 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4358 bool changed = false;
4360 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4361 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4365 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only emit new_settings when the flag actually changed. */
4370 err = new_settings(hdev, sk);
/* Serialize with other advertising/LE state changes in flight. */
4375 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4376 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4377 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4382 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4388 hci_req_init(&req, hdev);
4391 enable_advertising(&req);
4393 disable_advertising(&req);
4395 err = hci_req_run(&req, set_advertising_complete);
/* Presumably only on hci_req_run() failure — TODO confirm. */
4397 mgmt_pending_remove(cmd);
4400 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: store an LE static random
 * address. Only allowed while powered off; a non-ANY address must not
 * be BDADDR_NONE and must have the two MSBs set, as required for
 * static random addresses by the Core Specification.
 */
4404 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4405 void *data, u16 len)
4407 struct mgmt_cp_set_static_address *cp = data;
4410 BT_DBG("%s", hdev->name);
4412 if (!lmp_le_capable(hdev))
4413 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4414 MGMT_STATUS_NOT_SUPPORTED);
4416 if (hdev_is_powered(hdev))
4417 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4418 MGMT_STATUS_REJECTED);
4420 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4421 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4422 return cmd_status(sk, hdev->id,
4423 MGMT_OP_SET_STATIC_ADDRESS,
4424 MGMT_STATUS_INVALID_PARAMS);
4426 /* Two most significant bits shall be set */
4427 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4428 return cmd_status(sk, hdev->id,
4429 MGMT_OP_SET_STATIC_ADDRESS,
4430 MGMT_STATUS_INVALID_PARAMS);
4435 bacpy(&hdev->static_addr, &cp->bdaddr);
4437 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4439 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set LE scan interval/window (both
 * 0x0004-0x4000, window <= interval) and restart background scanning
 * so the new parameters take effect.
 */
4444 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4445 void *data, u16 len)
4447 struct mgmt_cp_set_scan_params *cp = data;
4448 __u16 interval, window;
4451 BT_DBG("%s", hdev->name);
4453 if (!lmp_le_capable(hdev))
4454 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4455 MGMT_STATUS_NOT_SUPPORTED);
4457 interval = __le16_to_cpu(cp->interval);
4459 if (interval < 0x0004 || interval > 0x4000)
4460 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4461 MGMT_STATUS_INVALID_PARAMS);
4463 window = __le16_to_cpu(cp->window);
4465 if (window < 0x0004 || window > 0x4000)
4466 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4467 MGMT_STATUS_INVALID_PARAMS);
4469 if (window > interval)
4470 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4471 MGMT_STATUS_INVALID_PARAMS);
4475 hdev->le_scan_interval = interval;
4476 hdev->le_scan_window = window;
4478 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4480 /* If background scan is running, restart it so new parameters are
/* Only restart when no discovery session owns the scan. */
4483 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4484 hdev->discovery.state == DISCOVERY_STOPPED) {
4485 struct hci_request req;
4487 hci_req_init(&req, hdev);
4489 hci_req_add_le_scan_disable(&req);
4490 hci_req_add_le_passive_scan(&req);
4492 hci_req_run(&req, NULL);
4495 hci_dev_unlock(hdev);
/* Completion callback for SET_FAST_CONNECTABLE: on success latch the
 * requested mode into HCI_FAST_CONNECTABLE and broadcast settings.
 */
4500 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4502 struct pending_cmd *cmd;
4504 BT_DBG("status 0x%02x", status);
4508 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
/* Failure branch (the status check itself is elided from this view). */
4513 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4514 mgmt_status(status));
4516 struct mgmt_mode *cp = cmd->param;
4519 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4521 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4523 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4524 new_settings(hdev, cmd->sk);
4527 mgmt_pending_remove(cmd);
4530 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled on a
 * >= 1.2 controller, power on and connectable mode; then writes the
 * fast-connectable page scan settings via an HCI request.
 */
4533 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4534 void *data, u16 len)
4536 struct mgmt_mode *cp = data;
4537 struct pending_cmd *cmd;
4538 struct hci_request req;
4541 BT_DBG("%s", hdev->name);
/* Interlaced page scan needs Bluetooth 1.2 or later. */
4543 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4544 hdev->hci_ver < BLUETOOTH_VER_1_2)
4545 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4546 MGMT_STATUS_NOT_SUPPORTED);
4548 if (cp->val != 0x00 && cp->val != 0x01)
4549 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4550 MGMT_STATUS_INVALID_PARAMS);
4552 if (!hdev_is_powered(hdev))
4553 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4554 MGMT_STATUS_NOT_POWERED);
4556 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4557 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4558 MGMT_STATUS_REJECTED);
4562 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4563 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No change requested: answer with current settings, no HCI work. */
4568 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4569 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4574 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4581 hci_req_init(&req, hdev);
4583 write_fast_connectable(&req, cp->val);
4585 err = hci_req_run(&req, fast_connectable_complete);
/* Presumably only on hci_req_run() failure — TODO confirm. */
4587 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4588 MGMT_STATUS_FAILED);
4589 mgmt_pending_remove(cmd);
4593 hci_dev_unlock(hdev);
/* Completion callback for the HCI request issued by set_bredr().
 * On failure it rolls back the optimistically-set HCI_BREDR_ENABLED
 * flag (set_bredr() flips it before running the request) and reports
 * the error; on success it confirms the settings to userspace.
 */
4598 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4600 struct pending_cmd *cmd;
4602 BT_DBG("status 0x%02x", status);
4606 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4611 u8 mgmt_err = mgmt_status(status);
/* Roll back: set_bredr() set this flag before the HCI commands ran
 * so that update_adv_data() generated the right flags.
 */
4613 /* We need to restore the flag if related HCI commands
4616 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4618 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4620 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4621 new_settings(hdev, cmd->sk);
4624 mgmt_pending_remove(cmd);
4627 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE) controller. Disabling is only allowed while powered
 * off; enabling while powered triggers HCI commands to update page
 * scan and advertising data, completed by set_bredr_complete().
 * NOTE(review): error-path/goto/unlock lines are missing from this
 * view; comments only were added.
 */
4630 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4632 struct mgmt_mode *cp = data;
4633 struct pending_cmd *cmd;
4634 struct hci_request req;
4637 BT_DBG("request for %s", hdev->name);
/* Toggling BR/EDR requires a dual-mode controller. */
4639 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4640 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4641 MGMT_STATUS_NOT_SUPPORTED);
/* LE must stay enabled; BR/EDR-only operation uses a different path. */
4643 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4644 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4645 MGMT_STATUS_REJECTED);
4647 if (cp->val != 0x00 && cp->val != 0x01)
4648 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4649 MGMT_STATUS_INVALID_PARAMS);
/* Requested state already current: just acknowledge. */
4653 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4654 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: flip flags directly, no HCI traffic needed. When
 * disabling BR/EDR, also clear every BR/EDR-only setting.
 */
4658 if (!hdev_is_powered(hdev)) {
4660 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4661 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4662 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4663 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4664 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4667 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4669 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4673 err = new_settings(hdev, sk);
/* Powered on: only enabling (val == 0x01) is permitted from here. */
4677 /* Reject disabling when powered on */
4679 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4680 MGMT_STATUS_REJECTED);
4684 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4685 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4690 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
/* Set the flag up-front; set_bredr_complete() rolls it back if the
 * HCI request fails.
 */
4696 /* We need to flip the bit already here so that update_adv_data
4697 * generates the correct flags.
4699 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4701 hci_req_init(&req, hdev);
4703 write_fast_connectable(&req, false);
4704 __hci_update_page_scan(&req);
4706 /* Since only the advertising data flags will change, there
4707 * is no need to update the scan response data.
4709 update_adv_data(&req);
4711 err = hci_req_run(&req, set_bredr_complete);
4713 mgmt_pending_remove(cmd);
4716 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections.
 * val: 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode.
 * When powered and SC-capable it sends HCI Write Secure Connections
 * Host Support; otherwise it just updates the flags.
 * NOTE(review): goto/unlock and some brace lines are missing from
 * this view; comments only were added.
 */
4720 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4721 void *data, u16 len)
4723 struct mgmt_mode *cp = data;
4724 struct pending_cmd *cmd;
4728 BT_DBG("request for %s", hdev->name);
/* SC is usable for LE (SMP) even without controller support, hence
 * the LE/controller/debug-flag three-way capability check.
 */
4730 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4731 !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4732 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4733 MGMT_STATUS_NOT_SUPPORTED);
4735 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4736 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4737 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: powered off, or no BR/EDR SC support to program. */
4741 if (!hdev_is_powered(hdev) ||
4742 (!lmp_sc_capable(hdev) &&
4743 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
4744 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4748 changed = !test_and_set_bit(HCI_SC_ENABLED,
4750 if (cp->val == 0x02)
4751 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4753 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4755 changed = test_and_clear_bit(HCI_SC_ENABLED,
4757 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4760 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4765 err = new_settings(hdev, sk);
4770 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4771 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No change in either SC_ENABLED or SC_ONLY: acknowledge as-is. */
4778 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4779 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4780 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4784 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
/* Program the controller's SC host support bit. */
4790 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4792 mgmt_pending_remove(cmd);
/* Track SC-only mode immediately; it has no HCI-level counterpart. */
4796 if (cp->val == 0x02)
4797 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4799 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4802 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.
 * val: 0x00 = discard debug keys, 0x01 = keep debug keys,
 * 0x02 = keep AND actively use SSP debug mode. Only 0x02 toggles
 * HCI_USE_DEBUG_KEYS, which (when SSP is on and powered) is pushed
 * to the controller via Write SSP Debug Mode.
 */
4806 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4807 void *data, u16 len)
4809 struct mgmt_mode *cp = data;
4810 bool changed, use_changed;
4813 BT_DBG("request for %s", hdev->name);
4815 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4816 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4817 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the "keep debug keys" policy actually changed. */
4822 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4825 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* Track whether the "use debug keys" (SSP debug mode) state changed. */
4828 if (cp->val == 0x02)
4829 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4832 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Only touch the controller when powered with SSP enabled and the
 * debug-mode state really changed; return value deliberately ignored
 * (best effort).
 */
4835 if (hdev_is_powered(hdev) && use_changed &&
4836 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4837 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4838 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4839 sizeof(mode), &mode);
4842 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4847 err = new_settings(hdev, sk);
4850 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (use of
 * resolvable private addresses) and install the local IRK supplied
 * in the command. Only permitted while powered off.
 */
4854 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4857 struct mgmt_cp_set_privacy *cp = cp_data;
4861 BT_DBG("request for %s", hdev->name);
4863 if (!lmp_le_capable(hdev))
4864 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4865 MGMT_STATUS_NOT_SUPPORTED);
4867 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4868 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4869 MGMT_STATUS_INVALID_PARAMS);
/* Privacy changes while powered would race ongoing advertising and
 * scanning, so reject them.
 */
4871 if (hdev_is_powered(hdev))
4872 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4873 MGMT_STATUS_REJECTED);
4877 /* If user space supports this command it is also expected to
4878 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4880 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
/* Enable: store the IRK and force a fresh RPA to be generated. */
4883 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4884 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4885 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Disable: wipe the stored IRK. */
4887 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4888 memset(hdev->irk, 0, sizeof(hdev->irk));
4889 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4892 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4897 err = new_settings(hdev, sk);
4900 hci_dev_unlock(hdev);
/* Validate one IRK entry from MGMT_OP_LOAD_IRKS: the address must be
 * an LE public address, or an LE random address of the static type
 * (top two bits of the MSB set, per the Core Specification).
 */
4904 static bool irk_is_valid(struct mgmt_irk_info *irk)
4906 switch (irk->addr.type) {
4907 case BDADDR_LE_PUBLIC:
4910 case BDADDR_LE_RANDOM:
4911 /* Two most significant bits shall be set */
4912 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the kernel's IRK store with the
 * list supplied by userspace. Validates the count against the u16
 * payload ceiling (overflow guard), checks the exact payload length,
 * validates every entry, then clears and repopulates the store.
 */
4920 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4923 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound on irk_count such that the total payload length still
 * fits in the u16 'len'; prevents expected_len overflow below.
 */
4924 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4925 sizeof(struct mgmt_irk_info));
4926 u16 irk_count, expected_len;
4929 BT_DBG("request for %s", hdev->name);
4931 if (!lmp_le_capable(hdev))
4932 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4933 MGMT_STATUS_NOT_SUPPORTED);
4935 irk_count = __le16_to_cpu(cp->irk_count);
4936 if (irk_count > max_irk_count) {
4937 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4938 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4939 MGMT_STATUS_INVALID_PARAMS);
/* The payload must be exactly header + irk_count entries. */
4942 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4943 if (expected_len != len) {
4944 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4946 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4947 MGMT_STATUS_INVALID_PARAMS);
4950 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries up-front so the store is replaced atomically
 * from userspace's point of view (all-or-nothing).
 */
4952 for (i = 0; i < irk_count; i++) {
4953 struct mgmt_irk_info *key = &cp->irks[i];
4955 if (!irk_is_valid(key))
4956 return cmd_status(sk, hdev->id,
4958 MGMT_STATUS_INVALID_PARAMS);
4963 hci_smp_irks_clear(hdev);
4965 for (i = 0; i < irk_count; i++) {
4966 struct mgmt_irk_info *irk = &cp->irks[i];
/* Map mgmt address types onto the HCI LE address types. */
4969 if (irk->addr.type == BDADDR_LE_PUBLIC)
4970 addr_type = ADDR_LE_DEV_PUBLIC;
4972 addr_type = ADDR_LE_DEV_RANDOM;
4974 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
/* Userspace handling IRKs implies it handles RPA resolution. */
4978 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4980 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4982 hci_dev_unlock(hdev);
/* Validate one LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS: master
 * must be a boolean, and the address must be LE public or an LE
 * static random address (top two MSB bits set).
 */
4987 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4989 if (key->master != 0x00 && key->master != 0x01)
4992 switch (key->addr.type) {
4993 case BDADDR_LE_PUBLIC:
4996 case BDADDR_LE_RANDOM:
4997 /* Two most significant bits shall be set */
4998 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the SMP LTK store
 * with the list from userspace. Same validate-count / validate-length
 * / validate-entries / clear-and-repopulate pattern as load_irks().
 * NOTE(review): 'break' lines inside the switch are missing from this
 * view; comments only were added.
 */
5006 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5007 void *cp_data, u16 len)
5009 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Ceiling on key_count so sizeof(*cp) + count * entry_size cannot
 * overflow the u16 expected_len computed below.
 */
5010 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5011 sizeof(struct mgmt_ltk_info));
5012 u16 key_count, expected_len;
5015 BT_DBG("request for %s", hdev->name);
5017 if (!lmp_le_capable(hdev))
5018 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5019 MGMT_STATUS_NOT_SUPPORTED);
5021 key_count = __le16_to_cpu(cp->key_count);
5022 if (key_count > max_key_count) {
5023 BT_ERR("load_ltks: too big key_count value %u", key_count);
5024 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5025 MGMT_STATUS_INVALID_PARAMS);
/* Payload must be exactly header + key_count entries. */
5028 expected_len = sizeof(*cp) + key_count *
5029 sizeof(struct mgmt_ltk_info);
5030 if (expected_len != len) {
5031 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5033 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5034 MGMT_STATUS_INVALID_PARAMS);
5037 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate everything first so the store is swapped all-or-nothing. */
5039 for (i = 0; i < key_count; i++) {
5040 struct mgmt_ltk_info *key = &cp->keys[i];
5042 if (!ltk_is_valid(key))
5043 return cmd_status(sk, hdev->id,
5044 MGMT_OP_LOAD_LONG_TERM_KEYS,
5045 MGMT_STATUS_INVALID_PARAMS);
5050 hci_smp_ltks_clear(hdev);
5052 for (i = 0; i < key_count; i++) {
5053 struct mgmt_ltk_info *key = &cp->keys[i];
5054 u8 type, addr_type, authenticated;
5056 if (key->addr.type == BDADDR_LE_PUBLIC)
5057 addr_type = ADDR_LE_DEV_PUBLIC;
5059 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt key type onto the SMP key type and security level.
 * Legacy keys are role-dependent (master vs slave); P-256 derived
 * keys are role-independent.
 */
5061 switch (key->type) {
5062 case MGMT_LTK_UNAUTHENTICATED:
5063 authenticated = 0x00;
5064 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5066 case MGMT_LTK_AUTHENTICATED:
5067 authenticated = 0x01;
5068 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5070 case MGMT_LTK_P256_UNAUTH:
5071 authenticated = 0x00;
5072 type = SMP_LTK_P256;
5074 case MGMT_LTK_P256_AUTH:
5075 authenticated = 0x01;
5076 type = SMP_LTK_P256;
5078 case MGMT_LTK_P256_DEBUG:
5079 authenticated = 0x00;
5080 type = SMP_LTK_P256_DEBUG;
5085 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5086 authenticated, key->val, key->enc_size, key->ediv,
5090 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5093 hci_dev_unlock(hdev);
/* cmd_complete handler for MGMT_OP_GET_CONN_INFO pending commands.
 * Builds the reply from the connection's cached RSSI/TX-power values
 * on success, or invalid sentinels on failure, then drops the conn
 * reference taken by get_conn_info().
 */
5098 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5100 struct hci_conn *conn = cmd->user_data;
5101 struct mgmt_rp_get_conn_info rp;
/* cmd->param starts with the mgmt_addr_info the request was for. */
5104 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5106 if (status == MGMT_STATUS_SUCCESS) {
5107 rp.rssi = conn->rssi;
5108 rp.tx_power = conn->tx_power;
5109 rp.max_tx_power = conn->max_tx_power;
/* Failure: report well-defined "unknown" sentinel values. */
5111 rp.rssi = HCI_RSSI_INVALID;
5112 rp.tx_power = HCI_TX_POWER_INVALID;
5113 rp.max_tx_power = HCI_TX_POWER_INVALID;
5116 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
/* Balances the hci_conn_hold() done when the command was queued. */
5119 hci_conn_drop(conn);
/* Completion callback for the Read RSSI (+ optional Read TX Power)
 * request issued by get_conn_info(). Recovers the connection handle
 * from whichever command was sent last, looks up the connection and
 * resolves the matching pending GET_CONN_INFO command.
 */
5125 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
5127 struct hci_cp_read_rssi *cp;
5128 struct pending_cmd *cmd;
5129 struct hci_conn *conn;
5133 BT_DBG("status 0x%02x", hci_status);
5137 /* Commands sent in request are either Read RSSI or Read Transmit Power
5138 * Level so we check which one was last sent to retrieve connection
5139 * handle. Both commands have handle as first parameter so it's safe to
5140 * cast data on the same command struct.
5142 * First command sent is always Read RSSI and we fail only if it fails.
5143 * In other case we simply override error to indicate success as we
5144 * already remembered if TX power value is actually valid.
5146 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
/* Read RSSI not the last command: TX Power failed, which is benign. */
5148 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5149 status = MGMT_STATUS_SUCCESS;
5151 status = mgmt_status(hci_status);
/* Neither command found in sent_cmd: should not happen. */
5155 BT_ERR("invalid sent_cmd in conn_info response");
5159 handle = __le16_to_cpu(cp->handle);
5160 conn = hci_conn_hash_lookup_handle(hdev, handle);
5162 BT_ERR("unknown handle (%d) in conn_info response", handle);
/* Find the pending command keyed by this exact connection. */
5166 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5170 cmd->cmd_complete(cmd, status);
5171 mgmt_pending_remove(cmd);
5174 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for a
 * connected device. Values are served from the hci_conn cache when
 * fresh enough; otherwise an HCI request (Read RSSI, plus TX power
 * reads where still needed) refreshes them asynchronously and the
 * reply is sent from conn_info_refresh_complete().
 * NOTE(review): goto/unlock/error lines are missing from this view;
 * comments only were added.
 */
5177 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5180 struct mgmt_cp_get_conn_info *cp = data;
5181 struct mgmt_rp_get_conn_info rp;
5182 struct hci_conn *conn;
5183 unsigned long conn_info_age;
5186 BT_DBG("%s", hdev->name);
/* Echo the requested address back in every reply, success or error. */
5188 memset(&rp, 0, sizeof(rp));
5189 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5190 rp.addr.type = cp->addr.type;
5192 if (!bdaddr_type_is_valid(cp->addr.type))
5193 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5194 MGMT_STATUS_INVALID_PARAMS,
5199 if (!hdev_is_powered(hdev)) {
5200 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5201 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look the connection up on the right link type for the address. */
5205 if (cp->addr.type == BDADDR_BREDR)
5206 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5209 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5211 if (!conn || conn->state != BT_CONNECTED) {
5212 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5213 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Only one refresh per connection may be outstanding. */
5217 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5218 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5219 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5223 /* To avoid client trying to guess when to poll again for information we
5224 * calculate conn info age as random value between min/max set in hdev.
5226 conn_info_age = hdev->conn_info_min_age +
5227 prandom_u32_max(hdev->conn_info_max_age -
5228 hdev->conn_info_min_age);
5230 /* Query controller to refresh cached values if they are too old or were
5233 if (time_after(jiffies, conn->conn_info_timestamp +
5234 msecs_to_jiffies(conn_info_age)) ||
5235 !conn->conn_info_timestamp) {
5236 struct hci_request req;
5237 struct hci_cp_read_tx_power req_txp_cp;
5238 struct hci_cp_read_rssi req_rssi_cp;
5239 struct pending_cmd *cmd;
/* Read RSSI is always first; the completion handler relies on this
 * ordering to decide whether a failure is fatal.
 */
5241 hci_req_init(&req, hdev);
5242 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5243 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5246 /* For LE links TX power does not change thus we don't need to
5247 * query for it once value is known.
5249 if (!bdaddr_type_is_le(cp->addr.type) ||
5250 conn->tx_power == HCI_TX_POWER_INVALID) {
5251 req_txp_cp.handle = cpu_to_le16(conn->handle);
5252 req_txp_cp.type = 0x00;
5253 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5254 sizeof(req_txp_cp), &req_txp_cp);
5257 /* Max TX power needs to be read only once per connection */
5258 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5259 req_txp_cp.handle = cpu_to_le16(conn->handle);
5260 req_txp_cp.type = 0x01;
5261 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5262 sizeof(req_txp_cp), &req_txp_cp);
5265 err = hci_req_run(&req, conn_info_refresh_complete);
5269 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Pin the connection until conn_info_cmd_complete() drops it. */
5276 hci_conn_hold(conn);
5277 cmd->user_data = hci_conn_get(conn);
5278 cmd->cmd_complete = conn_info_cmd_complete;
5280 conn->conn_info_timestamp = jiffies;
5282 /* Cache is valid, just reply with values cached in hci_conn */
5283 rp.rssi = conn->rssi;
5284 rp.tx_power = conn->tx_power;
5285 rp.max_tx_power = conn->max_tx_power;
5287 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5288 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5292 hci_dev_unlock(hdev);
/* cmd_complete handler for MGMT_OP_GET_CLOCK_INFO pending commands.
 * Fills in the local clock and, when a connection was supplied, the
 * piconet clock and accuracy, then sends the reply and drops the
 * connection reference taken by get_clock_info().
 */
5296 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5298 struct hci_conn *conn = cmd->user_data;
5299 struct mgmt_rp_get_clock_info rp;
5300 struct hci_dev *hdev;
5303 memset(&rp, 0, sizeof(rp));
5304 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
/* Re-acquire the hdev by index; cmd only stores the index. */
5309 hdev = hci_dev_get(cmd->index);
5311 rp.local_clock = cpu_to_le32(hdev->clock);
/* Piconet clock fields are only valid for the connection case. */
5316 rp.piconet_clock = cpu_to_le32(conn->clock);
5317 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5321 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
/* Balances the hci_conn_hold() done when the command was queued. */
5325 hci_conn_drop(conn);
/* Completion callback for the HCI Read Clock request issued by
 * get_clock_info(). Determines which connection (if any) the last
 * Read Clock targeted, then resolves the matching pending command.
 */
5332 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5334 struct hci_cp_read_clock *hci_cp;
5335 struct pending_cmd *cmd;
5336 struct hci_conn *conn;
5338 BT_DBG("%s status %u", hdev->name, status);
5342 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection was
 * read; recover that connection from the handle.
 */
5346 if (hci_cp->which) {
5347 u16 handle = __le16_to_cpu(hci_cp->handle);
5348 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* Pending command is keyed by the connection (or NULL for local). */
5353 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5357 cmd->cmd_complete(cmd, mgmt_status(status));
5358 mgmt_pending_remove(cmd);
5361 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler: read the local BR/EDR clock and,
 * for a given connected peer, its piconet clock. Issues one or two
 * HCI Read Clock commands and replies asynchronously through
 * clock_info_cmd_complete() via get_clock_info_complete().
 * NOTE(review): goto/unlock/error lines are missing from this view;
 * comments only were added.
 */
5364 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5367 struct mgmt_cp_get_clock_info *cp = data;
5368 struct mgmt_rp_get_clock_info rp;
5369 struct hci_cp_read_clock hci_cp;
5370 struct pending_cmd *cmd;
5371 struct hci_request req;
5372 struct hci_conn *conn;
5375 BT_DBG("%s", hdev->name);
/* Echo the requested address back in every reply. */
5377 memset(&rp, 0, sizeof(rp));
5378 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5379 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5381 if (cp->addr.type != BDADDR_BREDR)
5382 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5383 MGMT_STATUS_INVALID_PARAMS,
5388 if (!hdev_is_powered(hdev)) {
5389 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5390 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* A non-ANY address selects a specific connected peer. */
5394 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5395 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5397 if (!conn || conn->state != BT_CONNECTED) {
5398 err = cmd_complete(sk, hdev->id,
5399 MGMT_OP_GET_CLOCK_INFO,
5400 MGMT_STATUS_NOT_CONNECTED,
5408 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5414 cmd->cmd_complete = clock_info_cmd_complete;
5416 hci_req_init(&req, hdev);
/* First read: the local clock (which = 0x00 via the memset). */
5418 memset(&hci_cp, 0, sizeof(hci_cp));
5419 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Pin the connection until clock_info_cmd_complete() drops it. */
5422 hci_conn_hold(conn);
5423 cmd->user_data = hci_conn_get(conn);
/* Second read: the piconet clock for this connection handle. */
5425 hci_cp.handle = cpu_to_le16(conn->handle);
5426 hci_cp.which = 0x01; /* Piconet clock */
5427 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5430 err = hci_req_run(&req, get_clock_info_complete);
5432 mgmt_pending_remove(cmd);
5435 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists
 * and is fully established (state BT_CONNECTED).
 */
5439 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5441 struct hci_conn *conn;
5443 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
/* Address match alone is not enough; the LE address type must agree. */
5447 if (conn->dst_type != type)
5450 if (conn->state != BT_CONNECTED)
5456 /* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for an LE
 * device and move its params entry onto the matching action list
 * (pend_le_reports or pend_le_conns), then queue a background-scan
 * update on the supplied request.
 * NOTE(review): 'break' lines in the switch are missing from this
 * view; comments only were added.
 */
5457 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5458 u8 addr_type, u8 auto_connect)
5460 struct hci_dev *hdev = req->hdev;
5461 struct hci_conn_params *params;
/* Creates the params entry with defaults if it does not exist yet. */
5463 params = hci_conn_params_add(hdev, addr, addr_type);
5467 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on before. */
5470 list_del_init(&params->action);
5472 switch (auto_connect) {
5473 case HCI_AUTO_CONN_DISABLED:
5474 case HCI_AUTO_CONN_LINK_LOSS:
5475 __hci_update_background_scan(req);
5477 case HCI_AUTO_CONN_REPORT:
5478 list_add(&params->action, &hdev->pend_le_reports);
5479 __hci_update_background_scan(req);
5481 case HCI_AUTO_CONN_DIRECT:
5482 case HCI_AUTO_CONN_ALWAYS:
/* Only start passively scanning for the device if it is not
 * already connected.
 */
5483 if (!is_connected(hdev, addr, addr_type)) {
5484 list_add(&params->action, &hdev->pend_le_conns);
5485 __hci_update_background_scan(req);
5490 params->auto_connect = auto_connect;
5492 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event to all mgmt sockets except the
 * one that issued the Add Device command (sk is skipped).
 */
5498 static void device_added(struct sock *sk, struct hci_dev *hdev,
5499 bdaddr_t *bdaddr, u8 type, u8 action)
5501 struct mgmt_ev_device_added ev;
5503 bacpy(&ev.addr.bdaddr, bdaddr);
5504 ev.addr.type = type;
5507 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Completion callback for the HCI request run by add_device():
 * resolve the pending ADD_DEVICE command with the translated status.
 */
5510 static void add_device_complete(struct hci_dev *hdev, u8 status)
5512 struct pending_cmd *cmd;
5514 BT_DBG("status 0x%02x", status);
5518 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5522 cmd->cmd_complete(cmd, mgmt_status(status));
5523 mgmt_pending_remove(cmd);
5526 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_DEVICE handler. For BR/EDR (action 0x01 only): add the
 * address to the whitelist and refresh page scan. For LE: map the
 * action (0x00 report, 0x01 allow-incoming/direct, 0x02 auto-connect)
 * onto an auto-connect policy via hci_conn_params_set(). Any queued
 * HCI work completes through add_device_complete().
 * NOTE(review): goto/unlock and some error lines are missing from
 * this view; comments only were added.
 */
5529 static int add_device(struct sock *sk, struct hci_dev *hdev,
5530 void *data, u16 len)
5532 struct mgmt_cp_add_device *cp = data;
5533 struct pending_cmd *cmd;
5534 struct hci_request req;
5535 u8 auto_conn, addr_type;
5538 BT_DBG("%s", hdev->name);
/* The address must be a valid type and must not be BDADDR_ANY. */
5540 if (!bdaddr_type_is_valid(cp->addr.type) ||
5541 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5542 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5543 MGMT_STATUS_INVALID_PARAMS,
5544 &cp->addr, sizeof(cp->addr));
5546 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5547 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5548 MGMT_STATUS_INVALID_PARAMS,
5549 &cp->addr, sizeof(cp->addr));
5551 hci_req_init(&req, hdev);
5555 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5561 cmd->cmd_complete = addr_cmd_complete;
5563 if (cp->addr.type == BDADDR_BREDR) {
5564 /* Only incoming connections action is supported for now */
5565 if (cp->action != 0x01) {
5566 err = cmd->cmd_complete(cmd,
5567 MGMT_STATUS_INVALID_PARAMS);
5568 mgmt_pending_remove(cmd);
/* BR/EDR: whitelist the peer and make sure page scan is running. */
5572 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5577 __hci_update_page_scan(&req);
/* LE path: translate mgmt address type to HCI LE address type. */
5582 if (cp->addr.type == BDADDR_LE_PUBLIC)
5583 addr_type = ADDR_LE_DEV_PUBLIC;
5585 addr_type = ADDR_LE_DEV_RANDOM;
5587 if (cp->action == 0x02)
5588 auto_conn = HCI_AUTO_CONN_ALWAYS;
5589 else if (cp->action == 0x01)
5590 auto_conn = HCI_AUTO_CONN_DIRECT;
5592 auto_conn = HCI_AUTO_CONN_REPORT;
5594 /* If the connection parameters don't exist for this device,
5595 * they will be created and configured with defaults.
5597 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5599 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5600 mgmt_pending_remove(cmd);
/* Notify other mgmt listeners about the newly-added device. */
5605 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5607 err = hci_req_run(&req, add_device_complete);
5609 /* ENODATA means no HCI commands were needed (e.g. if
5610 * the adapter is powered off).
5612 if (err == -ENODATA)
5613 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5614 mgmt_pending_remove(cmd);
5618 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event to all mgmt sockets except
 * the one that issued the Remove Device command (sk is skipped).
 */
5622 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5623 bdaddr_t *bdaddr, u8 type)
5625 struct mgmt_ev_device_removed ev;
5627 bacpy(&ev.addr.bdaddr, bdaddr);
5628 ev.addr.type = type;
5630 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Completion callback for the HCI request run by remove_device():
 * resolve the pending REMOVE_DEVICE command with the translated
 * status.
 */
5633 static void remove_device_complete(struct hci_dev *hdev, u8 status)
5635 struct pending_cmd *cmd;
5637 BT_DBG("status 0x%02x", status);
5641 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5645 cmd->cmd_complete(cmd, mgmt_status(status));
5646 mgmt_pending_remove(cmd);
5649 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_DEVICE handler. With a specific address: remove a
 * BR/EDR whitelist entry or a non-disabled LE conn-params entry.
 * With BDADDR_ANY (and addr.type 0): wipe the whole whitelist and
 * all non-disabled LE connection parameters. Scan state updates are
 * queued on a request completed by remove_device_complete().
 * NOTE(review): goto/unlock and brace lines are missing from this
 * view; comments only were added.
 */
5652 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5653 void *data, u16 len)
5655 struct mgmt_cp_remove_device *cp = data;
5656 struct pending_cmd *cmd;
5657 struct hci_request req;
5660 BT_DBG("%s", hdev->name);
5662 hci_req_init(&req, hdev);
5666 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5672 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove one specific device entry. */
5674 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5675 struct hci_conn_params *params;
5678 if (!bdaddr_type_is_valid(cp->addr.type)) {
5679 err = cmd->cmd_complete(cmd,
5680 MGMT_STATUS_INVALID_PARAMS);
5681 mgmt_pending_remove(cmd);
/* BR/EDR: drop from whitelist; missing entry is an error. */
5685 if (cp->addr.type == BDADDR_BREDR) {
5686 err = hci_bdaddr_list_del(&hdev->whitelist,
5690 err = cmd->cmd_complete(cmd,
5691 MGMT_STATUS_INVALID_PARAMS);
5692 mgmt_pending_remove(cmd);
5696 __hci_update_page_scan(&req);
5698 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE: locate the conn-params entry for this address/type. */
5703 if (cp->addr.type == BDADDR_LE_PUBLIC)
5704 addr_type = ADDR_LE_DEV_PUBLIC;
5706 addr_type = ADDR_LE_DEV_RANDOM;
5708 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5711 err = cmd->cmd_complete(cmd,
5712 MGMT_STATUS_INVALID_PARAMS);
5713 mgmt_pending_remove(cmd);
/* Disabled entries were not added via Add Device, so removing
 * them through this command is invalid.
 */
5717 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5718 err = cmd->cmd_complete(cmd,
5719 MGMT_STATUS_INVALID_PARAMS);
5720 mgmt_pending_remove(cmd);
5724 list_del(&params->action);
5725 list_del(&params->list);
5727 __hci_update_background_scan(&req);
5729 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: bulk removal of all entries. */
5731 struct hci_conn_params *p, *tmp;
5732 struct bdaddr_list *b, *btmp;
/* For the wildcard address only type 0 is valid. */
5734 if (cp->addr.type) {
5735 err = cmd->cmd_complete(cmd,
5736 MGMT_STATUS_INVALID_PARAMS);
5737 mgmt_pending_remove(cmd);
5741 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5742 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5747 __hci_update_page_scan(&req);
/* Remove all LE params except disabled ones, which were created
 * internally rather than via Add Device.
 */
5749 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5750 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5752 device_removed(sk, hdev, &p->addr, p->addr_type);
5753 list_del(&p->action);
5758 BT_DBG("All LE connection parameters were removed");
5760 __hci_update_background_scan(&req);
5764 err = hci_req_run(&req, remove_device_complete);
5766 /* ENODATA means no HCI commands were needed (e.g. if
5767 * the adapter is powered off).
5769 if (err == -ENODATA)
5770 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5771 mgmt_pending_remove(cmd);
5775 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: bulk-load LE connection
 * parameters from userspace. Validates count and exact payload
 * length, clears previously disabled entries, then adds each valid
 * entry; individually invalid entries are logged and skipped rather
 * than failing the whole command.
 */
5779 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5782 struct mgmt_cp_load_conn_param *cp = data;
/* Ceiling on param_count so expected_len below cannot overflow. */
5783 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5784 sizeof(struct mgmt_conn_param));
5785 u16 param_count, expected_len;
5788 if (!lmp_le_capable(hdev))
5789 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5790 MGMT_STATUS_NOT_SUPPORTED);
5792 param_count = __le16_to_cpu(cp->param_count);
5793 if (param_count > max_param_count) {
5794 BT_ERR("load_conn_param: too big param_count value %u",
5796 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5797 MGMT_STATUS_INVALID_PARAMS);
/* Payload must be exactly header + param_count entries. */
5800 expected_len = sizeof(*cp) + param_count *
5801 sizeof(struct mgmt_conn_param);
5802 if (expected_len != len) {
5803 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5805 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5806 MGMT_STATUS_INVALID_PARAMS);
5809 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop stale auto-generated (disabled) entries before loading. */
5813 hci_conn_params_clear_disabled(hdev);
5815 for (i = 0; i < param_count; i++) {
5816 struct mgmt_conn_param *param = &cp->params[i];
5817 struct hci_conn_params *hci_param;
5818 u16 min, max, latency, timeout;
5821 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5824 if (param->addr.type == BDADDR_LE_PUBLIC) {
5825 addr_type = ADDR_LE_DEV_PUBLIC;
5826 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5827 addr_type = ADDR_LE_DEV_RANDOM;
/* Invalid address type: skip the entry, keep processing. */
5829 BT_ERR("Ignoring invalid connection parameters");
5833 min = le16_to_cpu(param->min_interval);
5834 max = le16_to_cpu(param->max_interval);
5835 latency = le16_to_cpu(param->latency);
5836 timeout = le16_to_cpu(param->timeout);
5838 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5839 min, max, latency, timeout);
/* Range-check against the Core Specification limits. */
5841 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5842 BT_ERR("Ignoring invalid connection parameters");
5846 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5849 BT_ERR("Failed to add connection parameters");
5853 hci_param->conn_min_interval = min;
5854 hci_param->conn_max_interval = max;
5855 hci_param->conn_latency = latency;
5856 hci_param->supervision_timeout = timeout;
5859 hci_dev_unlock(hdev);
5861 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: mark whether an externally
 * configured controller (HCI_QUIRK_EXTERNAL_CONFIG) has been
 * configured. Toggling may move the controller between the
 * configured and unconfigured index lists, so a change can trigger
 * mgmt_index_removed()/mgmt_index_added() and a power-on cycle.
 * NOTE(review): brace/unlock lines are missing from this view;
 * comments only were added.
 */
5864 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5865 void *data, u16 len)
5867 struct mgmt_cp_set_external_config *cp = data;
5871 BT_DBG("%s", hdev->name);
/* Configuration state may only change while powered off. */
5873 if (hdev_is_powered(hdev))
5874 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5875 MGMT_STATUS_REJECTED);
5877 if (cp->config != 0x00 && cp->config != 0x01)
5878 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5879 MGMT_STATUS_INVALID_PARAMS);
5881 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5882 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5883 MGMT_STATUS_NOT_SUPPORTED);
5888 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5891 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5894 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5901 err = new_options(hdev, sk);
/* Unconfigured-flag state disagreeing with is_configured() means the
 * controller must switch index lists: remove it, flip the flag, and
 * re-announce it (cycling through the power_on work).
 */
5903 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5904 mgmt_index_removed(hdev);
5906 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5907 set_bit(HCI_CONFIG, &hdev->dev_flags);
5908 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5910 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Became unconfigured again: raw mode until configuration is done. */
5912 set_bit(HCI_RAW, &hdev->flags);
5913 mgmt_index_added(hdev);
5918 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public address for a
 * controller whose driver provides a set_bdaddr callback. Only valid
 * while powered off; if this completes the controller's
 * configuration, it transitions to the configured index list via a
 * power-on cycle.
 * NOTE(review): brace/unlock lines are missing from this view;
 * comments only were added.
 */
5922 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5923 void *data, u16 len)
5925 struct mgmt_cp_set_public_address *cp = data;
5929 BT_DBG("%s", hdev->name);
5931 if (hdev_is_powered(hdev))
5932 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5933 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a usable public address. */
5935 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5936 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5937 MGMT_STATUS_INVALID_PARAMS);
/* The driver must be able to program the address into hardware. */
5939 if (!hdev->set_bdaddr)
5940 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5941 MGMT_STATUS_NOT_SUPPORTED);
5945 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5946 bacpy(&hdev->public_addr, &cp->bdaddr);
5948 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5955 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5956 err = new_options(hdev, sk);
/* If the address was the last missing piece of configuration, move
 * the controller to the configured list via a power-on cycle.
 */
5958 if (is_configured(hdev)) {
5959 mgmt_index_removed(hdev);
5961 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5963 set_bit(HCI_CONFIG, &hdev->dev_flags);
5964 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5966 queue_work(hdev->req_workqueue, &hdev->power_on);
5970 hci_dev_unlock(hdev);
/* Dispatch table for Management commands, indexed directly by mgmt opcode.
 * Each row pairs the handler function with whether the command carries a
 * variable-length payload and the expected (minimum, when variable)
 * parameter size. mgmt_control() validates the received length against
 * these fields before calling func. Entry 0 is a NULL placeholder since
 * opcode 0x0000 is not a valid command.
 * NOTE(review): the struct's remaining field declarations and the table's
 * closing brace are not visible in this extract.
 */
5974 static const struct mgmt_handler {
5975 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5979 } mgmt_handlers[] = {
5980 { NULL }, /* 0x0000 (no command) */
5981 { read_version, false, MGMT_READ_VERSION_SIZE },
5982 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5983 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5984 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5985 { set_powered, false, MGMT_SETTING_SIZE },
5986 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5987 { set_connectable, false, MGMT_SETTING_SIZE },
5988 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5989 { set_bondable, false, MGMT_SETTING_SIZE },
5990 { set_link_security, false, MGMT_SETTING_SIZE },
5991 { set_ssp, false, MGMT_SETTING_SIZE },
5992 { set_hs, false, MGMT_SETTING_SIZE },
5993 { set_le, false, MGMT_SETTING_SIZE },
5994 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5995 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5996 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5997 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* Key/IRK/conn-param load commands take a variable-length list of
 * entries, hence var_len == true with the size acting as a minimum.
 */
5998 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5999 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
6000 { disconnect, false, MGMT_DISCONNECT_SIZE },
6001 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
6002 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
6003 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6004 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
6005 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
6006 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6007 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
6008 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
6009 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6010 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
6011 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6012 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6013 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
6014 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6015 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
6016 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
6017 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
6018 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
6019 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
6020 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
6021 { set_advertising, false, MGMT_SETTING_SIZE },
6022 { set_bredr, false, MGMT_SETTING_SIZE },
6023 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
6024 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
6025 { set_secure_conn, false, MGMT_SETTING_SIZE },
6026 { set_debug_keys, false, MGMT_SETTING_SIZE },
6027 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
6028 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
6029 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
6030 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
6031 { add_device, false, MGMT_ADD_DEVICE_SIZE },
6032 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
6033 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
6034 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
6035 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
6036 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
6037 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
6038 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for Management commands received on an HCI control socket.
 * Copies the message from user space, validates the mgmt_hdr (opcode,
 * controller index, payload length), then dispatches to the matching
 * mgmt_handlers[] entry. Errors are reported back to the socket via
 * cmd_status(). Returns msglen on success or a negative errno.
 * NOTE(review): several error paths, gotos and the cleanup tail are not
 * visible in this extract.
 */
6041 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6045 struct mgmt_hdr *hdr;
6046 u16 opcode, index, len;
6047 struct hci_dev *hdev = NULL;
6048 const struct mgmt_handler *handler;
6051 BT_DBG("got %zu bytes", msglen);
/* The message must at least contain a full mgmt header. */
6053 if (msglen < sizeof(*hdr))
6056 buf = kmalloc(msglen, GFP_KERNEL);
6060 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields arrive little-endian on the wire. */
6066 opcode = __le16_to_cpu(hdr->opcode);
6067 index = __le16_to_cpu(hdr->index);
6068 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must exactly match what was received. */
6070 if (len != msglen - sizeof(*hdr)) {
6075 if (index != MGMT_INDEX_NONE) {
6076 hdev = hci_dev_get(index);
6078 err = cmd_status(sk, index, opcode,
6079 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup/config, or claimed by a user channel,
 * are not addressable through the mgmt interface.
 */
6083 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6084 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6085 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6086 err = cmd_status(sk, index, opcode,
6087 MGMT_STATUS_INVALID_INDEX);
/* An unconfigured controller only accepts the configuration
 * related commands.
 */
6091 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6092 opcode != MGMT_OP_READ_CONFIG_INFO &&
6093 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
6094 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
6095 err = cmd_status(sk, index, opcode,
6096 MGMT_STATUS_INVALID_INDEX);
/* Opcode must be within the table and have a registered handler. */
6101 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
6102 mgmt_handlers[opcode].func == NULL) {
6103 BT_DBG("Unknown op %u", opcode);
6104 err = cmd_status(sk, index, opcode,
6105 MGMT_STATUS_UNKNOWN_COMMAND);
/* Index-less commands (version/commands/index lists) must not name a
 * controller ...
 */
6109 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6110 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6111 err = cmd_status(sk, index, opcode,
6112 MGMT_STATUS_INVALID_INDEX);
/* ... and every other command requires one. */
6116 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
6117 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6118 err = cmd_status(sk, index, opcode,
6119 MGMT_STATUS_INVALID_INDEX);
6123 handler = &mgmt_handlers[opcode];
/* Variable-length commands state a minimum size; fixed-length
 * commands must match exactly.
 */
6125 if ((handler->var_len && len < handler->data_len) ||
6126 (!handler->var_len && len != handler->data_len)) {
6127 err = cmd_status(sk, index, opcode,
6128 MGMT_STATUS_INVALID_PARAMS);
/* NOTE(review): presumably registers this socket as an mgmt client —
 * mgmt_init_hdev() is defined outside this extract; confirm.
 */
6133 mgmt_init_hdev(sk, hdev);
6135 cp = buf + sizeof(*hdr);
6137 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller to mgmt user space. BR/EDR-only
 * (AMP controllers and raw-quirk devices are filtered out); unconfigured
 * controllers emit UNCONF_INDEX_ADDED instead of INDEX_ADDED.
 * NOTE(review): the early-return bodies of the guard checks are not
 * visible in this extract.
 */
6151 void mgmt_index_added(struct hci_dev *hdev)
6153 if (hdev->dev_type != HCI_BREDR)
6156 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6159 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6160 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6162 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller index: fail every pending mgmt command
 * with INVALID_INDEX, then emit the (UNCONF_)INDEX_REMOVED event. Subject
 * to the same BR/EDR and raw-quirk filters as mgmt_index_added().
 */
6165 void mgmt_index_removed(struct hci_dev *hdev)
6167 u8 status = MGMT_STATUS_INVALID_INDEX;
6169 if (hdev->dev_type != HCI_BREDR)
6172 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 == wildcard: complete all pending commands with the status. */
6175 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6177 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6178 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6180 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6183 /* This function requires the caller holds hdev->lock */
/* Rebuild the pending LE action lists from the stored connection
 * parameters: direct/always entries go back on pend_le_conns, report
 * entries on pend_le_reports, then the background scan is refreshed.
 */
6184 static void restart_le_actions(struct hci_request *req)
6186 struct hci_dev *hdev = req->hdev;
6187 struct hci_conn_params *p;
6189 list_for_each_entry(p, &hdev->le_conn_params, list) {
6190 /* Needed for AUTO_OFF case where might not "really"
6191 * have been powered off.
/* Detach from whichever action list the entry is currently on
 * before re-adding it below.
 */
6193 list_del_init(&p->action);
6195 switch (p->auto_connect) {
6196 case HCI_AUTO_CONN_DIRECT:
6197 case HCI_AUTO_CONN_ALWAYS:
6198 list_add(&p->action, &hdev->pend_le_conns);
6200 case HCI_AUTO_CONN_REPORT:
6201 list_add(&p->action, &hdev->pend_le_reports);
6208 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci(): answer all pending SET_POWERED commands and
 * broadcast the resulting settings.
 */
6211 static void powered_complete(struct hci_dev *hdev, u8 status)
6213 struct cmd_lookup match = { NULL, hdev };
6215 BT_DBG("status 0x%02x", status);
6219 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6221 new_settings(hdev, match.sk);
6223 hci_dev_unlock(hdev);
/* After power on, build one HCI request that brings the controller in
 * line with the current mgmt settings: SSP mode, Secure Connections
 * support, LE host support, advertising data, authentication and page
 * scan. Returns the result of hci_req_run() with powered_complete as
 * the completion callback.
 */
6229 static int powered_update_hci(struct hci_dev *hdev)
6231 struct hci_request req;
6234 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt has it enabled but the host
 * feature bit is not yet set.
 */
6236 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6237 !lmp_host_ssp_capable(hdev)) {
6240 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
6243 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6245 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
6248 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6249 lmp_bredr_capable(hdev)) {
6250 struct hci_cp_write_le_host_supported cp;
6255 /* Check first if we already have the right
6256 * host state (host features set)
6258 if (cp.le != lmp_host_le_capable(hdev) ||
6259 cp.simul != lmp_host_le_br_capable(hdev))
6260 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6264 if (lmp_le_capable(hdev)) {
6265 /* Make sure the controller has a good default for
6266 * advertising data. This also applies to the case
6267 * where BR/EDR was toggled during the AUTO_OFF phase.
6269 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6270 update_adv_data(&req);
6271 update_scan_rsp_data(&req);
6274 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6275 enable_advertising(&req);
6277 restart_le_actions(&req);
/* Sync link-level authentication with the mgmt link-security flag. */
6280 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6281 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6282 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6283 sizeof(link_sec), &link_sec);
6285 if (lmp_bredr_capable(hdev)) {
6286 write_fast_connectable(&req, false);
6287 __hci_update_page_scan(&req);
6293 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a controller power state change. On power up, kick off
 * powered_update_hci(); on power down, complete pending commands, report
 * a zeroed class of device if needed, and broadcast new settings.
 * NOTE(review): large parts of this function (branching between the
 * powered/unpowered paths and the return) are missing from this extract.
 */
6296 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6298 struct cmd_lookup match = { NULL, hdev };
6299 u8 status, zero_cod[] = { 0, 0, 0 };
/* Nothing to do if user space never enabled the mgmt interface. */
6302 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6306 if (powered_update_hci(hdev) == 0)
6309 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6314 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6316 /* If the power off is because of hdev unregistration let
6317 * use the appropriate INVALID_INDEX status. Otherwise use
6318 * NOT_POWERED. We cover both scenarios here since later in
6319 * mgmt_index_removed() any hci_conn callbacks will have already
6320 * been triggered, potentially causing misleading DISCONNECTED
6323 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6324 status = MGMT_STATUS_INVALID_INDEX;
6326 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 == wildcard: fail every remaining pending command. */
6328 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6330 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6331 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6332 zero_cod, sizeof(zero_cod), NULL);
6335 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: RFKILLED if the power-on was
 * blocked by rfkill, FAILED otherwise.
 */
6343 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6345 struct pending_cmd *cmd;
6348 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6352 if (err == -ERFKILL)
6353 status = MGMT_STATUS_RFKILLED;
6355 status = MGMT_STATUS_FAILED;
6357 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6359 mgmt_pending_remove(cmd);
/* Discoverable-timeout handler: clear both discoverable flags, drop
 * inquiry scan (keeping page scan) when BR/EDR is enabled, refresh the
 * advertising data, and broadcast the new settings.
 */
6362 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6364 struct hci_request req;
6368 /* When discoverable timeout triggers, then just make sure
6369 * the limited discoverable flag is cleared. Even in the case
6370 * of a timeout triggered from general discoverable, it is
6371 * safe to unconditionally clear the flag.
6373 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6374 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6376 hci_req_init(&req, hdev);
6377 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE alone: stay connectable but stop answering inquiries. */
6378 u8 scan = SCAN_PAGE;
6379 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6380 sizeof(scan), &scan);
6383 update_adv_data(&req);
6384 hci_req_run(&req, NULL);
6386 hdev->discov_timeout = 0;
6388 new_settings(hdev, NULL);
6390 hci_dev_unlock(hdev);
/* Emit a NEW_LINK_KEY event so user space can decide whether to store
 * the BR/EDR link key (store_hint follows the persistent argument).
 */
6393 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6396 struct mgmt_ev_new_link_key ev;
6398 memset(&ev, 0, sizeof(ev));
6400 ev.store_hint = persistent;
6401 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6402 ev.key.addr.type = BDADDR_BREDR;
6403 ev.key.type = key->type;
6404 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6405 ev.key.pin_len = key->pin_len;
6407 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long term key to the corresponding mgmt LTK type constant,
 * distinguishing authenticated from unauthenticated and legacy from
 * P-256 keys; unknown types fall back to MGMT_LTK_UNAUTHENTICATED.
 * NOTE(review): the case labels for the first two authenticated checks
 * are missing from this extract — presumably SMP_LTK/SMP_LTK_SLAVE and
 * SMP_LTK_P256 respectively; confirm against the full source.
 */
6410 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6412 switch (ltk->type) {
6415 if (ltk->authenticated)
6416 return MGMT_LTK_AUTHENTICATED;
6417 return MGMT_LTK_UNAUTHENTICATED;
6419 if (ltk->authenticated)
6420 return MGMT_LTK_P256_AUTH;
6421 return MGMT_LTK_P256_UNAUTH;
6422 case SMP_LTK_P256_DEBUG:
6423 return MGMT_LTK_P256_DEBUG;
6426 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a NEW_LONG_TERM_KEY event. The store hint is forced to 0 for
 * non-identity random addresses (their address will change, so storing
 * the key is pointless); otherwise it follows the persistent argument.
 */
6429 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6431 struct mgmt_ev_new_long_term_key ev;
6433 memset(&ev, 0, sizeof(ev));
6435 /* Devices using resolvable or non-resolvable random addresses
6436 * without providing an identity resolving key don't require
6437 * to store long term keys. Their addresses will change the
6440 * Only when a remote device provides an identity address
6441 * make sure the long term key is stored. If the remote
6442 * identity is known, the long term keys are internally
6443 * mapped to the identity address. So allow static random
6444 * and public addresses here.
/* Top two bits 11 mark a static random address; anything else random
 * is resolvable/non-resolvable and not worth storing.
 */
6446 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6447 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6448 ev.store_hint = 0x00;
6450 ev.store_hint = persistent;
6452 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6453 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6454 ev.key.type = mgmt_ltk_type(key);
6455 ev.key.enc_size = key->enc_size;
6456 ev.key.ediv = key->ediv;
6457 ev.key.rand = key->rand;
6459 if (key->type == SMP_LTK)
6462 memcpy(ev.key.val, key->val, sizeof(key->val));
6464 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_IRK event. Storage is only hinted when the device actually
 * uses a resolvable private address (irk->rpa is set); identity-address
 * devices don't need their IRK stored.
 */
6467 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6469 struct mgmt_ev_new_irk ev;
6471 memset(&ev, 0, sizeof(ev));
6473 /* For identity resolving keys from devices that are already
6474 * using a public address or static random address, do not
6475 * ask for storing this key. The identity resolving key really
6476 * is only mandatory for devices using resolvable random
6479 * Storing all identity resolving keys has the downside that
6480 * they will be also loaded on next boot of the system. More
6481 * identity resolving keys, means more time during scanning is
6482 * needed to actually resolve these addresses.
6484 if (bacmp(&irk->rpa, BDADDR_ANY))
6485 ev.store_hint = 0x01;
6487 ev.store_hint = 0x00;
6489 bacpy(&ev.rpa, &irk->rpa);
6490 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6491 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6492 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6494 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_CSRK event for a signature resolving key, using the same
 * identity-address store-hint policy as mgmt_new_ltk().
 */
6497 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6500 struct mgmt_ev_new_csrk ev;
6502 memset(&ev, 0, sizeof(ev));
6504 /* Devices using resolvable or non-resolvable random addresses
6505 * without providing an identity resolving key don't require
6506 * to store signature resolving keys. Their addresses will change
6507 * the next time around.
6509 * Only when a remote device provides an identity address
6510 * make sure the signature resolving key is stored. So allow
6511 * static random and public addresses here.
6513 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6514 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6515 ev.store_hint = 0x00;
6517 ev.store_hint = persistent;
6519 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6520 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6521 ev.key.master = csrk->master;
6522 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6524 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_CONN_PARAM event carrying connection parameters suggested
 * by the remote device. Only identity addresses are reported; intervals,
 * latency and timeout are converted to little-endian for the wire.
 */
6527 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6528 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6529 u16 max_interval, u16 latency, u16 timeout)
6531 struct mgmt_ev_new_conn_param ev;
6533 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6536 memset(&ev, 0, sizeof(ev));
6537 bacpy(&ev.addr.bdaddr, bdaddr);
6538 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6539 ev.store_hint = store_hint;
6540 ev.min_interval = cpu_to_le16(min_interval);
6541 ev.max_interval = cpu_to_le16(max_interval);
6542 ev.latency = cpu_to_le16(latency);
6543 ev.timeout = cpu_to_le16(timeout);
6545 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length, type, data) to the buffer at offset
 * eir_len and return the new offset. The length octet covers the type
 * byte plus data_len, per the EIR/AD structure format. The caller must
 * guarantee the buffer has room.
 * NOTE(review): the return statement is not visible in this extract.
 */
6548 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6551 eir[eir_len++] = sizeof(type) + data_len;
6552 eir[eir_len++] = type;
6553 memcpy(&eir[eir_len], data, data_len);
6554 eir_len += data_len;
/* Emit a DEVICE_CONNECTED event. The variable-length EIR portion carries
 * either the connection's LE advertising data, or (for BR/EDR) the
 * remote name and class of device.
 */
6559 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6560 u32 flags, u8 *name, u8 name_len)
6563 struct mgmt_ev_device_connected *ev = (void *) buf;
6566 bacpy(&ev->addr.bdaddr, &conn->dst);
6567 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6569 ev->flags = __cpu_to_le32(flags);
6571 /* We must ensure that the EIR Data fields are ordered and
6572 * unique. Keep it simple for now and avoid the problem by not
6573 * adding any BR/EDR data to the LE adv.
6575 if (conn->le_adv_data_len > 0) {
6576 memcpy(&ev->eir[eir_len],
6577 conn->le_adv_data, conn->le_adv_data_len);
6578 eir_len = conn->le_adv_data_len;
6581 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append class of device when it is non-zero. */
6584 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6585 eir_len = eir_append_data(ev->eir, eir_len,
6587 conn->dev_class, 3);
6590 ev->eir_len = cpu_to_le16(eir_len);
6592 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6593 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command
 * with success and hand its socket back through the data pointer so the
 * caller can target the DEVICE_DISCONNECTED event.
 */
6596 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6598 struct sock **sk = data;
6600 cmd->cmd_complete(cmd, 0);
6605 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: finish a pending UNPAIR_DEVICE command,
 * emitting the DEVICE_UNPAIRED event before completing with success.
 */
6608 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6610 struct hci_dev *hdev = data;
6611 struct mgmt_cp_unpair_device *cp = cmd->param;
6613 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6615 cmd->cmd_complete(cmd, 0);
6616 mgmt_pending_remove(cmd);
/* Report whether a SET_POWERED(off) command is currently pending for
 * this controller, i.e. a power-down is in progress.
 * NOTE(review): the checks on the found command's mgmt_mode parameter
 * and the return statements are not visible in this extract.
 */
6619 bool mgmt_powering_down(struct hci_dev *hdev)
6621 struct pending_cmd *cmd;
6622 struct mgmt_mode *cp;
6624 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a DEVICE_DISCONNECTED event, completing any pending DISCONNECT
 * and UNPAIR_DEVICE commands on the way. Also fast-tracks a pending
 * power-off once the last connection drops.
 */
6635 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6636 u8 link_type, u8 addr_type, u8 reason,
6637 bool mgmt_connected)
6639 struct mgmt_ev_device_disconnected ev;
6640 struct sock *sk = NULL;
6642 /* The connection is still in hci_conn_hash so test for 1
6643 * instead of 0 to know if this is the last one.
6645 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6646 cancel_delayed_work(&hdev->power_off);
6647 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6650 if (!mgmt_connected)
6653 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Let a pending DISCONNECT command claim the event socket. */
6656 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6658 bacpy(&ev.addr.bdaddr, bdaddr);
6659 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6662 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6667 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: still complete pending UNPAIR_DEVICE
 * commands, then finish the matching pending DISCONNECT command (if its
 * address and type match) with the translated HCI status.
 */
6671 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6672 u8 link_type, u8 addr_type, u8 status)
6674 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6675 struct mgmt_cp_disconnect *cp;
6676 struct pending_cmd *cmd;
6678 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6681 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only complete the command if it refers to this exact device. */
6687 if (bacmp(bdaddr, &cp->addr.bdaddr))
6690 if (cp->addr.type != bdaddr_type)
6693 cmd->cmd_complete(cmd, mgmt_status(status));
6694 mgmt_pending_remove(cmd);
/* Emit a CONNECT_FAILED event with the translated HCI status. Like the
 * disconnect path, fast-tracks a pending power-off when this was the
 * last connection.
 */
6697 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6698 u8 addr_type, u8 status)
6700 struct mgmt_ev_connect_failed ev;
6702 /* The connection is still in hci_conn_hash so test for 1
6703 * instead of 0 to know if this is the last one.
6705 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6706 cancel_delayed_work(&hdev->power_off);
6707 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6710 bacpy(&ev.addr.bdaddr, bdaddr);
6711 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6712 ev.status = mgmt_status(status);
6714 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask user space for a PIN code via the PIN_CODE_REQUEST event
 * (BR/EDR only, hence the fixed BDADDR_BREDR address type).
 */
6717 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6719 struct mgmt_ev_pin_code_request ev;
6721 bacpy(&ev.addr.bdaddr, bdaddr);
6722 ev.addr.type = BDADDR_BREDR;
6725 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated
 * HCI status.
 */
6728 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6731 struct pending_cmd *cmd;
6733 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6737 cmd->cmd_complete(cmd, mgmt_status(status));
6738 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated
 * HCI status.
 */
6741 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6744 struct pending_cmd *cmd;
6746 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6750 cmd->cmd_complete(cmd, mgmt_status(status));
6751 mgmt_pending_remove(cmd);
/* Ask user space to confirm a numeric comparison value during pairing
 * (USER_CONFIRM_REQUEST event). Returns the mgmt_event() result.
 */
6754 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6755 u8 link_type, u8 addr_type, u32 value,
6758 struct mgmt_ev_user_confirm_request ev;
6760 BT_DBG("%s", hdev->name);
6762 bacpy(&ev.addr.bdaddr, bdaddr);
6763 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6764 ev.confirm_hint = confirm_hint;
6765 ev.value = cpu_to_le32(value);
6767 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask user space to enter a passkey during pairing
 * (USER_PASSKEY_REQUEST event). Returns the mgmt_event() result.
 */
6771 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6772 u8 link_type, u8 addr_type)
6774 struct mgmt_ev_user_passkey_request ev;
6776 BT_DBG("%s", hdev->name);
6778 bacpy(&ev.addr.bdaddr, bdaddr);
6779 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6781 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared tail for the four user confirm/passkey reply completions:
 * find the pending command for the given opcode and finish it with the
 * translated HCI status.
 */
6785 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6786 u8 link_type, u8 addr_type, u8 status,
6789 struct pending_cmd *cmd;
6791 cmd = mgmt_pending_find(opcode, hdev);
6795 cmd->cmd_complete(cmd, mgmt_status(status));
6796 mgmt_pending_remove(cmd);
/* Completion for USER_CONFIRM_REPLY; delegates to the shared helper. */
6801 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6802 u8 link_type, u8 addr_type, u8 status)
6804 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6805 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion for USER_CONFIRM_NEG_REPLY; delegates to the shared helper. */
6808 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6809 u8 link_type, u8 addr_type, u8 status)
6811 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6813 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion for USER_PASSKEY_REPLY; delegates to the shared helper. */
6816 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6817 u8 link_type, u8 addr_type, u8 status)
6819 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6820 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion for USER_PASSKEY_NEG_REPLY; delegates to the shared helper. */
6823 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6824 u8 link_type, u8 addr_type, u8 status)
6826 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6828 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a PASSKEY_NOTIFY event showing the passkey to display and how
 * many digits the remote side has entered so far.
 */
6831 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6832 u8 link_type, u8 addr_type, u32 passkey,
6835 struct mgmt_ev_passkey_notify ev;
6837 BT_DBG("%s", hdev->name);
6839 bacpy(&ev.addr.bdaddr, bdaddr);
6840 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6841 ev.passkey = __cpu_to_le32(passkey);
6842 ev.entered = entered;
6844 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: emit AUTH_FAILED (targeted at the
 * socket of a pending pairing command if one exists) and complete that
 * pending command with the translated status.
 */
6847 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6849 struct mgmt_ev_auth_failed ev;
6850 struct pending_cmd *cmd;
6851 u8 status = mgmt_status(hci_status);
6853 bacpy(&ev.addr.bdaddr, &conn->dst);
6854 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6857 cmd = find_pairing(conn);
6859 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6860 cmd ? cmd->sk : NULL);
6863 cmd->cmd_complete(cmd, status);
6864 mgmt_pending_remove(cmd);
/* HCI Write Auth Enable completed: on error, fail pending
 * SET_LINK_SECURITY commands; on success, sync the HCI_LINK_SECURITY
 * flag with the controller's HCI_AUTH state, answer pending commands
 * and broadcast new settings if the flag actually changed.
 */
6868 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6870 struct cmd_lookup match = { NULL, hdev };
6874 u8 mgmt_err = mgmt_status(status);
6875 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6876 cmd_status_rsp, &mgmt_err);
6880 if (test_bit(HCI_AUTH, &hdev->flags))
6881 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6884 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6887 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6891 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command with an all-zero payload to wipe the
 * extended inquiry response, clearing the cached copy as well. No-op on
 * controllers without extended inquiry support.
 */
6897 static void clear_eir(struct hci_request *req)
6899 struct hci_dev *hdev = req->hdev;
6900 struct hci_cp_write_eir cp;
6902 if (!lmp_ext_inq_capable(hdev))
6905 memset(hdev->eir, 0, sizeof(hdev->eir));
6907 memset(&cp, 0, sizeof(cp));
6909 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* HCI Write SSP Mode completed: reconcile the HCI_SSP_ENABLED (and
 * dependent HCI_HS_ENABLED) flags with the outcome, answer pending
 * SET_SSP commands, broadcast changed settings, and follow up with a
 * debug-keys or clear-EIR request as appropriate.
 * NOTE(review): the else/branch structure between the flag updates is
 * partially missing from this extract.
 */
6912 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6914 struct cmd_lookup match = { NULL, hdev };
6915 struct hci_request req;
6916 bool changed = false;
6919 u8 mgmt_err = mgmt_status(status);
/* Failure while enabling: roll the flag back and tell user space. */
6921 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6922 &hdev->dev_flags)) {
6923 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6924 new_settings(hdev, NULL);
6927 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6933 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6935 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High Speed depends on SSP, so it is cleared together with it. */
6937 changed = test_and_clear_bit(HCI_HS_ENABLED,
6940 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6943 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6946 new_settings(hdev, match.sk);
6951 hci_req_init(&req, hdev);
6953 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6954 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6955 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6956 sizeof(enable), &enable);
6962 hci_req_run(&req, NULL);
/* HCI Write Secure Connections Support completed: on error, roll back
 * HCI_SC_ENABLED/HCI_SC_ONLY and fail pending SET_SECURE_CONN commands;
 * on success, sync the flags, answer pending commands and broadcast
 * new settings if anything changed.
 */
6965 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6967 struct cmd_lookup match = { NULL, hdev };
6968 bool changed = false;
6971 u8 mgmt_err = mgmt_status(status);
6974 if (test_and_clear_bit(HCI_SC_ENABLED,
6976 new_settings(hdev, NULL);
6977 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6980 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6981 cmd_status_rsp, &mgmt_err);
6986 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6988 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* SC-only mode cannot remain set once SC itself is disabled. */
6989 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6992 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6993 settings_rsp, &match);
6996 new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket into the cmd_lookup (taking a reference) so a later event can
 * be attributed to it.
 */
7002 static void sk_lookup(struct pending_cmd *cmd, void *data)
7004 struct cmd_lookup *match = data;
7006 if (match->sk == NULL) {
7007 match->sk = cmd->sk;
7008 sock_hold(match->sk);
/* Class-of-device update finished: find the socket of whichever pending
 * command (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) triggered it, then
 * emit CLASS_OF_DEV_CHANGED with the 3-byte class value.
 */
7012 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7015 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7017 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7018 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7019 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7022 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local name update finished: store the new name, and emit
 * LOCAL_NAME_CHANGED (skipped when the write was part of the power-on
 * sequence rather than an explicit user-space request).
 */
7029 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7031 struct mgmt_cp_set_local_name ev;
7032 struct pending_cmd *cmd;
7037 memset(&ev, 0, sizeof(ev));
7038 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7039 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7041 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7043 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7045 /* If this is a HCI command related to powering on the
7046 * HCI dev don't send any mgmt signals.
7048 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7052 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7053 cmd ? cmd->sk : NULL);
/* Local OOB data read finished: answer the pending READ_LOCAL_OOB_DATA
 * command — with the extended (192+256 bit) response when Secure
 * Connections is enabled and 256-bit data is available, otherwise with
 * the legacy 192-bit response; on HCI error, send a status instead.
 */
7056 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7057 u8 *rand192, u8 *hash256, u8 *rand256,
7060 struct pending_cmd *cmd;
7062 BT_DBG("%s status %u", hdev->name, status);
7064 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7069 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7070 mgmt_status(status));
7072 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7073 struct mgmt_rp_read_local_oob_ext_data rp;
7075 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7076 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7078 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7079 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7081 cmd_complete(cmd->sk, hdev->id,
7082 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7085 struct mgmt_rp_read_local_oob_data rp;
7087 memcpy(rp.hash, hash192, sizeof(rp.hash));
7088 memcpy(rp.rand, rand192, sizeof(rp.rand));
7090 cmd_complete(cmd->sk, hdev->id,
7091 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7096 mgmt_pending_remove(cmd);
/* Linear search: does the 128-bit uuid appear in the uuids array of
 * uuid_count entries? (Return statements are outside this extract.)
 */
7099 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7103 for (i = 0; i < uuid_count; i++) {
7104 if (!memcmp(uuid, uuids[i], 16))
/* Walk EIR/advertising data and report whether any contained service
 * UUID (16, 32 or 128 bit, complete or incomplete list) matches one of
 * the uuids being filtered for. 16/32-bit UUIDs are expanded to 128 bit
 * using the Bluetooth base UUID before comparison. Stops on a malformed
 * field length.
 */
7111 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7115 while (parsed < eir_len) {
7116 u8 field_len = eir[0];
/* Bail out if the declared field length overruns the buffer. */
7123 if (eir_len - parsed < field_len + 1)
7127 case EIR_UUID16_ALL:
7128 case EIR_UUID16_SOME:
7129 for (i = 0; i + 3 <= field_len; i += 2) {
7130 memcpy(uuid, bluetooth_base_uuid, 16);
/* 16-bit UUID occupies bytes 12-13 of the base UUID. */
7131 uuid[13] = eir[i + 3];
7132 uuid[12] = eir[i + 2];
7133 if (has_uuid(uuid, uuid_count, uuids))
7137 case EIR_UUID32_ALL:
7138 case EIR_UUID32_SOME:
7139 for (i = 0; i + 5 <= field_len; i += 4) {
7140 memcpy(uuid, bluetooth_base_uuid, 16);
/* 32-bit UUID occupies bytes 12-15 of the base UUID. */
7141 uuid[15] = eir[i + 5];
7142 uuid[14] = eir[i + 4];
7143 uuid[13] = eir[i + 3];
7144 uuid[12] = eir[i + 2];
7145 if (has_uuid(uuid, uuid_count, uuids))
7149 case EIR_UUID128_ALL:
7150 case EIR_UUID128_SOME:
7151 for (i = 0; i + 17 <= field_len; i += 16) {
7152 memcpy(uuid, eir + i + 2, 16);
7153 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: length octet plus field_len bytes. */
7159 parsed += field_len + 1;
7160 eir += field_len + 1;
/* Emit a DEVICE_FOUND event for a discovery result, applying the active
 * discovery filters first: passive-scan gating, RSSI threshold, and —
 * for service discovery — the UUID filter across both the EIR/adv data
 * and the scan response. The event's variable part is the EIR data,
 * optionally followed by a synthesized Class of Device field and the
 * scan response.
 */
7166 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7167 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7168 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7171 struct mgmt_ev_device_found *ev = (void *) buf;
7175 /* Don't send events for a non-kernel initiated discovery. With
7176 * LE one exception is if we have pend_le_reports > 0 in which
7177 * case we're doing passive scanning and want these events.
7179 if (!hci_discovery_active(hdev)) {
7180 if (link_type == ACL_LINK)
7182 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7186 /* When using service discovery with a RSSI threshold, then check
7187 * if such a RSSI threshold is specified. If a RSSI threshold has
7188 * been specified, then all results with a RSSI smaller than the
7189 * RSSI threshold will be dropped.
7191 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7192 * the results are also dropped.
7194 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7195 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7198 /* Make sure that the buffer is big enough. The 5 extra bytes
7199 * are for the potential CoD field.
7201 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7204 memset(buf, 0, sizeof(buf));
7206 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7207 * RSSI value was reported as 0 when not available. This behavior
7208 * is kept when using device discovery. This is required for full
7209 * backwards compatibility with the API.
7211 * However when using service discovery, the value 127 will be
7212 * returned when the RSSI is not available.
7214 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
7217 bacpy(&ev->addr.bdaddr, bdaddr);
7218 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7220 ev->flags = cpu_to_le32(flags);
7223 /* When using service discovery and a list of UUID is
7224 * provided, results with no matching UUID should be
7225 * dropped. In case there is a match the result is
7226 * kept and checking possible scan response data
7229 if (hdev->discovery.uuid_count > 0)
7230 match = eir_has_uuids(eir, eir_len,
7231 hdev->discovery.uuid_count,
7232 hdev->discovery.uuids);
7236 if (!match && !scan_rsp_len)
7239 /* Copy EIR or advertising data into event */
7240 memcpy(ev->eir, eir, eir_len)
7242 /* When using service discovery and a list of UUID is
7243 * provided, results with empty EIR or advertising data
7244 * should be dropped since they do not match any UUID.
7246 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
/* Synthesize a Class of Device EIR field when the inquiry supplied
 * one and the EIR data didn't already carry it.
 */
7252 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7253 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7256 if (scan_rsp_len > 0) {
7257 /* When using service discovery and a list of UUID is
7258 * provided, results with no matching UUID should be
7259 * dropped if there is no previous match from the
7262 if (hdev->discovery.uuid_count > 0) {
7263 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7264 hdev->discovery.uuid_count,
7265 hdev->discovery.uuids))
7269 /* Append scan response data to event */
7270 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7272 /* When using service discovery and a list of UUID is
7273 * provided, results with empty scan response and no
7274 * previous matched advertising data should be dropped.
7276 if (hdev->discovery.uuid_count > 0 && !match)
7280 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7281 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7283 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a DEVICE_FOUND event carrying only a complete-name EIR field,
 * used when a remote name request resolves during discovery.
 */
7286 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7287 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7289 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field's length and type octets. */
7290 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7293 ev = (struct mgmt_ev_device_found *) buf;
7295 memset(buf, 0, sizeof(buf));
7297 bacpy(&ev->addr.bdaddr, bdaddr);
7298 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7301 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7304 ev->eir_len = cpu_to_le16(eir_len);
7306 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit a DISCOVERING event announcing that discovery of the current
 * type started (discovering != 0) or stopped.
 */
7309 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7311 struct mgmt_ev_discovering ev;
7313 BT_DBG("%s discovering %u", hdev->name, discovering);
7315 memset(&ev, 0, sizeof(ev));
7316 ev.type = hdev->discovery.type;
7317 ev.discovering = discovering;
7319 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for re-enabling advertising; logging only. */
7322 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
7324 BT_DBG("%s status %u", hdev->name, status);
7327 void mgmt_reenable_advertising(struct hci_dev *hdev)
7329 struct hci_request req;
7331 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7334 hci_req_init(&req, hdev);
7335 enable_advertising(&req);
7336 hci_req_run(&req, adv_enable_complete);