2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
/* Management interface version/revision reported via MGMT_OP_READ_VERSION. */
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
/* Opcodes advertised as supported by MGMT_OP_READ_COMMANDS.
 * NOTE(review): the embedded line numbers jump (43, 44, 48, ...), so some
 * entries (and the closing brace) are elided from this listing.
 */
41 static const u16 mgmt_commands[] = {
42 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_SET_DISCOVERABLE,
46 MGMT_OP_SET_CONNECTABLE,
47 MGMT_OP_SET_FAST_CONNECTABLE,
49 MGMT_OP_SET_LINK_SECURITY,
53 MGMT_OP_SET_DEV_CLASS,
54 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_LOAD_LINK_KEYS,
58 MGMT_OP_LOAD_LONG_TERM_KEYS,
60 MGMT_OP_GET_CONNECTIONS,
61 MGMT_OP_PIN_CODE_REPLY,
62 MGMT_OP_PIN_CODE_NEG_REPLY,
63 MGMT_OP_SET_IO_CAPABILITY,
65 MGMT_OP_CANCEL_PAIR_DEVICE,
66 MGMT_OP_UNPAIR_DEVICE,
67 MGMT_OP_USER_CONFIRM_REPLY,
68 MGMT_OP_USER_CONFIRM_NEG_REPLY,
69 MGMT_OP_USER_PASSKEY_REPLY,
70 MGMT_OP_USER_PASSKEY_NEG_REPLY,
71 MGMT_OP_READ_LOCAL_OOB_DATA,
72 MGMT_OP_ADD_REMOTE_OOB_DATA,
73 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
74 MGMT_OP_START_DISCOVERY,
75 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_UNBLOCK_DEVICE,
79 MGMT_OP_SET_DEVICE_ID,
80 MGMT_OP_SET_ADVERTISING,
82 MGMT_OP_SET_STATIC_ADDRESS,
83 MGMT_OP_SET_SCAN_PARAMS,
84 MGMT_OP_SET_SECURE_CONN,
85 MGMT_OP_SET_DEBUG_KEYS,
88 MGMT_OP_GET_CONN_INFO,
89 MGMT_OP_GET_CLOCK_INFO,
91 MGMT_OP_REMOVE_DEVICE,
92 MGMT_OP_LOAD_CONN_PARAM,
93 MGMT_OP_READ_UNCONF_INDEX_LIST,
94 MGMT_OP_READ_CONFIG_INFO,
95 MGMT_OP_SET_EXTERNAL_CONFIG,
96 MGMT_OP_SET_PUBLIC_ADDRESS,
97 MGMT_OP_START_SERVICE_DISCOVERY,
/* Event codes advertised as supported by MGMT_OP_READ_COMMANDS.
 * NOTE(review): some entries (and the closing brace) are elided from
 * this listing — see the line-number gaps.
 */
100 static const u16 mgmt_events[] = {
101 MGMT_EV_CONTROLLER_ERROR,
103 MGMT_EV_INDEX_REMOVED,
104 MGMT_EV_NEW_SETTINGS,
105 MGMT_EV_CLASS_OF_DEV_CHANGED,
106 MGMT_EV_LOCAL_NAME_CHANGED,
107 MGMT_EV_NEW_LINK_KEY,
108 MGMT_EV_NEW_LONG_TERM_KEY,
109 MGMT_EV_DEVICE_CONNECTED,
110 MGMT_EV_DEVICE_DISCONNECTED,
111 MGMT_EV_CONNECT_FAILED,
112 MGMT_EV_PIN_CODE_REQUEST,
113 MGMT_EV_USER_CONFIRM_REQUEST,
114 MGMT_EV_USER_PASSKEY_REQUEST,
116 MGMT_EV_DEVICE_FOUND,
118 MGMT_EV_DEVICE_BLOCKED,
119 MGMT_EV_DEVICE_UNBLOCKED,
120 MGMT_EV_DEVICE_UNPAIRED,
121 MGMT_EV_PASSKEY_NOTIFY,
124 MGMT_EV_DEVICE_ADDED,
125 MGMT_EV_DEVICE_REMOVED,
126 MGMT_EV_NEW_CONN_PARAM,
127 MGMT_EV_UNCONF_INDEX_ADDED,
128 MGMT_EV_UNCONF_INDEX_REMOVED,
129 MGMT_EV_NEW_CONFIG_OPTIONS,
/* How long the service cache stays valid before service_cache_off() runs. */
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* Fragment of struct pending_cmd — per-socket pending mgmt command.
 * NOTE(review): the struct declaration and most members are elided here;
 * later code also uses cmd->opcode, cmd->index, cmd->param, cmd->param_len,
 * cmd->sk and cmd->user_data.
 */
135 struct list_head list;
/* Optional per-command completion hook; see cmd_complete_rsp(). */
142 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
145 /* HCI to MGMT error code conversion table */
/* Indexed by HCI status code; mgmt_status() falls back to
 * MGMT_STATUS_FAILED for codes beyond the table.
 * NOTE(review): the entry for HCI success (0x00) is not visible in this
 * listing (line 147 elided) — confirm the table really starts at status 0.
 */
146 static u8 mgmt_status_table[] = {
148 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
149 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
150 MGMT_STATUS_FAILED, /* Hardware Failure */
151 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
152 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
153 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
154 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
155 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
157 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
158 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
159 MGMT_STATUS_BUSY, /* Command Disallowed */
160 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
161 MGMT_STATUS_REJECTED, /* Rejected Security */
162 MGMT_STATUS_REJECTED, /* Rejected Personal */
163 MGMT_STATUS_TIMEOUT, /* Host Timeout */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
165 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
166 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
167 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
168 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
169 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
170 MGMT_STATUS_BUSY, /* Repeated Attempts */
171 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
172 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
173 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
174 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
175 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
176 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
177 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
178 MGMT_STATUS_FAILED, /* Unspecified Error */
179 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
180 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
181 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
182 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
183 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
184 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
185 MGMT_STATUS_FAILED, /* Unit Link Key Used */
186 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
187 MGMT_STATUS_TIMEOUT, /* Instant Passed */
188 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
189 MGMT_STATUS_FAILED, /* Transaction Collision */
190 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
191 MGMT_STATUS_REJECTED, /* QoS Rejected */
192 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
193 MGMT_STATUS_REJECTED, /* Insufficient Security */
194 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
195 MGMT_STATUS_BUSY, /* Role Switch Pending */
196 MGMT_STATUS_FAILED, /* Slot Violation */
197 MGMT_STATUS_FAILED, /* Role Switch Failed */
198 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
199 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
200 MGMT_STATUS_BUSY, /* Host Busy Pairing */
201 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
202 MGMT_STATUS_BUSY, /* Controller Busy */
203 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
204 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
205 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
206 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
207 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
210 static u8 mgmt_status(u8 hci_status)
212 if (hci_status < ARRAY_SIZE(mgmt_status_table))
213 return mgmt_status_table[hci_status];
215 return MGMT_STATUS_FAILED;
/* Build an MGMT event skb (header + payload) and broadcast it to all
 * control sockets except skip_sk.
 * NOTE(review): several lines are elided here, including the alloc-failure
 * check and the if/else that picks hdev->id vs MGMT_INDEX_NONE for the
 * header index.
 */
218 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
219 struct sock *skip_sk)
222 struct mgmt_hdr *hdr;
224 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
228 hdr = (void *) skb_put(skb, sizeof(*hdr));
229 hdr->opcode = cpu_to_le16(event);
231 hdr->index = cpu_to_le16(hdev->id);
233 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
234 hdr->len = cpu_to_le16(data_len);
237 memcpy(skb_put(skb, data_len), data, data_len);
/* Timestamp so control-channel monitors see when the event was emitted. */
240 __net_timestamp(skb);
242 hci_send_to_control(skb, skip_sk);
/* Queue an MGMT_EV_CMD_STATUS event on sk for command `cmd` with the
 * given MGMT status code.
 */
248 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
251 struct mgmt_hdr *hdr;
252 struct mgmt_ev_cmd_status *ev;
255 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
257 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
261 hdr = (void *) skb_put(skb, sizeof(*hdr));
263 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
264 hdr->index = cpu_to_le16(index);
265 hdr->len = cpu_to_le16(sizeof(*ev));
267 ev = (void *) skb_put(skb, sizeof(*ev));
/* NOTE(review): assignment of ev->status is elided from this listing. */
269 ev->opcode = cpu_to_le16(cmd);
271 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE event on sk, carrying rp_len bytes of
 * response parameters (rp may be NULL when rp_len is 0).
 */
278 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
279 void *rp, size_t rp_len)
282 struct mgmt_hdr *hdr;
283 struct mgmt_ev_cmd_complete *ev;
286 BT_DBG("sock %p", sk);
288 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
292 hdr = (void *) skb_put(skb, sizeof(*hdr));
294 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
295 hdr->index = cpu_to_le16(index);
296 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
298 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
299 ev->opcode = cpu_to_le16(cmd);
/* Copy response parameters after the fixed event header. */
303 memcpy(ev->data, rp, rp_len);
305 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: report the mgmt interface version and
 * revision to the requesting socket.
 */
312 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
315 struct mgmt_rp_read_version rp;
317 BT_DBG("sock %p", sk);
319 rp.version = MGMT_VERSION;
320 rp.revision = cpu_to_le16(MGMT_REVISION);
322 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: return the list of supported command
 * and event opcodes as little-endian u16 values.
 */
326 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_commands *rp;
330 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
331 const u16 num_events = ARRAY_SIZE(mgmt_events);
336 BT_DBG("sock %p", sk);
338 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
340 rp = kmalloc(rp_size, GFP_KERNEL);
344 rp->num_commands = cpu_to_le16(num_commands);
345 rp->num_events = cpu_to_le16(num_events);
/* Commands first, then events, in one contiguous opcode array;
 * put_unaligned_le16 because rp->opcodes need not be aligned.
 */
347 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
348 put_unaligned_le16(mgmt_commands[i], opcode)
350 for (i = 0; i < num_events; i++, opcode++)
351 put_unaligned_le16(mgmt_events[i], opcode);
353 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: return the indexes of all configured
 * BR/EDR controllers. Counts in a first pass to size the response, then
 * fills indexes in a second pass, skipping devices in setup/config or
 * bound to a user channel.
 */
360 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
363 struct mgmt_rp_read_index_list *rp;
369 BT_DBG("sock %p", sk);
371 read_lock(&hci_dev_list_lock);
374 list_for_each_entry(d, &hci_dev_list, list) {
375 if (d->dev_type == HCI_BREDR &&
376 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
/* GFP_ATOMIC: allocation happens under hci_dev_list_lock. */
380 rp_len = sizeof(*rp) + (2 * count);
381 rp = kmalloc(rp_len, GFP_ATOMIC)
383 read_unlock(&hci_dev_list_lock);
388 list_for_each_entry(d, &hci_dev_list, list) {
389 if (test_bit(HCI_SETUP, &d->dev_flags) ||
390 test_bit(HCI_CONFIG, &d->dev_flags) ||
391 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
394 /* Devices marked as raw-only are neither configured
395 * nor unconfigured controllers.
397 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
400 if (d->dev_type == HCI_BREDR &&
401 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
402 rp->index[count++] = cpu_to_le16(d->id);
403 BT_DBG("Added hci%u", d->id);
/* Second pass may skip devices, so recompute count and rp_len. */
407 rp->num_controllers = cpu_to_le16(count);
408 rp_len = sizeof(*rp) + (2 * count);
410 read_unlock(&hci_dev_list_lock);
412 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass structure as
 * read_index_list(), but returns only BR/EDR controllers that still have
 * HCI_UNCONFIGURED set.
 */
420 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
421 void *data, u16 data_len)
423 struct mgmt_rp_read_unconf_index_list *rp;
429 BT_DBG("sock %p", sk);
431 read_lock(&hci_dev_list_lock);
434 list_for_each_entry(d, &hci_dev_list, list) {
435 if (d->dev_type == HCI_BREDR &&
436 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
/* GFP_ATOMIC: allocation happens under hci_dev_list_lock. */
440 rp_len = sizeof(*rp) + (2 * count);
441 rp = kmalloc(rp_len, GFP_ATOMIC)
443 read_unlock(&hci_dev_list_lock);
448 list_for_each_entry(d, &hci_dev_list, list) {
449 if (test_bit(HCI_SETUP, &d->dev_flags) ||
450 test_bit(HCI_CONFIG, &d->dev_flags) ||
451 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
454 /* Devices marked as raw-only are neither configured
455 * nor unconfigured controllers.
457 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
460 if (d->dev_type == HCI_BREDR &&
461 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
462 rp->index[count++] = cpu_to_le16(d->id);
463 BT_DBG("Added hci%u", d->id);
/* Second pass may skip devices, so recompute count and rp_len. */
467 rp->num_controllers = cpu_to_le16(count);
468 rp_len = sizeof(*rp) + (2 * count);
470 read_unlock(&hci_dev_list_lock);
472 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
/* A controller is "configured" once any required external configuration
 * has been applied and, for invalid-bdaddr hardware, a public address has
 * been set. NOTE(review): the return statements are elided here.
 */
480 static bool is_configured(struct hci_dev *hdev)
482 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
483 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
486 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
487 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Return the set of configuration options (as a little-endian bitmask)
 * that still need to be provided before the controller counts as
 * configured — mirrors the checks in is_configured().
 */
493 static __le32 get_missing_options(struct hci_dev *hdev)
497 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
498 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
499 options |= MGMT_OPTION_EXTERNAL_CONFIG;
501 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
502 !bacmp(&hdev->public_addr, BDADDR_ANY))
503 options |= MGMT_OPTION_PUBLIC_ADDRESS;
505 return cpu_to_le32(options);
508 static int new_options(struct hci_dev *hdev, struct sock *skip)
510 __le32 options = get_missing_options(hdev);
512 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
513 sizeof(options), skip);
516 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
518 __le32 options = get_missing_options(hdev);
520 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus which
 * configuration options this controller supports and which are still
 * missing. NOTE(review): the matching hci_dev_lock() call is elided from
 * this listing (only the unlock is visible).
 */
524 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
525 void *data, u16 data_len)
527 struct mgmt_rp_read_config_info rp;
530 BT_DBG("sock %p %s", sk, hdev->name);
534 memset(&rp, 0, sizeof(rp));
535 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
537 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
538 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* A settable public address is only supported with a set_bdaddr hook. */
540 if (hdev->set_bdaddr)
541 options |= MGMT_OPTION_PUBLIC_ADDRESS;
543 rp.supported_options = cpu_to_le32(options);
544 rp.missing_options = get_missing_options(hdev);
546 hci_dev_unlock(hdev);
548 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Compute the MGMT_SETTING_* bitmask this controller can support, based
 * on its LMP feature bits and quirks.
 */
552 static u32 get_supported_settings(struct hci_dev *hdev)
556 settings |= MGMT_SETTING_POWERED;
557 settings |= MGMT_SETTING_BONDABLE;
558 settings |= MGMT_SETTING_DEBUG_KEYS;
559 settings |= MGMT_SETTING_CONNECTABLE;
560 settings |= MGMT_SETTING_DISCOVERABLE;
562 if (lmp_bredr_capable(hdev)) {
/* Fast connectable requires interlaced page scan (HCI >= 1.2). */
563 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
564 settings |= MGMT_SETTING_FAST_CONNECTABLE;
565 settings |= MGMT_SETTING_BREDR;
566 settings |= MGMT_SETTING_LINK_SECURITY;
568 if (lmp_ssp_capable(hdev)) {
569 settings |= MGMT_SETTING_SSP;
570 settings |= MGMT_SETTING_HS;
573 if (lmp_sc_capable(hdev))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
/* NOTE(review): second half of this condition (line 585) is elided. */
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
586 settings |= MGMT_SETTING_CONFIGURATION;
/* Compute the MGMT_SETTING_* bitmask currently active on the controller,
 * derived from hdev->dev_flags and the powered state.
 */
591 static u32 get_current_settings(struct hci_dev *hdev)
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
/* PnP Information service class — excluded from EIR UUID lists below. */
640 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * `data` (at most `len` bytes); returns pointer past the written field.
 * Uses EIR_UUID16_SOME when the list is truncated for space.
 */
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
650 list_for_each_entry(uuid, &hdev->uuids, list) {
653 if (uuid->size != 16)
/* 16-bit alias lives in bytes 12..13 of the 128-bit base UUID. */
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
666 uuids_start[1] = EIR_UUID16_ALL;
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * `data` (at most `len` bytes); returns pointer past the written field.
 */
684 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
686 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
699 uuids_start[1] = EIR_UUID32_ALL;
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit alias lives in bytes 12..15 of the 128-bit base UUID. */
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
711 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * `data` (at most `len` bytes); returns pointer past the written field.
 */
717 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
719 u8 *ptr = data, *uuids_start = NULL;
720 struct bt_uuid *uuid;
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
732 uuids_start[1] = EIR_UUID128_ALL;
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
742 memcpy(ptr, uuid->uuid, 16);
744 uuids_start[0] += 16;
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 struct pending_cmd *cmd;
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
/* Like mgmt_pending_find(), but additionally match on cmd->user_data.
 * NOTE(review): the final parameter (the data pointer) and the return
 * statements are elided from this listing.
 */
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
766 struct pending_cmd *cmd;
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
771 if (cmd->opcode == opcode)
/* Build LE scan response data containing the local device name (complete
 * or shortened to fit HCI_MAX_AD_LENGTH); returns total bytes written.
 * NOTE(review): several lines, including the name-truncation branch body,
 * are elided from this listing.
 */
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
783 name_len = strlen(hdev->dev_name);
/* -2 accounts for the field's length and type bytes. */
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
787 if (name_len > max_len) {
789 ptr[1] = EIR_NAME_SHORT;
791 ptr[1] = EIR_NAME_COMPLETE;
793 ptr[0] = name_len + 1;
795 memcpy(ptr + 2, hdev->dev_name, name_len);
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
/* Queue an HCI_OP_LE_SET_SCAN_RSP_DATA command on the request if the scan
 * response data actually changed; no-op when LE is disabled or the data
 * is identical to what the controller already has.
 */
804 static void update_scan_rsp_data(struct hci_request *req)
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
813 memset(&cp, 0, sizeof(cp));
815 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discoverability flag (general/limited/none),
 * preferring the value from a pending SET_DISCOVERABLE command over the
 * current dev_flags.
 */
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
831 struct pending_cmd *cmd;
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
838 struct mgmt_mode *cp = cmd->param;
/* NOTE(review): the cp->val comparison for this branch (line 839)
 * is elided from this listing.
 */
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
/* Build LE advertising data (flags field plus optional TX power) into
 * ptr; returns the number of bytes written.
 * NOTE(review): the flags-field emission code (lines 864-872) is elided
 * from this listing.
 */
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
855 u8 ad_len = 0, flags = 0;
857 flags |= get_adv_discov_flags(hdev);
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
863 BT_DBG("adv flags 0x%02x", flags);
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI_OP_LE_SET_ADV_DATA command on the request if the
 * advertising data actually changed; no-op when LE is disabled.
 */
885 static void update_adv_data(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
894 memset(&cp, 0, sizeof(cp));
896 len = create_adv_data(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
910 int mgmt_update_adv_data(struct hci_dev *hdev)
912 struct hci_request req;
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
917 return hci_req_run(&req, NULL);
/* Build the Extended Inquiry Response payload: device name, TX power,
 * device ID, then the 16/32/128-bit UUID lists.
 * NOTE(review): several lines, including the name-length truncation
 * branch, are elided from this listing.
 */
920 static void create_eir(struct hci_dev *hdev, u8 *data)
925 name_len = strlen(hdev->dev_name);
931 ptr[1] = EIR_NAME_SHORT;
933 ptr[1] = EIR_NAME_COMPLETE;
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
938 memcpy(ptr + 2, hdev->dev_name, name_len);
940 ptr += (name_len + 2);
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
951 if (hdev->devid_source > 0) {
953 ptr[1] = EIR_DEVICE_ID;
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Each helper returns the advanced write pointer, so the remaining
 * space shrinks as each UUID list is appended.
 */
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI_OP_WRITE_EIR command when EIR is usable (powered,
 * EIR-capable, SSP enabled, service cache not active) and the payload
 * actually changed.
 */
968 static void update_eir(struct hci_request *req)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
973 if (!hdev_is_powered(hdev))
976 if (!lmp_ext_inq_capable(hdev))
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
985 memset(&cp, 0, sizeof(cp));
987 create_eir(hdev, cp.data);
/* Skip the HCI round-trip when the EIR payload is unchanged. */
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
997 static u8 get_service_classes(struct hci_dev *hdev)
999 struct bt_uuid *uuid;
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
/* Queue an HCI_OP_WRITE_CLASS_OF_DEV command if the computed Class of
 * Device differs from the current one; no-op when powered off, BR/EDR
 * disabled, or the service cache is active.
 */
1008 static void update_class(struct hci_request *req)
1010 struct hci_dev *hdev = req->hdev;
1013 BT_DBG("%s", hdev->name);
1015 if (!hdev_is_powered(hdev))
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
/* NOTE(review): the limited-discoverable adjustment to cod (line 1029)
 * is elided from this listing.
 */
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Return the effective connectable state, preferring the value from a
 * pending SET_CONNECTABLE command over the current flag.
 */
1037 static bool get_connectable(struct hci_dev *hdev)
1039 struct pending_cmd *cmd;
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 struct mgmt_mode *cp = cmd->param;
/* NOTE(review): the return based on cp->val (line 1047) is elided. */
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1053 static void disable_advertising(struct hci_request *req)
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the commands to (re)enable LE advertising: set parameters based
 * on the effective connectable state and then enable advertising.
 * Bails out while an LE connection exists.
 */
1060 static void enable_advertising(struct hci_request *req)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
/* Restart cleanly if advertising is already active. */
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req);
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1080 connectable = get_connectable(hdev);
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work: expire the service cache and push the now-current EIR
 * and Class of Device to the controller.
 * NOTE(review): the hci_dev_lock() call and the update_eir()/
 * update_class() calls (lines 1112-1115) are elided from this listing.
 */
1101 static void service_cache_off(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1110 hci_req_init(&req, hdev);
1117 hci_dev_unlock(hdev);
1119 hci_req_run(&req, NULL);
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is on, restart it so a fresh RPA gets programmed.
 */
1122 static void rpa_expired(struct work_struct *work)
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 struct hci_request req;
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation, triggered by the first mgmt
 * command for the device; idempotent via the HCI_MGMT flag.
 */
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: report address, version, manufacturer,
 * supported/current settings, class of device and names.
 * NOTE(review): the matching hci_dev_lock() call is elided from this
 * listing (only the unlock is visible).
 */
1159 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1160 void *data, u16 data_len)
1162 struct mgmt_rp_read_info rp;
1164 BT_DBG("sock %p %s", sk, hdev->name);
1168 memset(&rp, 0, sizeof(rp));
1170 bacpy(&rp.bdaddr, &hdev->bdaddr);
1172 rp.version = hdev->hci_ver;
1173 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1175 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1176 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1178 memcpy(rp.dev_class, hdev->dev_class, 3);
1180 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1181 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1183 hci_dev_unlock(hdev);
1185 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Free a pending_cmd and its owned resources.
 * NOTE(review): the function body (lines 1190-1194) is elided from this
 * listing.
 */
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd for (sk, opcode, hdev), duplicate the request
 * parameters, and link it onto hdev->mgmt_pending.
 * NOTE(review): the allocation-failure paths and the sk assignment/refcount
 * lines are elided from this listing.
 */
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 struct hci_dev *hdev, void *data,
1200 struct pending_cmd *cmd;
1202 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1206 cmd->opcode = opcode;
1207 cmd->index = hdev->id;
/* Own a private copy of the request parameters. */
1209 cmd->param = kmemdup(data, len, GFP_KERNEL);
1215 cmd->param_len = len;
1220 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke cb on every pending command matching opcode (opcode 0 matches
 * all); _safe iteration because callbacks may remove entries.
 * NOTE(review): the callback-parameter tail and the cb() invocation are
 * elided from this listing.
 */
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the commands needed to stop whatever discovery activity is in
 * progress (inquiry, LE scan, or name resolution) and disable passive
 * scanning. Returns whether any stop command was queued.
 * NOTE(review): several lines (break/return statements, the default case)
 * are elided from this listing.
 */
1264 static bool hci_stop_discovery(struct hci_request *req)
1266 struct hci_dev *hdev = req->hdev;
1267 struct hci_cp_remote_name_req_cancel cp;
1268 struct inquiry_entry *e;
1270 switch (hdev->discovery.state) {
1271 case DISCOVERY_FINDING:
1272 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1273 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1275 cancel_delayed_work(&hdev->le_scan_disable);
1276 hci_req_add_le_scan_disable(req);
1281 case DISCOVERY_RESOLVING:
1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1296 hci_req_add_le_scan_disable(req);
/* Prepare the controller for power-off: disable page/inquiry scan and
 * advertising, stop discovery, and tear down or reject every connection.
 * Returns the hci_req_run() result (-ENODATA when nothing was queued).
 */
1306 static int clean_up_hci_state(struct hci_dev *hdev)
1308 struct hci_request req;
1309 struct hci_conn *conn;
1310 bool discov_stopped;
1313 hci_req_init(&req, hdev);
1315 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1316 test_bit(HCI_PSCAN, &hdev->flags)) {
/* NOTE(review): the `u8 scan = 0x00;` declaration (line 1317) is
 * elided from this listing.
 */
1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1322 disable_advertising(&req);
1324 discov_stopped = hci_stop_discovery(&req);
1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1327 struct hci_cp_disconnect dc;
1328 struct hci_cp_reject_conn_req rej;
/* Established links get disconnected, half-open ones cancelled,
 * and incoming requests rejected — all citing power-off (0x15).
 */
1330 switch (conn->state) {
1333 dc.handle = cpu_to_le16(conn->handle);
1334 dc.reason = 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1338 if (conn->type == LE_LINK)
1339 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1341 else if (conn->type == ACL_LINK)
1342 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1346 bacpy(&rej.bdaddr, &conn->dst);
1347 rej.reason = 0x15; /* Terminated due to Power Off */
1348 if (conn->type == ACL_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1351 else if (conn->type == SCO_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1358 err = hci_req_run(&req, clean_up_hci_complete);
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the controller on or off. Rejects
 * duplicate pending requests; the AUTO_OFF path flips straight to mgmt
 * control; otherwise a pending command is queued and the work item (on)
 * or HCI cleanup (off) is started.
 * NOTE(review): several lines (the MGMT_STATUS_BUSY constant, error
 * checks, goto labels) are elided from this listing.
 */
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1372 BT_DBG("request for %s", hdev->name);
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1392 err = mgmt_powered(hdev, 1);
/* Already in the requested state: just ack with current settings. */
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1427 hci_dev_unlock(hdev);
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1440 int mgmt_new_settings(struct hci_dev *hdev)
1442 return new_settings(hdev, NULL);
/* Fragment of struct cmd_lookup (declaration elided): carries hdev and
 * the first responder's socket across mgmt_pending_foreach() callbacks.
 */
1447 struct hci_dev *hdev;
/* Foreach callback: answer each pending settings command with the
 * current settings, remember the first socket seen (held), and free
 * the command.
 */
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with the status
 * code pointed to by @data, then remove it.
 */
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own cmd_complete
 * handler when one is set; otherwise fall back to a plain status reply.
 */
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 if (cmd->cmd_complete) {
1480 cmd->cmd_complete(cmd, *status);
1481 mgmt_pending_remove(cmd);
1486 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the stored request parameters back
 * as the response payload.
 */
1489 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1491 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1492 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1495 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1497 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1498 sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, else SUCCESS.
 */
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 if (!lmp_bredr_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 return MGMT_STATUS_REJECTED;
1508 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, else SUCCESS.
 */
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 if (!lmp_le_capable(hdev))
1514 return MGMT_STATUS_NOT_SUPPORTED;
1515 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 return MGMT_STATUS_REJECTED;
1518 return MGMT_STATUS_SUCCESS;
/* HCI request completion hook for Set Discoverable: update HCI_DISCOVERABLE
 * flag state, answer the pending mgmt command, broadcast New Settings and
 * refresh page scan. NOTE(review): intermediate lines are elided from this
 * excerpt, so branch structure between the visible statements is partial.
 */
1521 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1524 struct pending_cmd *cmd;
1525 struct mgmt_mode *cp;
1526 struct hci_request req;
1529 BT_DBG("status 0x%02x", status);
1533 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* On HCI failure: report the mapped error and drop limited-discoverable. */
1538 u8 mgmt_err = mgmt_status(status);
1539 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1540 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1546 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the discoverable timeout if one was configured (ms granularity). */
1549 if (hdev->discov_timeout > 0) {
1550 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1551 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1555 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1559 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1562 new_settings(hdev, cmd->sk);
1564 /* When the discoverable mode gets changed, make sure
1565 * that class of device has the limited discoverable
1566 * bit correctly set. Also update page scan based on whitelist
1569 hci_req_init(&req, hdev);
1570 __hci_update_page_scan(&req);
1572 hci_req_run(&req, NULL);
1575 mgmt_pending_remove(cmd);
1578 hci_dev_unlock(hdev);
/* MGMT Set Discoverable handler. Values: 0x00 off, 0x01 general,
 * 0x02 limited discoverable; @timeout must be zero when disabling and
 * non-zero for limited mode. NOTE(review): this excerpt elides some
 * source lines, so several if/else bodies are only partially visible.
 */
1581 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1584 struct mgmt_cp_set_discoverable *cp = data;
1585 struct pending_cmd *cmd;
1586 struct hci_request req;
1591 BT_DBG("request for %s", hdev->name);
/* Need at least one of LE or BR/EDR enabled to be discoverable. */
1593 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1594 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1595 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1596 MGMT_STATUS_REJECTED);
1598 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1599 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_INVALID_PARAMS);
1602 timeout = __le16_to_cpu(cp->timeout);
1604 /* Disabling discoverable requires that no timeout is set,
1605 * and enabling limited discoverable requires a timeout.
1607 if ((cp->val == 0x00 && timeout > 0) ||
1608 (cp->val == 0x02 && timeout == 0))
1609 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1610 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off. */
1614 if (!hdev_is_powered(hdev) && timeout > 0) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1616 MGMT_STATUS_NOT_POWERED);
/* Busy while a discoverable/connectable change is still pending. */
1620 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1621 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1622 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first. */
1627 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1628 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1629 MGMT_STATUS_REJECTED);
/* Powered-off path: only flip the stored flag, no HCI traffic. */
1633 if (!hdev_is_powered(hdev)) {
1634 bool changed = false;
1636 /* Setting limited discoverable when powered off is
1637 * not a valid operation since it requires a timeout
1638 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1640 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1641 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1645 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1650 err = new_settings(hdev, sk);
1655 /* If the current mode is the same, then just update the timeout
1656 * value with the new value. And if only the timeout gets updated,
1657 * then no need for any HCI transactions.
1659 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1660 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1661 &hdev->dev_flags)) {
1662 cancel_delayed_work(&hdev->discov_off);
1663 hdev->discov_timeout = timeout;
1665 if (cp->val && hdev->discov_timeout > 0) {
1666 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1667 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1671 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1675 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1681 /* Cancel any potential discoverable timeout that might be
1682 * still active and store new timeout value. The arming of
1683 * the timeout happens in the complete handler.
1685 cancel_delayed_work(&hdev->discov_off);
1686 hdev->discov_timeout = timeout;
1688 /* Limited discoverable mode */
1689 if (cp->val == 0x02)
1690 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1692 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1694 hci_req_init(&req, hdev);
1696 /* The procedure for LE-only controllers is much simpler - just
1697 * update the advertising data.
1699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1705 struct hci_cp_write_current_iac_lap hci_cp;
1707 if (cp->val == 0x02) {
1708 /* Limited discoverable mode */
/* Advertise both LIAC (0x9e8b00) and GIAC (0x9e8b33), capped by
 * the controller's supported IAC count. */
1709 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1710 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1711 hci_cp.iac_lap[1] = 0x8b;
1712 hci_cp.iac_lap[2] = 0x9e;
1713 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1714 hci_cp.iac_lap[4] = 0x8b;
1715 hci_cp.iac_lap[5] = 0x9e;
1717 /* General discoverable mode */
1719 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1720 hci_cp.iac_lap[1] = 0x8b;
1721 hci_cp.iac_lap[2] = 0x9e;
/* Payload size: 3 bytes per IAC LAP plus the num_iac byte. */
1724 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1725 (hci_cp.num_iac * 3) + 1, &hci_cp);
1727 scan |= SCAN_INQUIRY;
1729 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1732 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1735 update_adv_data(&req);
1737 err = hci_req_run(&req, set_discoverable_complete);
1739 mgmt_pending_remove(cmd);
1742 hci_dev_unlock(hdev);
/* Queue page-scan parameter writes that enable (160 ms interlaced) or
 * disable (1.28 s standard) fast connectable mode; commands are only
 * queued when the values actually differ from the current ones.
 */
1746 static void write_fast_connectable(struct hci_request *req, bool enable)
1748 struct hci_dev *hdev = req->hdev;
1749 struct hci_cp_write_page_scan_activity acp;
/* Page scan is a BR/EDR concept; nothing to do otherwise. */
1752 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Interlaced scan and these parameters need Bluetooth 1.2 or later. */
1755 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1759 type = PAGE_SCAN_TYPE_INTERLACED;
1761 /* 160 msec page scan interval */
1762 acp.interval = cpu_to_le16(0x0100);
1764 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1766 /* default 1.28 sec page scan */
1767 acp.interval = cpu_to_le16(0x0800);
1770 acp.window = cpu_to_le16(0x0012);
/* Skip redundant HCI writes when hardware already matches. */
1772 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1773 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1774 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1777 if (hdev->page_scan_type != type)
1778 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion hook for Set Connectable: commit flag changes,
 * answer the pending command, and refresh page scan / advertising /
 * background scan when something actually changed. NOTE(review):
 * intermediate lines are elided in this excerpt.
 */
1781 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1784 struct pending_cmd *cmd;
1785 struct mgmt_mode *cp;
1786 bool conn_changed, discov_changed;
1788 BT_DBG("status 0x%02x", status);
1792 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1797 u8 mgmt_err = mgmt_status(status);
1798 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1804 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1806 discov_changed = false;
/* Disabling connectable also clears discoverable. */
1808 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1810 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1814 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1816 if (conn_changed || discov_changed) {
1817 new_settings(hdev, cmd->sk);
1818 hci_update_page_scan(hdev);
1820 mgmt_update_adv_data(hdev);
1821 hci_update_background_scan(hdev);
1825 mgmt_pending_remove(cmd);
1828 hci_dev_unlock(hdev);
/* Flag-only Set Connectable path (no HCI traffic): update the
 * HCI_CONNECTABLE flag (clearing HCI_DISCOVERABLE when disabling), send
 * the settings response, and broadcast New Settings on change.
 */
1831 static int set_connectable_update_settings(struct hci_dev *hdev,
1832 struct sock *sk, u8 val)
1834 bool changed = false;
1837 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1841 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1843 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Non-connectable implies non-discoverable. */
1844 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1847 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1852 hci_update_page_scan(hdev);
1853 hci_update_background_scan(hdev);
1854 return new_settings(hdev, sk);
/* MGMT Set Connectable handler: validate, then either take the
 * flag-only path (powered off) or build an HCI request adjusting scan
 * enable, fast connectable and advertising. NOTE(review): this excerpt
 * elides intermediate lines; some branch bodies are partially visible.
 */
1860 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1863 struct mgmt_mode *cp = data;
1864 struct pending_cmd *cmd;
1865 struct hci_request req;
1869 BT_DBG("request for %s", hdev->name);
1871 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1872 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1873 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1874 MGMT_STATUS_REJECTED);
1876 if (cp->val != 0x00 && cp->val != 0x01)
1877 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1878 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI commands possible, update flags only. */
1882 if (!hdev_is_powered(hdev)) {
1883 err = set_connectable_update_settings(hdev, sk, cp->val);
1887 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1888 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1889 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1900 hci_req_init(&req, hdev);
1902 /* If BR/EDR is not enabled and we disable advertising as a
1903 * by-product of disabling connectable, we need to update the
1904 * advertising flags.
1906 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1908 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1909 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1911 update_adv_data(&req);
1912 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1916 /* If we don't have any whitelist entries just
1917 * disable all scanning. If there are entries
1918 * and we had both page and inquiry scanning
1919 * enabled then fall back to only page scanning.
1920 * Otherwise no changes are needed.
1922 if (list_empty(&hdev->whitelist))
1923 scan = SCAN_DISABLED;
1924 else if (test_bit(HCI_ISCAN, &hdev->flags))
1927 goto no_scan_update;
/* Going non-connectable while discoverable: stop the timeout. */
1929 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1930 hdev->discov_timeout > 0)
1931 cancel_delayed_work(&hdev->discov_off);
1934 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1938 /* If we're going from non-connectable to connectable or
1939 * vice-versa when fast connectable is enabled ensure that fast
1940 * connectable gets disabled. write_fast_connectable won't do
1941 * anything if the page scan parameters are already what they
1944 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1945 write_fast_connectable(&req, false);
1947 /* Update the advertising parameters if necessary */
1948 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1949 enable_advertising(&req);
1951 err = hci_req_run(&req, set_connectable_complete);
1953 mgmt_pending_remove(cmd);
/* ENODATA: request was empty, fall back to the flag-only path. */
1954 if (err == -ENODATA)
1955 err = set_connectable_update_settings(hdev, sk,
1961 hci_dev_unlock(hdev);
/* MGMT Set Bondable handler: pure flag toggle (HCI_BONDABLE), no HCI
 * traffic; broadcasts New Settings when the flag actually changed.
 */
1965 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1968 struct mgmt_mode *cp = data;
1972 BT_DBG("request for %s", hdev->name);
1974 if (cp->val != 0x00 && cp->val != 0x01)
1975 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1976 MGMT_STATUS_INVALID_PARAMS);
1981 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1983 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1985 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1990 err = new_settings(hdev, sk);
1993 hci_dev_unlock(hdev);
/* MGMT Set Link Security handler: flag-only when powered off, otherwise
 * sends HCI Write Authentication Enable. NOTE(review): intermediate
 * lines are elided in this excerpt.
 */
1997 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2000 struct mgmt_mode *cp = data;
2001 struct pending_cmd *cmd;
2005 BT_DBG("request for %s", hdev->name);
/* Link security (authentication) is a BR/EDR feature. */
2007 status = mgmt_bredr_support(hdev);
2009 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2012 if (cp->val != 0x00 && cp->val != 0x01)
2013 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2014 MGMT_STATUS_INVALID_PARAMS);
2018 if (!hdev_is_powered(hdev)) {
2019 bool changed = false;
2021 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2022 &hdev->dev_flags)) {
2023 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2027 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2032 err = new_settings(hdev, sk);
2037 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2038 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches: reply without HCI traffic. */
2045 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2046 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2050 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2056 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2058 mgmt_pending_remove(cmd);
2063 hci_dev_unlock(hdev);
/* MGMT Set Secure Simple Pairing handler: flag-only when powered off
 * (disabling SSP also drops High Speed), otherwise sends HCI Write
 * Simple Pairing Mode. NOTE(review): intermediate lines are elided.
 */
2067 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2069 struct mgmt_mode *cp = data;
2070 struct pending_cmd *cmd;
2074 BT_DBG("request for %s", hdev->name);
2076 status = mgmt_bredr_support(hdev);
2078 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2080 if (!lmp_ssp_capable(hdev))
2081 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2082 MGMT_STATUS_NOT_SUPPORTED);
2084 if (cp->val != 0x00 && cp->val != 0x01)
2085 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2086 MGMT_STATUS_INVALID_PARAMS);
2090 if (!hdev_is_powered(hdev)) {
2094 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2097 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* High Speed depends on SSP, so it is cleared alongside. */
2100 changed = test_and_clear_bit(HCI_HS_ENABLED,
2103 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2106 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2111 err = new_settings(hdev, sk);
2116 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2117 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2118 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2123 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2124 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2128 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off while debug keys are in use also disables SSP
 * debug mode (cp->val is 0x00 here). */
2134 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2135 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2136 sizeof(cp->val), &cp->val);
2138 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2140 mgmt_pending_remove(cmd);
2145 hci_dev_unlock(hdev);
/* MGMT Set High Speed handler: HS is a host-side flag (no HCI command
 * visible here); requires BR/EDR, SSP capability and SSP enabled.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
2149 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2151 struct mgmt_mode *cp = data;
2156 BT_DBG("request for %s", hdev->name);
2158 status = mgmt_bredr_support(hdev);
2160 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2162 if (!lmp_ssp_capable(hdev))
2163 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2164 MGMT_STATUS_NOT_SUPPORTED);
2166 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2167 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2168 MGMT_STATUS_REJECTED);
2170 if (cp->val != 0x00 && cp->val != 0x01)
2171 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2172 MGMT_STATUS_INVALID_PARAMS);
2177 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
/* NOTE(review): this REJECTED-when-powered branch sits on the
 * disable path in the original; the elided lines hide the exact
 * surrounding if/else — verify against the full source. */
2179 if (hdev_is_powered(hdev)) {
2180 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2181 MGMT_STATUS_REJECTED);
2185 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2188 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2193 err = new_settings(hdev, sk);
2196 hci_dev_unlock(hdev);
/* HCI completion hook for Set LE: answer all pending SET_LE commands
 * (error path uses cmd_status_rsp, success path settings_rsp), broadcast
 * New Settings, and refresh advertising/scan state if LE ended enabled.
 */
2200 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2202 struct cmd_lookup match = { NULL, hdev };
2207 u8 mgmt_err = mgmt_status(status);
2209 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2214 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2216 new_settings(hdev, match.sk);
2221 /* Make sure the controller has a good default for
2222 * advertising data. Restrict the update to when LE
2223 * has actually been enabled. During power on, the
2224 * update in powered_update_hci will take care of it.
2226 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2227 struct hci_request req;
2229 hci_req_init(&req, hdev);
2230 update_adv_data(&req);
2231 update_scan_rsp_data(&req);
2232 __hci_update_background_scan(&req);
2233 hci_req_run(&req, NULL);
2237 hci_dev_unlock(hdev);
/* MGMT Set Low Energy handler: flag-only when powered off or when the
 * host-LE state already matches; otherwise sends HCI Write LE Host
 * Supported (disabling advertising first when turning LE off).
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
2240 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2242 struct mgmt_mode *cp = data;
2243 struct hci_cp_write_le_host_supported hci_cp;
2244 struct pending_cmd *cmd;
2245 struct hci_request req;
2249 BT_DBG("request for %s", hdev->name);
2251 if (!lmp_le_capable(hdev))
2252 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2253 MGMT_STATUS_NOT_SUPPORTED);
2255 if (cp->val != 0x00 && cp->val != 0x01)
2256 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2257 MGMT_STATUS_INVALID_PARAMS);
2259 /* LE-only devices do not allow toggling LE on/off */
2260 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2261 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2262 MGMT_STATUS_REJECTED);
2267 enabled = lmp_host_le_capable(hdev);
2269 if (!hdev_is_powered(hdev) || val == enabled) {
2270 bool changed = false;
2272 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2273 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* LE advertising cannot stay on without LE. */
2277 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2278 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2282 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2287 err = new_settings(hdev, sk);
2292 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2293 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2294 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2299 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2305 hci_req_init(&req, hdev);
2307 memset(&hci_cp, 0, sizeof(hci_cp));
2311 hci_cp.simul = 0x00;
2313 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2314 disable_advertising(&req);
2317 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2320 err = hci_req_run(&req, le_enable_complete);
2322 mgmt_pending_remove(cmd);
2325 hci_dev_unlock(hdev);
2329 /* This is a helper function to test for pending mgmt commands that can
2330 * cause CoD or EIR HCI commands. We can only allow one such pending
2331 * mgmt command at a time since otherwise we cannot easily track what
2332 * the current values are, will be, and based on that calculate if a new
2333 * HCI command needs to be sent and if yes with what value.
2335 static bool pending_eir_or_class(struct hci_dev *hdev)
2337 struct pending_cmd *cmd;
/* Walk all pending mgmt commands looking for the EIR/CoD-affecting
 * opcodes listed below. */
2339 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2340 switch (cmd->opcode) {
2341 case MGMT_OP_ADD_UUID:
2342 case MGMT_OP_REMOVE_UUID:
2343 case MGMT_OP_SET_DEV_CLASS:
2344 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short UUIDs.
 */
2352 static const u8 bluetooth_base_uuid[] = {
2353 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2354 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its first 12 bytes differ from the
 * Bluetooth Base UUID it is a true 128-bit UUID; otherwise inspect the
 * 32-bit value at offset 12 to distinguish 16- vs 32-bit short forms
 * (the returned sizes are decided in lines elided from this excerpt).
 */
2357 static u8 get_uuid_size(const u8 *uuid)
2361 if (memcmp(uuid, bluetooth_base_uuid, 12))
2364 val = get_unaligned_le32(&uuid[12]);
/* Complete a pending class-of-device-affecting command (@mgmt_op) with
 * the mapped status and the current 3-byte device class as payload.
 */
2371 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2373 struct pending_cmd *cmd;
2377 cmd = mgmt_pending_find(mgmt_op, hdev);
2381 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2382 hdev->dev_class, 3);
2384 mgmt_pending_remove(cmd);
2387 hci_dev_unlock(hdev);
/* HCI completion hook for Add UUID: delegate to mgmt_class_complete. */
2390 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2392 BT_DBG("status 0x%02x", status);
2394 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID handler: append the UUID to hdev->uuids and run an HCI
 * request to refresh class/EIR; ENODATA from hci_req_run means nothing
 * needed updating, so complete immediately. NOTE(review): error paths
 * (e.g. kmalloc failure handling) are elided from this excerpt.
 */
2397 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2399 struct mgmt_cp_add_uuid *cp = data;
2400 struct pending_cmd *cmd;
2401 struct hci_request req;
2402 struct bt_uuid *uuid;
2405 BT_DBG("request for %s", hdev->name);
/* Only one EIR/CoD-affecting command may be in flight at a time. */
2409 if (pending_eir_or_class(hdev)) {
2410 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2415 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2421 memcpy(uuid->uuid, cp->uuid, 16);
2422 uuid->svc_hint = cp->svc_hint;
2423 uuid->size = get_uuid_size(cp->uuid);
2425 list_add_tail(&uuid->list, &hdev->uuids);
2427 hci_req_init(&req, hdev);
2432 err = hci_req_run(&req, add_uuid_complete);
2434 if (err != -ENODATA)
2437 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2438 hdev->dev_class, 3);
2442 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2451 hci_dev_unlock(hdev);
/* Arm the service cache: when powered and not already caching, schedule
 * the service_cache delayed work. Return value semantics are partly
 * hidden by elided lines — presumably true when the cache was (re)armed.
 */
2455 static bool enable_service_cache(struct hci_dev *hdev)
2457 if (!hdev_is_powered(hdev))
2460 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2461 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI completion hook for Remove UUID: delegate to mgmt_class_complete. */
2469 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2471 BT_DBG("status 0x%02x", status);
2473 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID handler: an all-zero UUID clears the whole list
 * (re-arming the service cache when possible); otherwise remove every
 * matching entry, failing with INVALID_PARAMS when none matched, then
 * refresh class/EIR via an HCI request. NOTE(review): some lines are
 * elided from this excerpt.
 */
2476 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2479 struct mgmt_cp_remove_uuid *cp = data;
2480 struct pending_cmd *cmd;
2481 struct bt_uuid *match, *tmp;
/* All-zero UUID is the "remove everything" wildcard. */
2482 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2483 struct hci_request req;
2486 BT_DBG("request for %s", hdev->name);
2490 if (pending_eir_or_class(hdev)) {
2491 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2496 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2497 hci_uuids_clear(hdev);
2499 if (enable_service_cache(hdev)) {
2500 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2501 0, hdev->dev_class, 3);
/* _safe iteration: entries are deleted while walking. */
2510 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2511 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2514 list_del(&match->list);
2520 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2521 MGMT_STATUS_INVALID_PARAMS);
2526 hci_req_init(&req, hdev);
2531 err = hci_req_run(&req, remove_uuid_complete);
2533 if (err != -ENODATA)
2536 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2537 hdev->dev_class, 3);
2541 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2550 hci_dev_unlock(hdev);
/* HCI completion hook for Set Device Class: delegate to
 * mgmt_class_complete.
 */
2554 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2556 BT_DBG("status 0x%02x", status);
2558 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class handler: store major/minor class, then (when
 * powered) run an HCI request to write the class, flushing the service
 * cache first if it was armed. NOTE(review): some lines are elided.
 */
2561 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2564 struct mgmt_cp_set_dev_class *cp = data;
2565 struct pending_cmd *cmd;
2566 struct hci_request req;
2569 BT_DBG("request for %s", hdev->name);
2571 if (!lmp_bredr_capable(hdev))
2572 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2573 MGMT_STATUS_NOT_SUPPORTED);
2577 if (pending_eir_or_class(hdev)) {
2578 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2583 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2584 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2585 MGMT_STATUS_INVALID_PARAMS);
2589 hdev->major_class = cp->major;
2590 hdev->minor_class = cp->minor;
2592 if (!hdev_is_powered(hdev)) {
2593 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2594 hdev->dev_class, 3);
2598 hci_req_init(&req, hdev);
/* Drop the lock around cancel_delayed_work_sync() to avoid deadlock
 * with the service-cache work taking hci_dev_lock. */
2600 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2601 hci_dev_unlock(hdev);
2602 cancel_delayed_work_sync(&hdev->service_cache);
2609 err = hci_req_run(&req, set_class_complete);
2611 if (err != -ENODATA)
2614 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2615 hdev->dev_class, 3);
2619 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2628 hci_dev_unlock(hdev);
/* MGMT Load Link Keys handler: validate the variable-length key list
 * (count bounded so the total size fits in u16, exact length required),
 * clear existing keys, update the keep-debug-keys flag, and store each
 * key except debug-combination keys. NOTE(review): some lines elided.
 */
2632 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2635 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on key_count such that the full payload fits in U16_MAX. */
2636 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2637 sizeof(struct mgmt_link_key_info));
2638 u16 key_count, expected_len;
2642 BT_DBG("request for %s", hdev->name);
2644 if (!lmp_bredr_capable(hdev))
2645 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2646 MGMT_STATUS_NOT_SUPPORTED);
2648 key_count = __le16_to_cpu(cp->key_count);
2649 if (key_count > max_key_count) {
2650 BT_ERR("load_link_keys: too big key_count value %u",
2652 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2653 MGMT_STATUS_INVALID_PARAMS);
/* The payload must be exactly header + key_count entries. */
2656 expected_len = sizeof(*cp) + key_count *
2657 sizeof(struct mgmt_link_key_info);
2658 if (expected_len != len) {
2659 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2661 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2662 MGMT_STATUS_INVALID_PARAMS);
2665 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2666 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2667 MGMT_STATUS_INVALID_PARAMS);
2669 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: reject the whole load on any malformed entry. */
2672 for (i = 0; i < key_count; i++) {
2673 struct mgmt_link_key_info *key = &cp->keys[i];
2675 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2676 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2677 MGMT_STATUS_INVALID_PARAMS);
2682 hci_link_keys_clear(hdev);
2685 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2688 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2692 new_settings(hdev, NULL);
/* Second pass: store the validated keys. */
2694 for (i = 0; i < key_count; i++) {
2695 struct mgmt_link_key_info *key = &cp->keys[i];
2697 /* Always ignore debug keys and require a new pairing if
2698 * the user wants to use them.
2700 if (key->type == HCI_LK_DEBUG_COMBINATION)
2703 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2704 key->type, key->pin_len, NULL);
2707 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2709 hci_dev_unlock(hdev);
/* Emit a Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk.
 */
2714 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2715 u8 addr_type, struct sock *skip_sk)
2717 struct mgmt_ev_device_unpaired ev;
2719 bacpy(&ev.addr.bdaddr, bdaddr);
2720 ev.addr.type = addr_type;
2722 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device handler: delete stored keys (link key for BR/EDR;
 * IRK and LTK for LE), optionally disconnect the existing connection,
 * and notify other sockets via Device Unpaired. NOTE(review): some
 * branch bodies are elided in this excerpt.
 */
2726 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct mgmt_cp_unpair_device *cp = data;
2730 struct mgmt_rp_unpair_device rp;
2731 struct hci_cp_disconnect dc;
2732 struct pending_cmd *cmd;
2733 struct hci_conn *conn;
2736 memset(&rp, 0, sizeof(rp));
2737 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2738 rp.addr.type = cp->addr.type;
2740 if (!bdaddr_type_is_valid(cp->addr.type))
2741 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2742 MGMT_STATUS_INVALID_PARAMS,
2745 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2746 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2747 MGMT_STATUS_INVALID_PARAMS,
2752 if (!hdev_is_powered(hdev)) {
2753 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2754 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2758 if (cp->addr.type == BDADDR_BREDR) {
2759 /* If disconnection is requested, then look up the
2760 * connection. If the remote device is connected, it
2761 * will be later used to terminate the link.
2763 * Setting it to NULL explicitly will cause no
2764 * termination of the link.
2767 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2772 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE path: look up any LE link to this address. */
2776 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2779 /* Defer clearing up the connection parameters
2780 * until closing to give a chance of keeping
2781 * them if a repairing happens.
2783 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2785 /* If disconnection is not requested, then
2786 * clear the connection variable so that the
2787 * link is not terminated.
2789 if (!cp->disconnect)
2793 if (cp->addr.type == BDADDR_LE_PUBLIC)
2794 addr_type = ADDR_LE_DEV_PUBLIC;
2796 addr_type = ADDR_LE_DEV_RANDOM;
2798 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2800 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* Key removal failed => the device was never paired. */
2804 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2805 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2809 /* If the connection variable is set, then termination of the
2810 * link is requested.
2813 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2815 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2819 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2826 cmd->cmd_complete = addr_cmd_complete;
2828 dc.handle = cpu_to_le16(conn->handle);
2829 dc.reason = 0x13; /* Remote User Terminated Connection */
2830 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2832 mgmt_pending_remove(cmd);
2835 hci_dev_unlock(hdev);
/* MGMT Disconnect handler: find the ACL (BR/EDR) or LE connection for
 * the given address and terminate it with Remote User Terminated
 * Connection. NOTE(review): some lines are elided in this excerpt.
 */
2839 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2842 struct mgmt_cp_disconnect *cp = data;
2843 struct mgmt_rp_disconnect rp;
2844 struct pending_cmd *cmd;
2845 struct hci_conn *conn;
2850 memset(&rp, 0, sizeof(rp));
2851 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2852 rp.addr.type = cp->addr.type;
2854 if (!bdaddr_type_is_valid(cp->addr.type))
2855 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2856 MGMT_STATUS_INVALID_PARAMS,
2861 if (!test_bit(HCI_UP, &hdev->flags)) {
2862 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2863 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one mgmt-initiated disconnect at a time. */
2867 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2868 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2869 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2873 if (cp->addr.type == BDADDR_BREDR)
2874 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2877 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2879 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2880 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2881 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2885 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2891 cmd->cmd_complete = generic_cmd_complete;
2893 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2895 mgmt_pending_remove(cmd);
2898 hci_dev_unlock(hdev);
/* Translate an HCI link type + address type pair into an MGMT BDADDR_*
 * address type. NOTE(review): the case labels for @link_type and the
 * random-address case are elided from this excerpt; only the fallback
 * arms are visible.
 */
2902 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2904 switch (link_type) {
2906 switch (addr_type) {
2907 case ADDR_LE_DEV_PUBLIC:
2908 return BDADDR_LE_PUBLIC;
2911 /* Fallback to LE Random address type */
2912 return BDADDR_LE_RANDOM;
2916 /* Fallback to BR/EDR type */
2917 return BDADDR_BREDR;
/* MGMT Get Connections handler: two passes over the connection hash —
 * first count MGMT-visible connections to size the reply, then fill in
 * addresses while skipping SCO/eSCO links, recomputing the final length.
 */
2921 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2924 struct mgmt_rp_get_connections *rp;
2934 if (!hdev_is_powered(hdev)) {
2935 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2936 MGMT_STATUS_NOT_POWERED);
/* Pass 1: count connections flagged as MGMT-connected. */
2941 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2942 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2946 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2947 rp = kmalloc(rp_len, GFP_KERNEL);
/* Pass 2: fill addresses; SCO/eSCO entries are filtered out. */
2954 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2955 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2957 bacpy(&rp->addr[i].bdaddr, &c->dst);
2958 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2959 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2964 rp->conn_count = cpu_to_le16(i);
2966 /* Recalculate length in case of filtered SCO connections, etc */
2967 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2969 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2975 hci_dev_unlock(hdev);
/* Register a pending PIN Code Neg Reply command and send the HCI
 * PIN Code Negative Reply for the given address; the pending entry is
 * removed if the HCI send fails.
 */
2979 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2980 struct mgmt_cp_pin_code_neg_reply *cp)
2982 struct pending_cmd *cmd;
2985 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2990 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2991 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2993 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply handler: validate power state and connection,
 * convert a too-short PIN into a negative reply when high security
 * requires a 16-byte PIN, then send HCI PIN Code Reply.
 */
2998 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3001 struct hci_conn *conn;
3002 struct mgmt_cp_pin_code_reply *cp = data;
3003 struct hci_cp_pin_code_reply reply;
3004 struct pending_cmd *cmd;
3011 if (!hdev_is_powered(hdev)) {
3012 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3013 MGMT_STATUS_NOT_POWERED);
3017 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3019 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3020 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a full 16-byte PIN; otherwise NAK the request. */
3024 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3025 struct mgmt_cp_pin_code_neg_reply ncp;
3027 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3029 BT_ERR("PIN code is not 16 bytes long");
3031 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3033 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3034 MGMT_STATUS_INVALID_PARAMS);
3039 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3045 cmd->cmd_complete = addr_cmd_complete;
3047 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3048 reply.pin_len = cp->pin_len;
3049 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3051 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3053 mgmt_pending_remove(cmd);
3056 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: range-check the requested IO
 * capability and store it in hdev->io_capability under the dev lock.
 * Replies with cmd_complete in both the error and success cases.
 */
3060 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3063 struct mgmt_cp_set_io_capability *cp = data;
3067 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3068 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3069 MGMT_STATUS_INVALID_PARAMS, NULL, 0)
3073 hdev->io_capability = cp->io_capability;
3075 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3076 hdev->io_capability);
3078 hci_dev_unlock(hdev);
3080 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Walk the adapter's mgmt_pending list looking for the pending
 * MGMT_OP_PAIR_DEVICE command whose user_data is this connection.
 * Returns the pending command, or NULL if none matches (return
 * statements elided by extraction).
 */
3084 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3086 struct hci_dev *hdev = conn->hdev;
3087 struct pending_cmd *cmd;
3089 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3090 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3093 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the Pair Device reply carrying the
 * peer address and status, detach the pairing callbacks from the
 * connection so no further events reach this command, and drop the
 * connection reference taken when pairing started.
 */
3102 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3104 struct mgmt_rp_pair_device rp;
3105 struct hci_conn *conn = cmd->user_data;
3108 bacpy(&rp.addr.bdaddr, &conn->dst);
3109 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3111 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3114 /* So we don't get further callbacks for this connection */
3115 conn->connect_cfm_cb = NULL;
3116 conn->security_cfm_cb = NULL;
3117 conn->disconn_cfm_cb = NULL;
3119 hci_conn_drop(conn);
3121 /* The device is paired so there is no need to remove
3122 * its connection parameters anymore.
3124 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* SMP pairing finished: translate the boolean outcome into a mgmt
 * status and complete + remove any pending PAIR_DEVICE command for
 * this connection (NULL check elided by extraction).
 */
3131 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3133 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3134 struct pending_cmd *cmd;
3136 cmd = find_pairing(conn);
3138 cmd->cmd_complete(cmd, status);
3139 mgmt_pending_remove(cmd);
/* BR/EDR connection callback (connect/security/disconnect): complete
 * the matching pending PAIR_DEVICE command with the translated HCI
 * status, if one is still outstanding.
 */
3143 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3145 struct pending_cmd *cmd;
3147 BT_DBG("status %u", status);
3149 cmd = find_pairing(conn);
3151 BT_DBG("Unable to find a pending command");
3155 cmd->cmd_complete(cmd, mgmt_status(status));
3156 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb. NOTE(review): the LE-specific
 * early-return logic between lines 3163 and 3168 was elided by
 * extraction — confirm against the full tree.
 */
3159 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3161 struct pending_cmd *cmd;
3163 BT_DBG("status %u", status);
3168 cmd = find_pairing(conn);
3170 BT_DBG("Unable to find a pending command");
3174 cmd->cmd_complete(cmd, mgmt_status(status));
3175 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the address type and IO
 * capability, require a powered adapter, then initiate either a BR/EDR
 * (hci_connect_acl) or LE (hci_connect_le) connection with dedicated
 * bonding. Installs pairing completion callbacks and a pending command
 * that is resolved from those callbacks or immediately if the link is
 * already up and secure.
 */
3178 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3181 struct mgmt_cp_pair_device *cp = data;
3182 struct mgmt_rp_pair_device rp;
3183 struct pending_cmd *cmd;
3184 u8 sec_level, auth_type;
3185 struct hci_conn *conn;
3190 memset(&rp, 0, sizeof(rp));
3191 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3192 rp.addr.type = cp->addr.type;
3194 if (!bdaddr_type_is_valid(cp->addr.type))
3195 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3196 MGMT_STATUS_INVALID_PARAMS,
3199 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3200 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3201 MGMT_STATUS_INVALID_PARAMS,
3206 if (!hdev_is_powered(hdev)) {
3207 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3208 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3212 sec_level = BT_SECURITY_MEDIUM;
3213 auth_type = HCI_AT_DEDICATED_BONDING;
3215 if (cp->addr.type == BDADDR_BREDR) {
3216 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3221 /* Convert from L2CAP channel address type to HCI address type
3223 if (cp->addr.type == BDADDR_LE_PUBLIC)
3224 addr_type = ADDR_LE_DEV_PUBLIC;
3226 addr_type = ADDR_LE_DEV_RANDOM;
3228 /* When pairing a new device, it is expected to remember
3229 * this device for future connections. Adding the connection
3230 * parameter information ahead of time allows tracking
3231 * of the slave preferred values and will speed up any
3232 * further connection establishment.
3234 * If connection parameters already exist, then they
3235 * will be kept and this function does nothing.
3237 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3239 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3240 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map connect failure reasons: -EBUSY becomes MGMT_STATUS_BUSY,
 * anything else a generic connect failure.
 */
3247 if (PTR_ERR(conn) == -EBUSY)
3248 status = MGMT_STATUS_BUSY;
3250 status = MGMT_STATUS_CONNECT_FAILED;
3252 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connection with pairing callbacks already attached means another
 * pairing attempt is in progress on it.
 */
3258 if (conn->connect_cfm_cb) {
3259 hci_conn_drop(conn);
3260 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3261 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3265 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3268 hci_conn_drop(conn);
3272 cmd->cmd_complete = pairing_complete;
3274 /* For LE, just connecting isn't a proof that the pairing finished */
3275 if (cp->addr.type == BDADDR_BREDR) {
3276 conn->connect_cfm_cb = pairing_complete_cb;
3277 conn->security_cfm_cb = pairing_complete_cb;
3278 conn->disconn_cfm_cb = pairing_complete_cb;
3280 conn->connect_cfm_cb = le_pairing_complete_cb;
3281 conn->security_cfm_cb = le_pairing_complete_cb;
3282 conn->disconn_cfm_cb = le_pairing_complete_cb;
3285 conn->io_capability = cp->io_cap;
/* Pending command holds its own connection reference until complete */
3286 cmd->user_data = hci_conn_get(conn);
3288 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3289 hci_conn_security(conn, sec_level, auth_type, true)) {
3290 cmd->cmd_complete(cmd, 0);
3291 mgmt_pending_remove(cmd);
3297 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: find the outstanding PAIR_DEVICE
 * command, verify the caller supplied the same peer address, complete
 * it with MGMT_STATUS_CANCELLED and acknowledge the cancel request.
 */
3301 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3304 struct mgmt_addr_info *addr = data;
3305 struct pending_cmd *cmd;
3306 struct hci_conn *conn;
3313 if (!hdev_is_powered(hdev)) {
3314 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3315 MGMT_STATUS_NOT_POWERED);
3319 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3321 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3322 MGMT_STATUS_INVALID_PARAMS);
3326 conn = cmd->user_data;
/* Address must match the connection being paired */
3328 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3329 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3330 MGMT_STATUS_INVALID_PARAMS);
3334 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3335 mgmt_pending_remove(cmd);
3337 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3338 addr, sizeof(*addr));
3340 hci_dev_unlock(hdev);
/* Common backend for the user confirmation / passkey (and PIN negative)
 * reply commands. For LE addresses the reply is routed to SMP via
 * smp_user_confirm_reply(); for BR/EDR a pending command is queued and
 * the corresponding HCI command (hci_op) is sent, with the passkey
 * included when the op is HCI_OP_USER_PASSKEY_REPLY.
 */
3344 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3345 struct mgmt_addr_info *addr, u16 mgmt_op,
3346 u16 hci_op, __le32 passkey)
3348 struct pending_cmd *cmd;
3349 struct hci_conn *conn;
3354 if (!hdev_is_powered(hdev)) {
3355 err = cmd_complete(sk, hdev->id, mgmt_op,
3356 MGMT_STATUS_NOT_POWERED, addr,
3361 if (addr->type == BDADDR_BREDR)
3362 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3364 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3367 err = cmd_complete(sk, hdev->id, mgmt_op,
3368 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies are handled by SMP, not HCI */
3373 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3374 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3376 err = cmd_complete(sk, hdev->id, mgmt_op,
3377 MGMT_STATUS_SUCCESS, addr,
3380 err = cmd_complete(sk, hdev->id, mgmt_op,
3381 MGMT_STATUS_FAILED, addr,
3387 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3393 cmd->cmd_complete = addr_cmd_complete;
3395 /* Continue with pairing via HCI */
3396 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3397 struct hci_cp_user_passkey_reply cp;
3399 bacpy(&cp.bdaddr, &addr->bdaddr);
3400 cp.passkey = passkey;
3401 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3403 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3407 mgmt_pending_remove(cmd);
3410 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler — thin wrapper delegating to
 * user_pairing_resp() with HCI_OP_PIN_CODE_NEG_REPLY and no passkey.
 */
3414 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3415 void *data, u16 len)
3417 struct mgmt_cp_pin_code_neg_reply *cp = data;
3421 return user_pairing_resp(sk, hdev, &cp->addr,
3422 MGMT_OP_PIN_CODE_NEG_REPLY,
3423 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: reject payloads that are not
 * exactly sizeof(*cp) (this command carries a fixed-size body), then
 * delegate to user_pairing_resp().
 */
3426 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3429 struct mgmt_cp_user_confirm_reply *cp = data;
3433 if (len != sizeof(*cp))
3434 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3435 MGMT_STATUS_INVALID_PARAMS);
3437 return user_pairing_resp(sk, hdev, &cp->addr,
3438 MGMT_OP_USER_CONFIRM_REPLY,
3439 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler — wrapper around
 * user_pairing_resp() with HCI_OP_USER_CONFIRM_NEG_REPLY.
 */
3442 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3443 void *data, u16 len)
3445 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3449 return user_pairing_resp(sk, hdev, &cp->addr,
3450 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3451 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler — forwards the user-supplied
 * passkey through user_pairing_resp() / HCI_OP_USER_PASSKEY_REPLY.
 */
3454 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3457 struct mgmt_cp_user_passkey_reply *cp = data;
3461 return user_pairing_resp(sk, hdev, &cp->addr,
3462 MGMT_OP_USER_PASSKEY_REPLY,
3463 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler — wrapper around
 * user_pairing_resp() with HCI_OP_USER_PASSKEY_NEG_REPLY.
 */
3466 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3467 void *data, u16 len)
3469 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3473 return user_pairing_resp(sk, hdev, &cp->addr,
3474 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3475 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (carrying hdev->dev_name) to
 * the given request. Caller runs the request.
 */
3478 static void update_name(struct hci_request *req)
3480 struct hci_dev *hdev = req->hdev;
3481 struct hci_cp_write_local_name cp;
3483 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3485 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-complete callback for Set Local Name: resolve the pending
 * MGMT_OP_SET_LOCAL_NAME command with a status or complete reply
 * depending on the HCI outcome, then drop the pending entry.
 */
3488 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3490 struct mgmt_cp_set_local_name *cp;
3491 struct pending_cmd *cmd;
3493 BT_DBG("status 0x%02x", status);
3497 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3504 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3505 mgmt_status(status));
3507 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3510 mgmt_pending_remove(cmd);
3513 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: short-circuit when nothing changed;
 * when powered off just store the names and emit Local Name Changed;
 * otherwise queue a request that writes the name (and, on LE-capable
 * controllers, refreshes the scan response data which carries it).
 */
3516 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3519 struct mgmt_cp_set_local_name *cp = data;
3520 struct pending_cmd *cmd;
3521 struct hci_request req;
3528 /* If the old values are the same as the new ones just return a
3529 * direct command complete event.
3531 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3532 !memcmp(hdev->short_name, cp->short_name,
3533 sizeof(hdev->short_name))) {
3534 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3539 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3541 if (!hdev_is_powered(hdev)) {
3542 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3544 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3549 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3555 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3561 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3563 hci_req_init(&req, hdev);
3565 if (lmp_bredr_capable(hdev)) {
3570 /* The name is stored in the scan response data and so
3571 * no need to udpate the advertising data here.
3573 if (lmp_le_capable(hdev))
3574 update_scan_rsp_data(&req);
3576 err = hci_req_run(&req, set_name_complete);
3578 mgmt_pending_remove(cmd);
3581 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: require a powered, SSP-capable
 * adapter with no duplicate request in flight, then ask the controller
 * for OOB data — the extended (P-256) variant when BR/EDR Secure
 * Connections is enabled, the legacy variant otherwise.
 */
3585 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3586 void *data, u16 data_len)
3588 struct pending_cmd *cmd;
3591 BT_DBG("%s", hdev->name);
3595 if (!hdev_is_powered(hdev)) {
3596 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3597 MGMT_STATUS_NOT_POWERED);
3601 if (!lmp_ssp_capable(hdev)) {
3602 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3603 MGMT_STATUS_NOT_SUPPORTED);
3607 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3608 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3613 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3619 if (bredr_sc_enabled(hdev))
3620 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3623 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3626 mgmt_pending_remove(cmd);
3629 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: dispatch on payload length —
 * the legacy size carries one hash/rand pair, the extended size carries
 * 192- and 256-bit pairs. Only BDADDR_BREDR peers are accepted; the
 * data is stored via hci_add_remote_oob_data().
 */
3633 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3634 void *data, u16 len)
3638 BT_DBG("%s ", hdev->name);
3642 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3643 struct mgmt_cp_add_remote_oob_data *cp = data;
3646 if (cp->addr.type != BDADDR_BREDR) {
3647 err = cmd_complete(sk, hdev->id,
3648 MGMT_OP_ADD_REMOTE_OOB_DATA,
3649 MGMT_STATUS_INVALID_PARAMS,
3650 &cp->addr, sizeof(cp->addr));
3654 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3655 cp->addr.type, cp->hash,
3656 cp->rand, NULL, NULL);
3658 status = MGMT_STATUS_FAILED;
3660 status = MGMT_STATUS_SUCCESS;
3662 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3663 status, &cp->addr, sizeof(cp->addr));
3664 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3665 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3666 u8 *rand192, *hash192;
3669 if (cp->addr.type != BDADDR_BREDR) {
3670 err = cmd_complete(sk, hdev->id,
3671 MGMT_OP_ADD_REMOTE_OOB_DATA,
3672 MGMT_STATUS_INVALID_PARAMS,
3673 &cp->addr, sizeof(cp->addr));
/* NOTE(review): the LE branch body (lines 3678-3680) was elided by
 * extraction; presumably it nulls rand192/hash192 for LE peers.
 */
3677 if (bdaddr_type_is_le(cp->addr.type)) {
3681 rand192 = cp->rand192;
3682 hash192 = cp->hash192;
3685 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3686 cp->addr.type, hash192, rand192,
3687 cp->hash256, cp->rand256);
3689 status = MGMT_STATUS_FAILED;
3691 status = MGMT_STATUS_SUCCESS;
3693 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3694 status, &cp->addr, sizeof(cp->addr));
3696 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3697 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3698 MGMT_STATUS_INVALID_PARAMS);
3702 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR addresses only.
 * BDADDR_ANY clears all stored remote OOB data; otherwise the single
 * matching entry is removed, with INVALID_PARAMS if none exists.
 */
3706 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3707 void *data, u16 len)
3709 struct mgmt_cp_remove_remote_oob_data *cp = data;
3713 BT_DBG("%s", hdev->name);
3715 if (cp->addr.type != BDADDR_BREDR)
3716 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3717 MGMT_STATUS_INVALID_PARAMS,
3718 &cp->addr, sizeof(cp->addr));
3722 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3723 hci_remote_oob_data_clear(hdev);
3724 status = MGMT_STATUS_SUCCESS;
3728 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3730 status = MGMT_STATUS_INVALID_PARAMS;
3732 status = MGMT_STATUS_SUCCESS;
3735 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3736 status, &cp->addr, sizeof(cp->addr));
3738 hci_dev_unlock(hdev);
/* Build the HCI commands that start discovery for the current
 * hdev->discovery.type into *req. BR/EDR discovery issues a GIAC
 * inquiry; LE/interleaved discovery stops advertising and background
 * scanning as needed, then programs and enables an active LE scan.
 * Returns false with *status set on any precondition failure.
 */
3742 static bool trigger_discovery(struct hci_request *req, u8 *status)
3744 struct hci_dev *hdev = req->hdev;
3745 struct hci_cp_le_set_scan_param param_cp;
3746 struct hci_cp_le_set_scan_enable enable_cp;
3747 struct hci_cp_inquiry inq_cp;
3748 /* General inquiry access code (GIAC) */
3749 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3753 switch (hdev->discovery.type) {
3754 case DISCOV_TYPE_BREDR:
3755 *status = mgmt_bredr_support(hdev);
3759 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3760 *status = MGMT_STATUS_BUSY;
3764 hci_inquiry_cache_flush(hdev);
3766 memset(&inq_cp, 0, sizeof(inq_cp));
3767 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3768 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3769 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3772 case DISCOV_TYPE_LE:
3773 case DISCOV_TYPE_INTERLEAVED:
3774 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR enabled as well */
3778 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3779 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3780 *status = MGMT_STATUS_NOT_SUPPORTED;
3784 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3785 /* Don't let discovery abort an outgoing
3786 * connection attempt that's using directed
3789 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3791 *status = MGMT_STATUS_REJECTED;
3795 disable_advertising(req);
3798 /* If controller is scanning, it means the background scanning
3799 * is running. Thus, we should temporarily stop it in order to
3800 * set the discovery scanning parameters.
3802 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3803 hci_req_add_le_scan_disable(req);
/* NOTE(review): "¶m_cp" below is HTML-entity mojibake for
 * "&param_cp" (&para; mis-decoded) — restore "&param_cp" when
 * fixing the real tree.
 */
3805 memset(¶m_cp, 0, sizeof(param_cp));
3807 /* All active scans will be done with either a resolvable
3808 * private address (when privacy feature has been enabled)
3809 * or non-resolvable private address.
3811 err = hci_update_random_address(req, true, &own_addr_type);
3813 *status = MGMT_STATUS_FAILED;
3817 param_cp.type = LE_SCAN_ACTIVE;
3818 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3819 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3820 param_cp.own_address_type = own_addr_type;
3821 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3824 memset(&enable_cp, 0, sizeof(enable_cp));
3825 enable_cp.enable = LE_SCAN_ENABLE;
3826 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3827 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type */
3832 *status = MGMT_STATUS_INVALID_PARAMS;
/* Request-complete callback shared by Start Discovery and Start Service
 * Discovery: resolve the pending command, move discovery state to
 * STOPPED on failure or FINDING on success, and for LE/interleaved
 * scans arm the delayed le_scan_disable work with the proper timeout.
 */
3839 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3842 struct pending_cmd *cmd;
3843 unsigned long timeout;
3845 BT_DBG("status %d", status);
3849 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3851 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3854 cmd->cmd_complete(cmd, mgmt_status(status));
3855 mgmt_pending_remove(cmd);
3859 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3863 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3865 switch (hdev->discovery.type) {
3866 case DISCOV_TYPE_LE:
3867 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3869 case DISCOV_TYPE_INTERLEAVED:
3870 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3872 case DISCOV_TYPE_BREDR:
3876 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3882 queue_delayed_work(hdev->workqueue,
3883 &hdev->le_scan_disable, timeout);
3886 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: require a powered adapter and an
 * idle discovery state, reset any previous discovery filter, record
 * the requested type and kick off the discovery HCI request built by
 * trigger_discovery().
 */
3889 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3890 void *data, u16 len)
3892 struct mgmt_cp_start_discovery *cp = data;
3893 struct pending_cmd *cmd;
3894 struct hci_request req;
3898 BT_DBG("%s", hdev->name);
3902 if (!hdev_is_powered(hdev)) {
3903 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3904 MGMT_STATUS_NOT_POWERED,
3905 &cp->type, sizeof(cp->type));
3909 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3910 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3911 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3912 MGMT_STATUS_BUSY, &cp->type,
3917 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3923 cmd->cmd_complete = generic_cmd_complete;
3925 /* Clear the discovery filter first to free any previously
3926 * allocated memory for the UUID list.
3928 hci_discovery_filter_clear(hdev);
3930 hdev->discovery.type = cp->type;
3931 hdev->discovery.report_invalid_rssi = false;
3933 hci_req_init(&req, hdev);
3935 if (!trigger_discovery(&req, &status)) {
3936 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3937 status, &cp->type, sizeof(cp->type));
3938 mgmt_pending_remove(cmd);
3942 err = hci_req_run(&req, start_discovery_complete);
3944 mgmt_pending_remove(cmd);
3948 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3951 hci_dev_unlock(hdev);
/* cmd_complete callback for Start Service Discovery: echo the command
 * back to the caller with the given status (payload args elided by
 * extraction).
 */
3955 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3957 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with an RSSI threshold and a variable-length UUID filter list. The
 * UUID count is bounded (max_uuid_count guards the 16-bytes-per-UUID
 * multiplication against u16 overflow) and the payload length must
 * match exactly before the list is copied with kmemdup().
 */
3961 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3962 void *data, u16 len)
3964 struct mgmt_cp_start_service_discovery *cp = data;
3965 struct pending_cmd *cmd;
3966 struct hci_request req;
3967 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3968 u16 uuid_count, expected_len;
3972 BT_DBG("%s", hdev->name);
3976 if (!hdev_is_powered(hdev)) {
3977 err = cmd_complete(sk, hdev->id,
3978 MGMT_OP_START_SERVICE_DISCOVERY,
3979 MGMT_STATUS_NOT_POWERED,
3980 &cp->type, sizeof(cp->type));
3984 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3985 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3986 err = cmd_complete(sk, hdev->id,
3987 MGMT_OP_START_SERVICE_DISCOVERY,
3988 MGMT_STATUS_BUSY, &cp->type,
3993 uuid_count = __le16_to_cpu(cp->uuid_count);
3994 if (uuid_count > max_uuid_count) {
3995 BT_ERR("service_discovery: too big uuid_count value %u",
3997 err = cmd_complete(sk, hdev->id,
3998 MGMT_OP_START_SERVICE_DISCOVERY,
3999 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4004 expected_len = sizeof(*cp) + uuid_count * 16;
4005 if (expected_len != len) {
4006 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4008 err = cmd_complete(sk, hdev->id,
4009 MGMT_OP_START_SERVICE_DISCOVERY,
4010 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4015 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4022 cmd->cmd_complete = service_discovery_cmd_complete;
4024 /* Clear the discovery filter first to free any previously
4025 * allocated memory for the UUID list.
4027 hci_discovery_filter_clear(hdev);
4029 hdev->discovery.type = cp->type;
4030 hdev->discovery.rssi = cp->rssi;
4031 hdev->discovery.uuid_count = uuid_count;
4033 if (uuid_count > 0) {
4034 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4036 if (!hdev->discovery.uuids) {
4037 err = cmd_complete(sk, hdev->id,
4038 MGMT_OP_START_SERVICE_DISCOVERY,
4040 &cp->type, sizeof(cp->type));
4041 mgmt_pending_remove(cmd);
4046 hci_req_init(&req, hdev);
4048 if (!trigger_discovery(&req, &status)) {
4049 err = cmd_complete(sk, hdev->id,
4050 MGMT_OP_START_SERVICE_DISCOVERY,
4051 status, &cp->type, sizeof(cp->type));
4052 mgmt_pending_remove(cmd);
4056 err = hci_req_run(&req, start_discovery_complete);
4058 mgmt_pending_remove(cmd);
4062 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4065 hci_dev_unlock(hdev);
/* Request-complete callback for Stop Discovery: resolve the pending
 * command and, on success, mark discovery as STOPPED.
 */
4069 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4071 struct pending_cmd *cmd;
4073 BT_DBG("status %d", status);
4077 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4079 cmd->cmd_complete(cmd, mgmt_status(status));
4080 mgmt_pending_remove(cmd);
4084 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4086 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: reject when no discovery is active or
 * the type doesn't match, otherwise queue hci_stop_discovery() into a
 * request. -ENODATA from hci_req_run means no HCI commands were needed,
 * so the command completes immediately and the state goes to STOPPED.
 */
4089 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4092 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4093 struct pending_cmd *cmd;
4094 struct hci_request req;
4097 BT_DBG("%s", hdev->name);
4101 if (!hci_discovery_active(hdev)) {
4102 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4103 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4104 sizeof(mgmt_cp->type));
4108 if (hdev->discovery.type != mgmt_cp->type) {
4109 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4110 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4111 sizeof(mgmt_cp->type));
4115 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4121 cmd->cmd_complete = generic_cmd_complete;
4123 hci_req_init(&req, hdev);
4125 hci_stop_discovery(&req);
4127 err = hci_req_run(&req, stop_discovery_complete);
4129 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4133 mgmt_pending_remove(cmd);
4135 /* If no HCI commands were sent we're done */
4136 if (err == -ENODATA) {
4137 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4138 &mgmt_cp->type, sizeof(mgmt_cp->type));
4139 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4143 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during active discovery, mark an
 * inquiry-cache entry's name as known, or as needed (re-queuing it for
 * name resolution via hci_inquiry_cache_update_resolve()).
 */
4147 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4150 struct mgmt_cp_confirm_name *cp = data;
4151 struct inquiry_entry *e;
4154 BT_DBG("%s", hdev->name);
4158 if (!hci_discovery_active(hdev)) {
4159 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4160 MGMT_STATUS_FAILED, &cp->addr,
4165 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4167 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4168 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4173 if (cp->name_known) {
4174 e->name_state = NAME_KNOWN;
4177 e->name_state = NAME_NEEDED;
4178 hci_inquiry_cache_update_resolve(hdev, e);
4181 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4185 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the adapter
 * blacklist and broadcast MGMT_EV_DEVICE_BLOCKED on success.
 */
4189 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4192 struct mgmt_cp_block_device *cp = data;
4196 BT_DBG("%s", hdev->name);
4198 if (!bdaddr_type_is_valid(cp->addr.type))
4199 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4200 MGMT_STATUS_INVALID_PARAMS,
4201 &cp->addr, sizeof(cp->addr));
4205 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4208 status = MGMT_STATUS_FAILED;
4212 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4214 status = MGMT_STATUS_SUCCESS;
4217 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4218 &cp->addr, sizeof(cp->addr));
4220 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device — remove the
 * address from the blacklist (INVALID_PARAMS if it wasn't there) and
 * broadcast MGMT_EV_DEVICE_UNBLOCKED on success.
 */
4225 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4228 struct mgmt_cp_unblock_device *cp = data;
4232 BT_DBG("%s", hdev->name);
4234 if (!bdaddr_type_is_valid(cp->addr.type))
4235 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4236 MGMT_STATUS_INVALID_PARAMS,
4237 &cp->addr, sizeof(cp->addr));
4241 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4244 status = MGMT_STATUS_INVALID_PARAMS;
4248 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4250 status = MGMT_STATUS_SUCCESS;
4253 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4254 &cp->addr, sizeof(cp->addr));
4256 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: validate the DI source (0x0000-0x0002),
 * store the Device ID fields on the adapter, acknowledge immediately
 * and queue a request (EIR/class update, body elided) to publish it.
 */
4261 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4264 struct mgmt_cp_set_device_id *cp = data;
4265 struct hci_request req;
4269 BT_DBG("%s", hdev->name);
4271 source = __le16_to_cpu(cp->source);
4273 if (source > 0x0002)
4274 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4275 MGMT_STATUS_INVALID_PARAMS);
4279 hdev->devid_source = source;
4280 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4281 hdev->devid_product = __le16_to_cpu(cp->product);
4282 hdev->devid_version = __le16_to_cpu(cp->version);
4284 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4286 hci_req_init(&req, hdev);
4288 hci_req_run(&req, NULL);
4290 hci_dev_unlock(hdev);
/* Request-complete callback for Set Advertising: on failure, reply with
 * a status to all pending SET_ADVERTISING commands; on success, sync
 * the HCI_ADVERTISING setting flag with the controller's HCI_LE_ADV
 * state, answer the pending commands and emit New Settings.
 */
4295 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4298 struct cmd_lookup match = { NULL, hdev };
4303 u8 mgmt_err = mgmt_status(status);
4305 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4306 cmd_status_rsp, &mgmt_err);
4310 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4311 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4313 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4315 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4318 new_settings(hdev, match.sk);
4324 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: validate LE support and the boolean
 * value, short-circuit (flag toggle + settings reply only) when the
 * adapter is off, nothing changes, or advertising would conflict with
 * existing LE links / active scanning; otherwise queue an HCI request
 * to enable or disable advertising.
 */
4327 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4330 struct mgmt_mode *cp = data;
4331 struct pending_cmd *cmd;
4332 struct hci_request req;
4333 u8 val, enabled, status;
4336 BT_DBG("request for %s", hdev->name);
4338 status = mgmt_le_support(hdev);
4340 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4343 if (cp->val != 0x00 && cp->val != 0x01)
4344 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4345 MGMT_STATUS_INVALID_PARAMS);
4350 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4352 /* The following conditions are ones which mean that we should
4353 * not do any HCI communication but directly send a mgmt
4354 * response to user space (after toggling the flag if
4357 if (!hdev_is_powered(hdev) || val == enabled ||
4358 hci_conn_num(hdev, LE_LINK) > 0 ||
4359 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4360 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4361 bool changed = false;
4363 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4364 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4368 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4373 err = new_settings(hdev, sk);
/* Only one advertising/LE state change may be in flight at a time */
4378 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4379 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4380 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4385 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4391 hci_req_init(&req, hdev);
4394 enable_advertising(&req);
4396 disable_advertising(&req);
4398 err = hci_req_run(&req, set_advertising_complete);
4400 mgmt_pending_remove(cmd);
4403 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed on LE-capable,
 * powered-off adapters. A non-ANY address must not be BDADDR_NONE and
 * must have the two most significant bits set (static random address
 * format per the Core Specification).
 */
4407 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4408 void *data, u16 len)
4410 struct mgmt_cp_set_static_address *cp = data;
4413 BT_DBG("%s", hdev->name);
4415 if (!lmp_le_capable(hdev))
4416 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4417 MGMT_STATUS_NOT_SUPPORTED);
4419 if (hdev_is_powered(hdev))
4420 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4421 MGMT_STATUS_REJECTED);
4423 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4424 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4425 return cmd_status(sk, hdev->id,
4426 MGMT_OP_SET_STATIC_ADDRESS,
4427 MGMT_STATUS_INVALID_PARAMS);
4429 /* Two most significant bits shall be set */
4430 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4431 return cmd_status(sk, hdev->id,
4432 MGMT_OP_SET_STATIC_ADDRESS,
4433 MGMT_STATUS_INVALID_PARAMS);
4438 bacpy(&hdev->static_addr, &cp->bdaddr);
4440 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4442 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate interval and window against
 * the HCI-permitted range 0x0004-0x4000 and window <= interval, store
 * them, and restart background scanning (when active and no discovery
 * is running) so the new parameters take effect.
 */
4447 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4448 void *data, u16 len)
4450 struct mgmt_cp_set_scan_params *cp = data;
4451 __u16 interval, window;
4454 BT_DBG("%s", hdev->name);
4456 if (!lmp_le_capable(hdev))
4457 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4458 MGMT_STATUS_NOT_SUPPORTED);
4460 interval = __le16_to_cpu(cp->interval);
4462 if (interval < 0x0004 || interval > 0x4000)
4463 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4464 MGMT_STATUS_INVALID_PARAMS);
4466 window = __le16_to_cpu(cp->window);
4468 if (window < 0x0004 || window > 0x4000)
4469 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4470 MGMT_STATUS_INVALID_PARAMS);
4472 if (window > interval)
4473 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4474 MGMT_STATUS_INVALID_PARAMS);
4478 hdev->le_scan_interval = interval;
4479 hdev->le_scan_window = window;
4481 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4483 /* If background scan is running, restart it so new parameters are
4486 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4487 hdev->discovery.state == DISCOVERY_STOPPED) {
4488 struct hci_request req;
4490 hci_req_init(&req, hdev);
4492 hci_req_add_le_scan_disable(&req);
4493 hci_req_add_le_passive_scan(&req);
4495 hci_req_run(&req, NULL);
4498 hci_dev_unlock(hdev);
/* Request-complete callback for Set Fast Connectable: on failure reply
 * with a status; on success update the HCI_FAST_CONNECTABLE flag from
 * the requested mode, send the settings reply and emit New Settings.
 */
4503 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4506 struct pending_cmd *cmd;
4508 BT_DBG("status 0x%02x", status);
4512 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4517 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4518 mgmt_status(status));
4520 struct mgmt_mode *cp = cmd->param;
4523 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4525 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4527 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4528 new_settings(hdev, cmd->sk);
4531 mgmt_pending_remove(cmd);
4534 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: needs BR/EDR enabled on a
 * >= 1.2 controller, a powered and connectable adapter, a boolean
 * value and no duplicate request in flight. If the flag already has
 * the requested value only a settings reply is sent; otherwise a
 * write_fast_connectable() request is queued.
 */
4537 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4538 void *data, u16 len)
4540 struct mgmt_mode *cp = data;
4541 struct pending_cmd *cmd;
4542 struct hci_request req;
4545 BT_DBG("%s", hdev->name);
4547 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4548 hdev->hci_ver < BLUETOOTH_VER_1_2)
4549 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4550 MGMT_STATUS_NOT_SUPPORTED);
4552 if (cp->val != 0x00 && cp->val != 0x01)
4553 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4554 MGMT_STATUS_INVALID_PARAMS);
4556 if (!hdev_is_powered(hdev))
4557 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4558 MGMT_STATUS_NOT_POWERED);
4560 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4561 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4562 MGMT_STATUS_REJECTED);
4566 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4567 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4572 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4573 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4578 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4585 hci_req_init(&req, hdev);
4587 write_fast_connectable(&req, cp->val);
4589 err = hci_req_run(&req, fast_connectable_complete);
4591 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4592 MGMT_STATUS_FAILED);
4593 mgmt_pending_remove(cmd);
4597 hci_dev_unlock(hdev);
/* HCI request callback for MGMT Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * on success confirm the new settings to the caller and broadcast them.
 */
4602 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4604 struct pending_cmd *cmd;
4606 BT_DBG("status 0x%02x", status);
4610 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4615 u8 mgmt_err = mgmt_status(status);
4617 /* We need to restore the flag if related HCI commands
4620 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4622 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4624 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4625 new_settings(hdev, cmd->sk);
4628 mgmt_pending_remove(cmd);
4631 hci_dev_unlock(hdev);
/* MGMT Set BR/EDR command handler: enables/disables BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered is
 * rejected; re-enabling is rejected when the controller is configured
 * with an LE static address (a dual-mode controller must use its public
 * address as identity for both transports).
 */
4634 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4636 struct mgmt_mode *cp = data;
4637 struct pending_cmd *cmd;
4638 struct hci_request req;
4641 BT_DBG("request for %s", hdev->name);
/* Only meaningful on dual-mode controllers. */
4643 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4644 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4645 MGMT_STATUS_NOT_SUPPORTED);
/* Toggling BR/EDR only makes sense while LE is the enabled transport. */
4647 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4648 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4649 MGMT_STATUS_REJECTED);
4651 if (cp->val != 0x00 && cp->val != 0x01)
4652 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4653 MGMT_STATUS_INVALID_PARAMS);
/* No change requested: just echo the current settings. */
4657 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4658 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* While powered off the flags can be flipped directly, no HCI traffic
 * needed; disabling BR/EDR also clears all BR/EDR-only settings.
 */
4662 if (!hdev_is_powered(hdev)) {
4664 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4665 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4666 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4667 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4668 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4671 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4673 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4677 err = new_settings(hdev, sk);
4681 /* Reject disabling when powered on */
4683 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4684 MGMT_STATUS_REJECTED);
4687 /* When configuring a dual-mode controller to operate
4688 * with LE only and using a static address, then switching
4689 * BR/EDR back on is not allowed.
4691 * Dual-mode controllers shall operate with the public
4692 * address as its identity address for BR/EDR and LE. So
4693 * reject the attempt to create an invalid configuration.
4695 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4696 bacmp(&hdev->static_addr, BDADDR_ANY)) {
4697 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4698 MGMT_STATUS_REJECTED);
4703 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4704 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4709 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4715 /* We need to flip the bit already here so that update_adv_data
4716 * generates the correct flags.
4718 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4720 hci_req_init(&req, hdev);
4722 write_fast_connectable(&req, false);
4723 __hci_update_page_scan(&req);
4725 /* Since only the advertising data flags will change, there
4726 * is no need to update the scan response data.
4728 update_adv_data(&req);
/* set_bredr_complete() reverts HCI_BREDR_ENABLED if this fails. */
4730 err = hci_req_run(&req, set_bredr_complete);
4732 mgmt_pending_remove(cmd);
4735 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections handler. val is 0x00 (off), 0x01 (SC on)
 * or 0x02 (SC-only mode). When the controller cannot act on it right
 * now (powered off, no SC support, BR/EDR disabled) only the flags are
 * updated; otherwise Write SC Support is sent to the controller.
 */
4739 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4740 void *data, u16 len)
4742 struct mgmt_mode *cp = data;
4743 struct pending_cmd *cmd;
4747 BT_DBG("request for %s", hdev->name);
/* SC is supported either natively (BR/EDR SC) or via LE SMP. */
4749 if (!lmp_sc_capable(hdev) &&
4750 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4751 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4752 MGMT_STATUS_NOT_SUPPORTED);
4754 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4755 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4756 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: nothing to tell the controller. */
4760 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4761 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4765 changed = !test_and_set_bit(HCI_SC_ENABLED,
4767 if (cp->val == 0x02)
4768 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4770 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4772 changed = test_and_clear_bit(HCI_SC_ENABLED,
4774 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4777 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4782 err = new_settings(hdev, sk);
4787 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4788 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (including the SC-only sub-mode). */
4795 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4796 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4797 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4801 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4807 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4809 mgmt_pending_remove(cmd);
/* Record the SC-only preference alongside the pending command. */
4813 if (cp->val == 0x02)
4814 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4816 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4819 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys handler. val 0x00 disables, 0x01 keeps received
 * debug keys, 0x02 additionally makes the controller generate debug
 * keys (SSP debug mode). Tracks two flags: HCI_KEEP_DEBUG_KEYS and
 * HCI_USE_DEBUG_KEYS.
 */
4823 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4824 void *data, u16 len)
4826 struct mgmt_mode *cp = data;
4827 bool changed, use_changed;
4830 BT_DBG("request for %s", hdev->name);
4832 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4833 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4834 MGMT_STATUS_INVALID_PARAMS);
4839 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4842 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* 0x02 means actively use debug keys, not just retain them. */
4845 if (cp->val == 0x02)
4846 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4849 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Push SSP debug mode to the controller only when it can take effect:
 * powered, the use-flag actually changed, and SSP enabled.
 */
4852 if (hdev_is_powered(hdev) && use_changed &&
4853 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4854 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4855 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4856 sizeof(mode), &mode);
4859 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4864 err = new_settings(hdev, sk);
4867 hci_dev_unlock(hdev);
/* MGMT Set Privacy handler: enable/disable LE privacy (RPA usage) and
 * install the local IRK. Only allowed while the adapter is powered off.
 */
4871 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4874 struct mgmt_cp_set_privacy *cp = cp_data;
4878 BT_DBG("request for %s", hdev->name);
4880 if (!lmp_le_capable(hdev))
4881 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4882 MGMT_STATUS_NOT_SUPPORTED);
4884 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4885 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4886 MGMT_STATUS_INVALID_PARAMS);
/* Changing the identity key while powered would confuse ongoing
 * connections, so reject it.
 */
4888 if (hdev_is_powered(hdev))
4889 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4890 MGMT_STATUS_REJECTED);
4894 /* If user space supports this command it is also expected to
4895 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4897 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
/* Enabling: store the IRK and force a fresh RPA to be generated. */
4900 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4901 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4902 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Disabling: wipe the IRK so it cannot be used any more. */
4904 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4905 memset(hdev->irk, 0, sizeof(hdev->irk));
4906 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4909 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4914 err = new_settings(hdev, sk);
4917 hci_dev_unlock(hdev);
/* Validate the identity address of an IRK entry supplied by user space.
 * Public addresses are always acceptable; random addresses must be
 * static random (two most significant bits set) to serve as identity.
 */
4921 static bool irk_is_valid(struct mgmt_irk_info *irk)
4923 switch (irk->addr.type) {
4924 case BDADDR_LE_PUBLIC:
4927 case BDADDR_LE_RANDOM:
4928 /* Two most significant bits shall be set */
4929 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs handler: replace the kernel's IRK store with the list
 * provided by user space. The whole list is validated before the
 * existing store is cleared, so an invalid entry leaves state untouched.
 */
4937 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4940 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
4941 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4942 sizeof(struct mgmt_irk_info));
4943 u16 irk_count, expected_len;
4946 BT_DBG("request for %s", hdev->name);
4948 if (!lmp_le_capable(hdev))
4949 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4950 MGMT_STATUS_NOT_SUPPORTED);
4952 irk_count = __le16_to_cpu(cp->irk_count);
4953 if (irk_count > max_irk_count) {
4954 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4955 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4956 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload length exactly. */
4959 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4960 if (expected_len != len) {
4961 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4963 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4964 MGMT_STATUS_INVALID_PARAMS);
4967 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry up front, before touching the IRK store. */
4969 for (i = 0; i < irk_count; i++) {
4970 struct mgmt_irk_info *key = &cp->irks[i];
4972 if (!irk_is_valid(key))
4973 return cmd_status(sk, hdev->id,
4975 MGMT_STATUS_INVALID_PARAMS);
4980 hci_smp_irks_clear(hdev);
4982 for (i = 0; i < irk_count; i++) {
4983 struct mgmt_irk_info *irk = &cp->irks[i];
4986 if (irk->addr.type == BDADDR_LE_PUBLIC)
4987 addr_type = ADDR_LE_DEV_PUBLIC;
4989 addr_type = ADDR_LE_DEV_RANDOM;
4991 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
/* User space that loads IRKs is expected to resolve RPAs itself. */
4995 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4997 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4999 hci_dev_unlock(hdev);
/* Validate an LTK entry from user space: master must be a boolean and
 * the address must be public or static random (two MSBs set).
 */
5004 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5006 if (key->master != 0x00 && key->master != 0x01)
5009 switch (key->addr.type) {
5010 case BDADDR_LE_PUBLIC:
5013 case BDADDR_LE_RANDOM:
5014 /* Two most significant bits shall be set */
5015 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys handler: replace the kernel's LTK store with
 * the list from user space. All entries are validated before the store
 * is cleared. Each mgmt key type is mapped onto an SMP LTK type plus an
 * authenticated flag.
 */
5023 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5024 void *cp_data, u16 len)
5026 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps expected_len below within u16 range. */
5027 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5028 sizeof(struct mgmt_ltk_info));
5029 u16 key_count, expected_len;
5032 BT_DBG("request for %s", hdev->name);
5034 if (!lmp_le_capable(hdev))
5035 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5036 MGMT_STATUS_NOT_SUPPORTED);
5038 key_count = __le16_to_cpu(cp->key_count);
5039 if (key_count > max_key_count) {
5040 BT_ERR("load_ltks: too big key_count value %u", key_count);
5041 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5042 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload length exactly. */
5045 expected_len = sizeof(*cp) + key_count *
5046 sizeof(struct mgmt_ltk_info);
5047 if (expected_len != len) {
5048 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5050 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5051 MGMT_STATUS_INVALID_PARAMS);
5054 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate everything first so a bad entry leaves state untouched. */
5056 for (i = 0; i < key_count; i++) {
5057 struct mgmt_ltk_info *key = &cp->keys[i];
5059 if (!ltk_is_valid(key))
5060 return cmd_status(sk, hdev->id,
5061 MGMT_OP_LOAD_LONG_TERM_KEYS,
5062 MGMT_STATUS_INVALID_PARAMS);
5067 hci_smp_ltks_clear(hdev);
5069 for (i = 0; i < key_count; i++) {
5070 struct mgmt_ltk_info *key = &cp->keys[i];
5071 u8 type, addr_type, authenticated;
5073 if (key->addr.type == BDADDR_LE_PUBLIC)
5074 addr_type = ADDR_LE_DEV_PUBLIC;
5076 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt key type onto SMP key type + authenticated flag.
 * For legacy (non-P256) keys the role decides master vs slave LTK.
 */
5078 switch (key->type) {
5079 case MGMT_LTK_UNAUTHENTICATED:
5080 authenticated = 0x00;
5081 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5083 case MGMT_LTK_AUTHENTICATED:
5084 authenticated = 0x01;
5085 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5087 case MGMT_LTK_P256_UNAUTH:
5088 authenticated = 0x00;
5089 type = SMP_LTK_P256;
5091 case MGMT_LTK_P256_AUTH:
5092 authenticated = 0x01;
5093 type = SMP_LTK_P256;
5095 case MGMT_LTK_P256_DEBUG:
5096 authenticated = 0x00;
5097 type = SMP_LTK_P256_DEBUG;
5102 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5103 authenticated, key->val, key->enc_size, key->ediv,
5107 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5110 hci_dev_unlock(hdev);
/* Per-command completion for Get Conn Info: build the reply from the
 * cached values on the hci_conn (or invalid markers on failure), send
 * it and drop the connection reference taken when the query started.
 */
5115 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5117 struct hci_conn *conn = cmd->user_data;
5118 struct mgmt_rp_get_conn_info rp;
/* The original request's address is echoed back in the reply. */
5121 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5123 if (status == MGMT_STATUS_SUCCESS) {
5124 rp.rssi = conn->rssi;
5125 rp.tx_power = conn->tx_power;
5126 rp.max_tx_power = conn->max_tx_power;
/* On failure signal "no data" with the invalid sentinels. */
5128 rp.rssi = HCI_RSSI_INVALID;
5129 rp.tx_power = HCI_TX_POWER_INVALID;
5130 rp.max_tx_power = HCI_TX_POWER_INVALID;
5133 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
/* Balances the hci_conn_hold() done in get_conn_info(). */
5136 hci_conn_drop(conn);
/* HCI request callback for the RSSI/TX-power refresh issued by
 * get_conn_info(): recover the connection handle from the last sent
 * command and finish the matching pending mgmt command.
 */
5142 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5145 struct hci_cp_read_rssi *cp;
5146 struct pending_cmd *cmd;
5147 struct hci_conn *conn;
5151 BT_DBG("status 0x%02x", hci_status);
5155 /* Commands sent in request are either Read RSSI or Read Transmit Power
5156 * Level so we check which one was last sent to retrieve connection
5157 * handle. Both commands have handle as first parameter so it's safe to
5158 * cast data on the same command struct.
5160 * First command sent is always Read RSSI and we fail only if it fails.
5161 * In other case we simply override error to indicate success as we
5162 * already remembered if TX power value is actually valid.
5164 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5166 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5167 status = MGMT_STATUS_SUCCESS;
5169 status = mgmt_status(hci_status);
/* Neither command found: request state is inconsistent. */
5173 BT_ERR("invalid sent_cmd in conn_info response");
5177 handle = __le16_to_cpu(cp->handle);
5178 conn = hci_conn_hash_lookup_handle(hdev, handle);
5180 BT_ERR("unknown handle (%d) in conn_info response", handle);
/* Match on the conn pointer stored as the pending cmd's user_data. */
5184 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5188 cmd->cmd_complete(cmd, status);
5189 mgmt_pending_remove(cmd);
5192 hci_dev_unlock(hdev);
/* MGMT Get Conn Info handler: return RSSI/TX-power for a connection.
 * Serves from the per-connection cache when it is fresh enough,
 * otherwise issues Read RSSI (+ Read TX Power as needed) and defers the
 * reply to conn_info_refresh_complete().
 */
5195 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5198 struct mgmt_cp_get_conn_info *cp = data;
5199 struct mgmt_rp_get_conn_info rp;
5200 struct hci_conn *conn;
5201 unsigned long conn_info_age;
5204 BT_DBG("%s", hdev->name);
/* The reply always carries the queried address back. */
5206 memset(&rp, 0, sizeof(rp));
5207 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5208 rp.addr.type = cp->addr.type;
5210 if (!bdaddr_type_is_valid(cp->addr.type))
5211 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5212 MGMT_STATUS_INVALID_PARAMS,
5217 if (!hdev_is_powered(hdev)) {
5218 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5219 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look the connection up on the transport matching the address type. */
5223 if (cp->addr.type == BDADDR_BREDR)
5224 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5227 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5229 if (!conn || conn->state != BT_CONNECTED) {
5230 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5231 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Only one Get Conn Info per connection may be pending. */
5235 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5236 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5237 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5241 /* To avoid client trying to guess when to poll again for information we
5242 * calculate conn info age as random value between min/max set in hdev.
5244 conn_info_age = hdev->conn_info_min_age +
5245 prandom_u32_max(hdev->conn_info_max_age -
5246 hdev->conn_info_min_age);
5248 /* Query controller to refresh cached values if they are too old or were
5251 if (time_after(jiffies, conn->conn_info_timestamp +
5252 msecs_to_jiffies(conn_info_age)) ||
5253 !conn->conn_info_timestamp) {
5254 struct hci_request req;
5255 struct hci_cp_read_tx_power req_txp_cp;
5256 struct hci_cp_read_rssi req_rssi_cp;
5257 struct pending_cmd *cmd;
5259 hci_req_init(&req, hdev);
5260 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5261 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5264 /* For LE links TX power does not change thus we don't need to
5265 * query for it once value is known.
5267 if (!bdaddr_type_is_le(cp->addr.type) ||
5268 conn->tx_power == HCI_TX_POWER_INVALID) {
5269 req_txp_cp.handle = cpu_to_le16(conn->handle);
5270 req_txp_cp.type = 0x00;
5271 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5272 sizeof(req_txp_cp), &req_txp_cp);
5275 /* Max TX power needs to be read only once per connection */
5276 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5277 req_txp_cp.handle = cpu_to_le16(conn->handle);
5278 req_txp_cp.type = 0x01;
5279 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5280 sizeof(req_txp_cp), &req_txp_cp);
5283 err = hci_req_run(&req, conn_info_refresh_complete);
5287 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the connection alive until the refresh completes; released
 * in conn_info_cmd_complete().
 */
5294 hci_conn_hold(conn);
5295 cmd->user_data = hci_conn_get(conn);
5296 cmd->cmd_complete = conn_info_cmd_complete;
5298 conn->conn_info_timestamp = jiffies;
5300 /* Cache is valid, just reply with values cached in hci_conn */
5301 rp.rssi = conn->rssi;
5302 rp.tx_power = conn->tx_power;
5303 rp.max_tx_power = conn->max_tx_power;
5305 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5306 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5310 hci_dev_unlock(hdev);
/* Per-command completion for Get Clock Info: fill in local and (when a
 * connection was involved) piconet clock values and send the reply.
 */
5314 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5316 struct hci_conn *conn = cmd->user_data;
5317 struct mgmt_rp_get_clock_info rp;
5318 struct hci_dev *hdev;
5321 memset(&rp, 0, sizeof(rp));
5322 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5327 hdev = hci_dev_get(cmd->index);
5329 rp.local_clock = cpu_to_le32(hdev->clock);
/* Piconet clock/accuracy only exist when a peer connection was read. */
5334 rp.piconet_clock = cpu_to_le32(conn->clock);
5335 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5339 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
/* Balances the hci_conn_hold() taken in get_clock_info(). */
5343 hci_conn_drop(conn);
/* HCI request callback for Read Clock issued by get_clock_info():
 * recover the connection (if the piconet clock was requested) and
 * finish the matching pending mgmt command.
 */
5350 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5352 struct hci_cp_read_clock *hci_cp;
5353 struct pending_cmd *cmd;
5354 struct hci_conn *conn;
5356 BT_DBG("%s status %u", hdev->name, status);
5360 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a connection was read. */
5364 if (hci_cp->which) {
5365 u16 handle = __le16_to_cpu(hci_cp->handle);
5366 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* Match on the conn pointer stored as the pending cmd's user_data. */
5371 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5375 cmd->cmd_complete(cmd, mgmt_status(status));
5376 mgmt_pending_remove(cmd);
5379 hci_dev_unlock(hdev);
/* MGMT Get Clock Info handler (BR/EDR only): read the local clock and,
 * when a non-ANY address of a connected peer is given, also that
 * connection's piconet clock. Reply is deferred to
 * get_clock_info_complete() / clock_info_cmd_complete().
 */
5382 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5385 struct mgmt_cp_get_clock_info *cp = data;
5386 struct mgmt_rp_get_clock_info rp;
5387 struct hci_cp_read_clock hci_cp;
5388 struct pending_cmd *cmd;
5389 struct hci_request req;
5390 struct hci_conn *conn;
5393 BT_DBG("%s", hdev->name);
5395 memset(&rp, 0, sizeof(rp));
5396 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5397 rp.addr.type = cp->addr.type;
/* The clock concept only exists on the BR/EDR transport. */
5399 if (cp->addr.type != BDADDR_BREDR)
5400 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5401 MGMT_STATUS_INVALID_PARAMS,
5406 if (!hdev_is_powered(hdev)) {
5407 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5408 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* A non-ANY address selects a specific connection's piconet clock. */
5412 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5413 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5415 if (!conn || conn->state != BT_CONNECTED) {
5416 err = cmd_complete(sk, hdev->id,
5417 MGMT_OP_GET_CLOCK_INFO,
5418 MGMT_STATUS_NOT_CONNECTED,
5426 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5432 cmd->cmd_complete = clock_info_cmd_complete;
5434 hci_req_init(&req, hdev);
/* First command: zeroed hci_cp means which = 0x00, the local clock. */
5436 memset(&hci_cp, 0, sizeof(hci_cp));
5437 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until clock_info_cmd_complete() drops it. */
5440 hci_conn_hold(conn);
5441 cmd->user_data = hci_conn_get(conn);
5443 hci_cp.handle = cpu_to_le16(conn->handle);
5444 hci_cp.which = 0x01; /* Piconet clock */
5445 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5448 err = hci_req_run(&req, get_clock_info_complete);
5450 mgmt_pending_remove(cmd);
5453 hci_dev_unlock(hdev);
/* Return whether an established LE connection to the given address and
 * address type exists on this adapter.
 */
5457 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5459 struct hci_conn *conn;
5461 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
/* The hash lookup matches only the address; verify the type too. */
5465 if (conn->dst_type != type)
5468 if (conn->state != BT_CONNECTED)
5474 /* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for an LE device
 * and move its params entry onto the matching action list
 * (pend_le_reports / pend_le_conns), updating background scanning.
 */
5475 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5476 u8 addr_type, u8 auto_connect)
5478 struct hci_dev *hdev = req->hdev;
5479 struct hci_conn_params *params;
/* Creates the params entry with defaults if it does not exist yet. */
5481 params = hci_conn_params_add(hdev, addr, addr_type);
5485 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry was on before. */
5488 list_del_init(&params->action);
5490 switch (auto_connect) {
5491 case HCI_AUTO_CONN_DISABLED:
5492 case HCI_AUTO_CONN_LINK_LOSS:
5493 __hci_update_background_scan(req);
5495 case HCI_AUTO_CONN_REPORT:
5496 list_add(&params->action, &hdev->pend_le_reports);
5497 __hci_update_background_scan(req);
5499 case HCI_AUTO_CONN_DIRECT:
5500 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection when none is established yet. */
5501 if (!is_connected(hdev, addr, addr_type)) {
5502 list_add(&params->action, &hdev->pend_le_conns);
5503 __hci_update_background_scan(req);
5508 params->auto_connect = auto_connect;
5510 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT Device Added event to all mgmt sockets except sk.
 * NOTE(review): ev.action is not visibly assigned in this extract —
 * confirm against the full source that the action parameter is copied
 * into the event.
 */
5516 static void device_added(struct sock *sk, struct hci_dev *hdev,
5517 bdaddr_t *bdaddr, u8 type, u8 action)
5519 struct mgmt_ev_device_added ev;
5521 bacpy(&ev.addr.bdaddr, bdaddr);
5522 ev.addr.type = type;
5525 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request callback for Add Device: finish the pending mgmt command
 * with the translated controller status.
 */
5528 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5530 struct pending_cmd *cmd;
5532 BT_DBG("status 0x%02x", status);
5536 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5540 cmd->cmd_complete(cmd, mgmt_status(status));
5541 mgmt_pending_remove(cmd);
5544 hci_dev_unlock(hdev);
/* MGMT Add Device handler. For BR/EDR (action 0x01 only) the address is
 * added to the whitelist and page scanning updated; for LE the action
 * selects the auto-connect policy (0x02 always, 0x01 direct,
 * otherwise report) applied via hci_conn_params_set().
 */
5547 static int add_device(struct sock *sk, struct hci_dev *hdev,
5548 void *data, u16 len)
5550 struct mgmt_cp_add_device *cp = data;
5551 struct pending_cmd *cmd;
5552 struct hci_request req;
5553 u8 auto_conn, addr_type;
5556 BT_DBG("%s", hdev->name);
/* BDADDR_ANY is not a real device and cannot be added. */
5558 if (!bdaddr_type_is_valid(cp->addr.type) ||
5559 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5560 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5561 MGMT_STATUS_INVALID_PARAMS,
5562 &cp->addr, sizeof(cp->addr))
5564 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5565 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5566 MGMT_STATUS_INVALID_PARAMS,
5567 &cp->addr, sizeof(cp->addr));
5569 hci_req_init(&req, hdev);
5573 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5579 cmd->cmd_complete = addr_cmd_complete;
5581 if (cp->addr.type == BDADDR_BREDR) {
5582 /* Only incoming connections action is supported for now */
5583 if (cp->action != 0x01) {
5584 err = cmd->cmd_complete(cmd,
5585 MGMT_STATUS_INVALID_PARAMS);
5586 mgmt_pending_remove(cmd);
5590 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Whitelist change may require enabling page scan. */
5595 __hci_update_page_scan(&req);
5600 if (cp->addr.type == BDADDR_LE_PUBLIC)
5601 addr_type = ADDR_LE_DEV_PUBLIC;
5603 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt action onto the kernel auto-connect policy. */
5605 if (cp->action == 0x02)
5606 auto_conn = HCI_AUTO_CONN_ALWAYS;
5607 else if (cp->action == 0x01)
5608 auto_conn = HCI_AUTO_CONN_DIRECT;
5610 auto_conn = HCI_AUTO_CONN_REPORT;
5612 /* If the connection parameters don't exist for this device,
5613 * they will be created and configured with defaults.
5615 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5617 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5618 mgmt_pending_remove(cmd);
5623 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5625 err = hci_req_run(&req, add_device_complete);
5627 /* ENODATA means no HCI commands were needed (e.g. if
5628 * the adapter is powered off).
5630 if (err == -ENODATA)
5631 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5632 mgmt_pending_remove(cmd);
5636 hci_dev_unlock(hdev);
/* Emit the MGMT Device Removed event to all mgmt sockets except sk. */
5640 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5641 bdaddr_t *bdaddr, u8 type)
5643 struct mgmt_ev_device_removed ev;
5645 bacpy(&ev.addr.bdaddr, bdaddr);
5646 ev.addr.type = type;
5648 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request callback for Remove Device: finish the pending mgmt
 * command with the translated controller status.
 */
5651 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5653 struct pending_cmd *cmd;
5655 BT_DBG("status 0x%02x", status);
5659 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5663 cmd->cmd_complete(cmd, mgmt_status(status));
5664 mgmt_pending_remove(cmd);
5667 hci_dev_unlock(hdev);
/* MGMT Remove Device handler. A specific address removes that entry
 * (BR/EDR whitelist or LE conn params); BDADDR_ANY (with type 0) wipes
 * the whole whitelist and every non-disabled LE connection parameter.
 */
5670 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5671 void *data, u16 len)
5673 struct mgmt_cp_remove_device *cp = data;
5674 struct pending_cmd *cmd;
5675 struct hci_request req;
5678 BT_DBG("%s", hdev->name);
5680 hci_req_init(&req, hdev);
5684 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5690 cmd->cmd_complete = addr_cmd_complete;
/* Non-ANY address: remove exactly one device entry. */
5692 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5693 struct hci_conn_params *params;
5696 if (!bdaddr_type_is_valid(cp->addr.type)) {
5697 err = cmd->cmd_complete(cmd,
5698 MGMT_STATUS_INVALID_PARAMS);
5699 mgmt_pending_remove(cmd);
5703 if (cp->addr.type == BDADDR_BREDR) {
5704 err = hci_bdaddr_list_del(&hdev->whitelist,
/* Not on the whitelist: nothing to remove. */
5708 err = cmd->cmd_complete(cmd,
5709 MGMT_STATUS_INVALID_PARAMS);
5710 mgmt_pending_remove(cmd);
5714 __hci_update_page_scan(&req);
5716 device_removed(sk, hdev, &cp->addr.bdaddr,
5721 if (cp->addr.type == BDADDR_LE_PUBLIC)
5722 addr_type = ADDR_LE_DEV_PUBLIC;
5724 addr_type = ADDR_LE_DEV_RANDOM;
5726 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5729 err = cmd->cmd_complete(cmd,
5730 MGMT_STATUS_INVALID_PARAMS);
5731 mgmt_pending_remove(cmd);
/* Disabled entries were never "added" via mgmt, so refuse. */
5735 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5736 err = cmd->cmd_complete(cmd,
5737 MGMT_STATUS_INVALID_PARAMS);
5738 mgmt_pending_remove(cmd);
5742 list_del(&params->action);
5743 list_del(&params->list);
5745 __hci_update_background_scan(&req);
5747 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: bulk removal of all devices. */
5749 struct hci_conn_params *p, *tmp;
5750 struct bdaddr_list *b, *btmp;
/* For the wildcard address only type 0 is meaningful. */
5752 if (cp->addr.type) {
5753 err = cmd->cmd_complete(cmd,
5754 MGMT_STATUS_INVALID_PARAMS);
5755 mgmt_pending_remove(cmd);
5759 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5760 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5765 __hci_update_page_scan(&req);
/* Disabled entries are kept: they hold stored connection parameters
 * that were not added through Add Device.
 */
5767 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5768 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5770 device_removed(sk, hdev, &p->addr, p->addr_type);
5771 list_del(&p->action);
5776 BT_DBG("All LE connection parameters were removed");
5778 __hci_update_background_scan(&req);
5782 err = hci_req_run(&req, remove_device_complete);
5784 /* ENODATA means no HCI commands were needed (e.g. if
5785 * the adapter is powered off).
5787 if (err == -ENODATA)
5788 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5789 mgmt_pending_remove(cmd);
5793 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters handler: replace all stored
 * (auto-connect-disabled) LE connection parameters with the supplied
 * list. Invalid individual entries are logged and skipped rather than
 * failing the whole command.
 */
5797 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5800 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps expected_len below within u16 range. */
5801 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5802 sizeof(struct mgmt_conn_param));
5803 u16 param_count, expected_len;
5806 if (!lmp_le_capable(hdev))
5807 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5808 MGMT_STATUS_NOT_SUPPORTED);
5810 param_count = __le16_to_cpu(cp->param_count);
5811 if (param_count > max_param_count) {
5812 BT_ERR("load_conn_param: too big param_count value %u",
5814 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5815 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload length exactly. */
5818 expected_len = sizeof(*cp) + param_count *
5819 sizeof(struct mgmt_conn_param);
5820 if (expected_len != len) {
5821 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5823 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5824 MGMT_STATUS_INVALID_PARAMS);
5827 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop previously stored (disabled) parameters before loading. */
5831 hci_conn_params_clear_disabled(hdev);
5833 for (i = 0; i < param_count; i++) {
5834 struct mgmt_conn_param *param = &cp->params[i];
5835 struct hci_conn_params *hci_param;
5836 u16 min, max, latency, timeout;
5839 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5842 if (param->addr.type == BDADDR_LE_PUBLIC) {
5843 addr_type = ADDR_LE_DEV_PUBLIC;
5844 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5845 addr_type = ADDR_LE_DEV_RANDOM;
/* Bad entries are skipped, not fatal. */
5847 BT_ERR("Ignoring invalid connection parameters");
5851 min = le16_to_cpu(param->min_interval);
5852 max = le16_to_cpu(param->max_interval);
5853 latency = le16_to_cpu(param->latency);
5854 timeout = le16_to_cpu(param->timeout);
5856 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5857 min, max, latency, timeout);
5859 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5860 BT_ERR("Ignoring invalid connection parameters");
5864 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5867 BT_ERR("Failed to add connection parameters");
5871 hci_param->conn_min_interval = min;
5872 hci_param->conn_max_interval = max;
5873 hci_param->conn_latency = latency;
5874 hci_param->supervision_timeout = timeout;
5877 hci_dev_unlock(hdev);
5879 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* MGMT Set External Configuration handler: mark the controller as
 * externally configured (or not). Only valid while powered off and on
 * controllers with the EXTERNAL_CONFIG quirk. A change may move the
 * device between the configured and unconfigured index lists.
 */
5882 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5883 void *data, u16 len)
5885 struct mgmt_cp_set_external_config *cp = data;
5889 BT_DBG("%s", hdev->name);
5891 if (hdev_is_powered(hdev))
5892 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5893 MGMT_STATUS_REJECTED);
5895 if (cp->config != 0x00 && cp->config != 0x01)
5896 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5897 MGMT_STATUS_INVALID_PARAMS);
5899 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5900 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5901 MGMT_STATUS_NOT_SUPPORTED);
5906 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5909 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5912 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5919 err = new_options(hdev, sk);
/* When the configured-ness changed, the device switches index lists:
 * remove it from the old one and re-add it after flipping the
 * HCI_UNCONFIGURED flag.
 */
5921 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5922 mgmt_index_removed(hdev);
5924 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5925 set_bit(HCI_CONFIG, &hdev->dev_flags);
5926 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5928 queue_work(hdev->req_workqueue, &hdev->power_on);
5930 set_bit(HCI_RAW, &hdev->flags);
5931 mgmt_index_added(hdev);
5936 hci_dev_unlock(hdev);
/* MGMT Set Public Address handler: store the public address for a
 * controller whose driver provides a set_bdaddr callback. Only valid
 * while powered off. If this completes the configuration of an
 * unconfigured controller, it is re-registered as a configured index
 * and powered on to apply the address.
 */
5940 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5941 void *data, u16 len)
5943 struct mgmt_cp_set_public_address *cp = data;
5947 BT_DBG("%s", hdev->name);
5949 if (hdev_is_powered(hdev))
5950 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5951 MGMT_STATUS_REJECTED);
/* The all-zero address is not a valid public address. */
5953 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5954 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5955 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook the address cannot be programmed. */
5957 if (!hdev->set_bdaddr)
5958 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5959 MGMT_STATUS_NOT_SUPPORTED);
5963 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5964 bacpy(&hdev->public_addr, &cp->bdaddr);
5966 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5973 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5974 err = new_options(hdev, sk);
/* Address was the missing piece: promote to a configured index and
 * power the controller on so the new address takes effect.
 */
5976 if (is_configured(hdev)) {
5977 mgmt_index_removed(hdev);
5979 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5981 set_bit(HCI_CONFIG, &hdev->dev_flags);
5982 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5984 queue_work(hdev->req_workqueue, &hdev->power_on);
5988 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed directly by opcode (slot 0 is
 * unused). For each handler the boolean selects variable-length semantics:
 * when true, data_len is the minimum payload size; when false, it must
 * match exactly (enforced in mgmt_control()). NOTE(review): the closing
 * "};" of this array appears elided in this view.
 */
5992 static const struct mgmt_handler {
5993 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5997 } mgmt_handlers[] = {
5998 { NULL }, /* 0x0000 (no command) */
5999 { read_version, false, MGMT_READ_VERSION_SIZE },
6000 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
6001 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
6002 { read_controller_info, false, MGMT_READ_INFO_SIZE },
6003 { set_powered, false, MGMT_SETTING_SIZE },
6004 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
6005 { set_connectable, false, MGMT_SETTING_SIZE },
6006 { set_fast_connectable, false, MGMT_SETTING_SIZE },
6007 { set_bondable, false, MGMT_SETTING_SIZE },
6008 { set_link_security, false, MGMT_SETTING_SIZE },
6009 { set_ssp, false, MGMT_SETTING_SIZE },
6010 { set_hs, false, MGMT_SETTING_SIZE },
6011 { set_le, false, MGMT_SETTING_SIZE },
6012 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
6013 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
6014 { add_uuid, false, MGMT_ADD_UUID_SIZE },
6015 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
6016 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
6017 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
6018 { disconnect, false, MGMT_DISCONNECT_SIZE },
6019 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
6020 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
6021 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6022 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
6023 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
6024 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6025 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
6026 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
6027 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6028 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
6029 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6030 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6031 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
6032 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6033 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
6034 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
6035 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
6036 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
6037 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
6038 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
6039 { set_advertising, false, MGMT_SETTING_SIZE },
6040 { set_bredr, false, MGMT_SETTING_SIZE },
6041 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
6042 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
6043 { set_secure_conn, false, MGMT_SETTING_SIZE },
6044 { set_debug_keys, false, MGMT_SETTING_SIZE },
6045 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
6046 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
6047 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
6048 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
6049 { add_device, false, MGMT_ADD_DEVICE_SIZE },
6050 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
6051 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
6052 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
6053 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
6054 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
6055 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
6056 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for mgmt commands arriving on an HCI control socket. Copies
 * the message, validates header/opcode/index, then dispatches through
 * mgmt_handlers[]. NOTE(review): error-path lines (labels, kfree of buf,
 * hci_dev_put, returns) appear elided in this view.
 */
6059 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6063 struct mgmt_hdr *hdr;
6064 u16 opcode, index, len;
6065 struct hci_dev *hdev = NULL;
6066 const struct mgmt_handler *handler;
6069 BT_DBG("got %zu bytes", msglen);
/* A message must at least carry a full mgmt header. */
6071 if (msglen < sizeof(*hdr))
6074 buf = kmalloc(msglen, GFP_KERNEL);
6078 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields arrive little-endian on the wire. */
6084 opcode = __le16_to_cpu(hdr->opcode);
6085 index = __le16_to_cpu(hdr->index);
6086 len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received. */
6088 if (len != msglen - sizeof(*hdr)) {
6093 if (index != MGMT_INDEX_NONE) {
6094 hdev = hci_dev_get(index);
6096 err = cmd_status(sk, index, opcode,
6097 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup/config, or claimed by a user channel,
 * are not addressable via mgmt.
 */
6101 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6102 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6103 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6104 err = cmd_status(sk, index, opcode,
6105 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers only accept the configuration opcodes. */
6109 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6110 opcode != MGMT_OP_READ_CONFIG_INFO &&
6111 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
6112 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
6113 err = cmd_status(sk, index, opcode,
6114 MGMT_STATUS_INVALID_INDEX);
6119 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
6120 mgmt_handlers[opcode].func == NULL) {
6121 BT_DBG("Unknown op %u", opcode);
6122 err = cmd_status(sk, index, opcode,
6123 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global (index-less) opcodes must not name a controller, and
 * controller-specific opcodes must name one.
 */
6127 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6128 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6129 err = cmd_status(sk, index, opcode,
6130 MGMT_STATUS_INVALID_INDEX);
6134 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
6135 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6136 err = cmd_status(sk, index, opcode,
6137 MGMT_STATUS_INVALID_INDEX);
6141 handler = &mgmt_handlers[opcode];
/* var_len handlers take data_len as a minimum; others require an
 * exact match.
 */
6143 if ((handler->var_len && len < handler->data_len) ||
6144 (!handler->var_len && len != handler->data_len)) {
6145 err = cmd_status(sk, index, opcode,
6146 MGMT_STATUS_INVALID_PARAMS);
6151 mgmt_init_hdev(sk, hdev);
/* Payload starts immediately after the mgmt header. */
6153 cp = buf + sizeof(*hdr);
6155 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller: emits UNCONF_INDEX_ADDED while
 * the controller is unconfigured, INDEX_ADDED otherwise. Non-BR/EDR and
 * raw-quirk devices are never announced.
 */
6169 void mgmt_index_added(struct hci_dev *hdev)
6171 if (hdev->dev_type != HCI_BREDR)
6174 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6177 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6178 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6180 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller. All pending mgmt commands for the
 * device are completed with INVALID_INDEX before the removal event is
 * sent (unconfigured vs. configured variant, mirroring mgmt_index_added).
 */
6183 void mgmt_index_removed(struct hci_dev *hdev)
6185 u8 status = MGMT_STATUS_INVALID_INDEX;
6187 if (hdev->dev_type != HCI_BREDR)
6190 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 matches every pending command for this device. */
6193 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6195 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6196 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6198 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6201 /* This function requires the caller holds hdev->lock */
/* Rebuild the pending LE auto-connect/report action lists from the stored
 * connection parameters, then refresh background scanning. NOTE(review):
 * the break statements and closing braces of the switch/loop appear
 * elided in this view.
 */
6202 static void restart_le_actions(struct hci_request *req)
6204 struct hci_dev *hdev = req->hdev;
6205 struct hci_conn_params *p;
6207 list_for_each_entry(p, &hdev->le_conn_params, list) {
6208 /* Needed for AUTO_OFF case where might not "really"
6209 * have been powered off.
/* Detach from whichever action list the entry was on before
 * re-adding it according to its auto_connect policy.
 */
6211 list_del_init(&p->action);
6213 switch (p->auto_connect) {
6214 case HCI_AUTO_CONN_DIRECT:
6215 case HCI_AUTO_CONN_ALWAYS:
6216 list_add(&p->action, &hdev->pend_le_conns);
6218 case HCI_AUTO_CONN_REPORT:
6219 list_add(&p->action, &hdev->pend_le_reports);
6226 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence: responds to
 * pending SET_POWERED commands and broadcasts the new settings.
 * NOTE(review): hci_dev_lock and the SMP registration call referenced by
 * the comment appear elided in this view.
 */
6229 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6231 struct cmd_lookup match = { NULL, hdev };
6233 BT_DBG("status 0x%02x", status);
6236 /* Register the available SMP channels (BR/EDR and LE) only
6237 * when successfully powering on the controller. This late
6238 * registration is required so that LE SMP can clearly
6239 * decide if the public address or static address is used.
6246 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6248 new_settings(hdev, match.sk);
6250 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state with
 * the mgmt-level settings after power-on: SSP, Secure Connections, LE
 * host support, advertising data, auth enable and page scan.
 * NOTE(review): several declarations (ssp, sc, link_sec) and braces
 * appear elided in this view.
 */
6256 static int powered_update_hci(struct hci_dev *hdev)
6258 struct hci_request req;
6261 hci_req_init(&req, hdev);
/* Enable host SSP support if mgmt says enabled but the controller's
 * host feature bit disagrees.
 */
6263 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6264 !lmp_host_ssp_capable(hdev)) {
6267 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
6270 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6272 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
6275 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6276 lmp_bredr_capable(hdev)) {
6277 struct hci_cp_write_le_host_supported cp;
6282 /* Check first if we already have the right
6283 * host state (host features set)
6285 if (cp.le != lmp_host_le_capable(hdev) ||
6286 cp.simul != lmp_host_le_br_capable(hdev))
6287 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6291 if (lmp_le_capable(hdev)) {
6292 /* Make sure the controller has a good default for
6293 * advertising data. This also applies to the case
6294 * where BR/EDR was toggled during the AUTO_OFF phase.
6296 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6297 update_adv_data(&req);
6298 update_scan_rsp_data(&req);
6301 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6302 enable_advertising(&req);
6304 restart_le_actions(&req);
/* Only touch WRITE_AUTH_ENABLE when the desired link-security
 * setting differs from the controller's current AUTH flag.
 */
6307 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6308 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6309 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6310 sizeof(link_sec), &link_sec);
6312 if (lmp_bredr_capable(hdev)) {
6313 write_fast_connectable(&req, false);
6314 __hci_update_page_scan(&req);
/* powered_complete() runs when the queued commands finish. */
6320 return hci_req_run(&req, powered_complete);
/* Notify mgmt about a power state change. On power-on, hand off to
 * powered_update_hci(); on power-off, fail all pending commands and emit
 * a zero class-of-device event. NOTE(review): control-flow lines between
 * the power-on and power-off paths appear elided in this view.
 */
6323 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6325 struct cmd_lookup match = { NULL, hdev };
6326 u8 status, zero_cod[] = { 0, 0, 0 };
6329 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6333 if (powered_update_hci(hdev) == 0)
6336 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6341 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6343 /* If the power off is because of hdev unregistration let
6344 * use the appropriate INVALID_INDEX status. Otherwise use
6345 * NOT_POWERED. We cover both scenarios here since later in
6346 * mgmt_index_removed() any hci_conn callbacks will have already
6347 * been triggered, potentially causing misleading DISCONNECTED
6350 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6351 status = MGMT_STATUS_INVALID_INDEX;
6353 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 completes every remaining pending command. */
6355 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Only announce class-of-device reset if it was non-zero. */
6357 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6358 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6359 zero_cod, sizeof(zero_cod), NULL);
6362 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: maps -ERFKILL to the dedicated
 * RFKILLED status, anything else to FAILED.
 */
6370 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6372 struct pending_cmd *cmd;
6375 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6379 if (err == -ERFKILL)
6380 status = MGMT_STATUS_RFKILLED;
6382 status = MGMT_STATUS_FAILED;
6384 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6386 mgmt_pending_remove(cmd);
/* Discoverable-timeout expiry: clears the discoverable flags, disables
 * inquiry scan on BR/EDR (keeping page scan), refreshes LE advertising
 * data, and broadcasts the new settings.
 */
6389 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6391 struct hci_request req;
6395 /* When discoverable timeout triggers, then just make sure
6396 * the limited discoverable flag is cleared. Even in the case
6397 * of a timeout triggered from general discoverable, it is
6398 * safe to unconditionally clear the flag.
6400 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6401 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6403 hci_req_init(&req, hdev);
6404 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE alone: stay connectable but no longer discoverable. */
6405 u8 scan = SCAN_PAGE;
6406 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6407 sizeof(scan), &scan);
/* Advertising data carries the discoverable flags and must be
 * regenerated.
 */
6410 update_adv_data(&req);
6411 hci_req_run(&req, NULL);
6413 hdev->discov_timeout = 0;
6415 new_settings(hdev, NULL);
6417 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key;
 * store_hint tells userspace whether to persist it.
 */
6420 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6423 struct mgmt_ev_new_link_key ev;
6425 memset(&ev, 0, sizeof(ev));
6427 ev.store_hint = persistent;
6428 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6429 ev.key.addr.type = BDADDR_BREDR;
6430 ev.key.type = key->type;
6431 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6432 ev.key.pin_len = key->pin_len;
6434 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type (plus its authenticated flag) onto the
 * mgmt LTK type constants exposed to userspace. NOTE(review): the case
 * labels for the first two groups appear elided in this view; the
 * trailing return acts as the fallback for unknown types.
 */
6437 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6439 switch (ltk->type) {
6442 if (ltk->authenticated)
6443 return MGMT_LTK_AUTHENTICATED;
6444 return MGMT_LTK_UNAUTHENTICATED;
6446 if (ltk->authenticated)
6447 return MGMT_LTK_P256_AUTH;
6448 return MGMT_LTK_P256_UNAUTH;
6449 case SMP_LTK_P256_DEBUG:
6450 return MGMT_LTK_P256_DEBUG;
6453 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY. Keys belonging to non-identity random
 * addresses get store_hint 0 since they cannot be re-matched later.
 */
6456 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6458 struct mgmt_ev_new_long_term_key ev;
6460 memset(&ev, 0, sizeof(ev));
6462 /* Devices using resolvable or non-resolvable random addresses
6463 * without providing an indentity resolving key don't require
6464 * to store long term keys. Their addresses will change the
6467 * Only when a remote device provides an identity address
6468 * make sure the long term key is stored. If the remote
6469 * identity is known, the long term keys are internally
6470 * mapped to the identity address. So allow static random
6471 * and public addresses here.
/* Top two address bits != 11 means a non-static (non-identity)
 * random address.
 */
6473 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6474 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6475 ev.store_hint = 0x00;
6477 ev.store_hint = persistent;
6479 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6480 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6481 ev.key.type = mgmt_ltk_type(key);
6482 ev.key.enc_size = key->enc_size;
6483 ev.key.ediv = key->ediv;
6484 ev.key.rand = key->rand;
6486 if (key->type == SMP_LTK)
6489 memcpy(ev.key.val, key->val, sizeof(key->val));
6491 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK. Only hint storage when the device actually uses
 * a resolvable private address (irk->rpa set), per the comment below.
 */
6494 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6496 struct mgmt_ev_new_irk ev;
6498 memset(&ev, 0, sizeof(ev));
6500 /* For identity resolving keys from devices that are already
6501 * using a public address or static random address, do not
6502 * ask for storing this key. The identity resolving key really
6503 * is only mandatory for devices using resovlable random
6506 * Storing all identity resolving keys has the downside that
6507 * they will be also loaded on next boot of they system. More
6508 * identity resolving keys, means more time during scanning is
6509 * needed to actually resolve these addresses.
6511 if (bacmp(&irk->rpa, BDADDR_ANY))
6512 ev.store_hint = 0x01;
6514 ev.store_hint = 0x00;
6516 bacpy(&ev.rpa, &irk->rpa);
6517 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6518 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6519 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6521 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key; same
 * identity-address gating as mgmt_new_ltk().
 */
6524 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6527 struct mgmt_ev_new_csrk ev;
6529 memset(&ev, 0, sizeof(ev));
6531 /* Devices using resolvable or non-resolvable random addresses
6532 * without providing an indentity resolving key don't require
6533 * to store signature resolving keys. Their addresses will change
6534 * the next time around.
6536 * Only when a remote device provides an identity address
6537 * make sure the signature resolving key is stored. So allow
6538 * static random and public addresses here.
/* Non-static random address (top bits != 11): do not persist. */
6540 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6541 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6542 ev.store_hint = 0x00;
6544 ev.store_hint = persistent;
6546 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6547 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6548 ev.key.master = csrk->master;
6549 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6551 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with remote-suggested LE connection
 * parameters. Silently ignored for non-identity addresses, which cannot
 * be stored meaningfully.
 */
6554 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6555 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6556 u16 max_interval, u16 latency, u16 timeout)
6558 struct mgmt_ev_new_conn_param ev;
6560 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6563 memset(&ev, 0, sizeof(ev));
6564 bacpy(&ev.addr.bdaddr, bdaddr);
6565 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6566 ev.store_hint = store_hint;
/* Event fields are little-endian on the wire. */
6567 ev.min_interval = cpu_to_le16(min_interval);
6568 ev.max_interval = cpu_to_le16(max_interval);
6569 ev.latency = cpu_to_le16(latency);
6570 ev.timeout = cpu_to_le16(timeout);
6572 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) at offset
 * eir_len in the buffer. NOTE(review): the return of the updated length
 * appears elided in this view; callers use the returned new length.
 * Caller is responsible for ensuring the buffer is large enough.
 */
6575 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* EIR length byte counts the type byte plus the payload. */
6578 eir[eir_len++] = sizeof(type) + data_len;
6579 eir[eir_len++] = type;
6580 memcpy(&eir[eir_len], data, data_len);
6581 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED with EIR data built from either the LE
 * advertising data or (for BR/EDR) the remote name and class of device.
 * NOTE(review): the buf declaration, eir_len initialization and some
 * braces appear elided in this view.
 */
6586 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6587 u32 flags, u8 *name, u8 name_len)
6590 struct mgmt_ev_device_connected *ev = (void *) buf;
6593 bacpy(&ev->addr.bdaddr, &conn->dst);
6594 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6596 ev->flags = __cpu_to_le32(flags);
6598 /* We must ensure that the EIR Data fields are ordered and
6599 * unique. Keep it simple for now and avoid the problem by not
6600 * adding any BR/EDR data to the LE adv.
6602 if (conn->le_adv_data_len > 0) {
6603 memcpy(&ev->eir[eir_len],
6604 conn->le_adv_data, conn->le_adv_data_len);
6605 eir_len = conn->le_adv_data_len;
/* BR/EDR (or LE without adv data): append name and, when known,
 * the class of device.
 */
6608 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6611 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6612 eir_len = eir_append_data(ev->eir, eir_len,
6614 conn->dev_class, 3);
6617 ev->eir_len = cpu_to_le16(eir_len);
6619 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6620 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command
 * with success and remove it. NOTE(review): the lines that publish
 * cmd->sk through the *sk out-parameter appear elided in this view.
 */
6623 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6625 struct sock **sk = data;
6627 cmd->cmd_complete(cmd, 0);
6632 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: signal DEVICE_UNPAIRED for the address
 * in the pending UNPAIR_DEVICE command, then complete and remove it.
 */
6635 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6637 struct hci_dev *hdev = data;
6638 struct mgmt_cp_unpair_device *cp = cmd->param;
6640 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6642 cmd->cmd_complete(cmd, 0);
6643 mgmt_pending_remove(cmd);
/* Return whether a pending SET_POWERED(off) command exists, i.e. the
 * controller is in the middle of powering down. NOTE(review): the lines
 * checking cmd/cp->val and the return appear elided in this view.
 */
6646 bool mgmt_powering_down(struct hci_dev *hdev)
6648 struct pending_cmd *cmd;
6649 struct mgmt_mode *cp;
6651 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED for ACL/LE links and complete pending
 * DISCONNECT / UNPAIR_DEVICE commands. If this was the last connection
 * during a power-down, expedite the queued power_off work.
 */
6662 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6663 u8 link_type, u8 addr_type, u8 reason,
6664 bool mgmt_connected)
6666 struct mgmt_ev_device_disconnected ev;
6667 struct sock *sk = NULL;
6669 /* The connection is still in hci_conn_hash so test for 1
6670 * instead of 0 to know if this is the last one.
6672 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6673 cancel_delayed_work(&hdev->power_off);
6674 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Never announced as connected, so nothing to report. */
6677 if (!mgmt_connected)
/* Only ACL and LE links are visible at the mgmt layer. */
6680 if (link_type != ACL_LINK && link_type != LE_LINK)
6683 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6685 bacpy(&ev.addr.bdaddr, bdaddr);
6686 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6689 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6694 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report failure of an HCI disconnect: flush pending UNPAIR_DEVICE
 * commands, then complete the matching DISCONNECT command (same address
 * and type) with the translated status. NOTE(review): early-return lines
 * for a missing/non-matching cmd appear elided in this view.
 */
6698 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6699 u8 link_type, u8 addr_type, u8 status)
6701 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6702 struct mgmt_cp_disconnect *cp;
6703 struct pending_cmd *cmd;
6705 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6708 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* The pending command must refer to the same remote address. */
6714 if (bacmp(bdaddr, &cp->addr.bdaddr))
6717 if (cp->addr.type != bdaddr_type)
6720 cmd->cmd_complete(cmd, mgmt_status(status));
6721 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED; also expedites power_off work if this was
 * the last connection during a power-down (same pattern as
 * mgmt_device_disconnected).
 */
6724 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6725 u8 addr_type, u8 status)
6727 struct mgmt_ev_connect_failed ev;
6729 /* The connection is still in hci_conn_hash so test for 1
6730 * instead of 0 to know if this is the last one.
6732 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6733 cancel_delayed_work(&hdev->power_off);
6734 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6737 bacpy(&ev.addr.bdaddr, bdaddr);
6738 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6739 ev.status = mgmt_status(status);
6741 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST so userspace can supply a PIN for a
 * BR/EDR pairing. NOTE(review): the line copying 'secure' into the event
 * appears elided in this view.
 */
6744 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6746 struct mgmt_ev_pin_code_request ev;
6748 bacpy(&ev.addr.bdaddr, bdaddr);
6749 ev.addr.type = BDADDR_BREDR;
6752 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated HCI
 * status and remove it.
 */
6755 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6758 struct pending_cmd *cmd;
6760 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6764 cmd->cmd_complete(cmd, mgmt_status(status));
6765 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated HCI
 * status and remove it.
 */
6768 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6771 struct pending_cmd *cmd;
6773 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6777 cmd->cmd_complete(cmd, mgmt_status(status));
6778 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm a
 * numeric-comparison value during pairing.
 */
6781 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6782 u8 link_type, u8 addr_type, u32 value,
6785 struct mgmt_ev_user_confirm_request ev;
6787 BT_DBG("%s", hdev->name);
6789 bacpy(&ev.addr.bdaddr, bdaddr);
6790 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6791 ev.confirm_hint = confirm_hint;
6792 ev.value = cpu_to_le32(value);
6794 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to enter a passkey
 * during pairing.
 */
6798 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6799 u8 link_type, u8 addr_type)
6801 struct mgmt_ev_user_passkey_request ev;
6803 BT_DBG("%s", hdev->name);
6805 bacpy(&ev.addr.bdaddr, bdaddr);
6806 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6808 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * variants: finds the pending command for 'opcode', completes it with
 * the translated status and removes it.
 */
6812 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6813 u8 link_type, u8 addr_type, u8 status,
6816 struct pending_cmd *cmd;
6818 cmd = mgmt_pending_find(opcode, hdev);
6822 cmd->cmd_complete(cmd, mgmt_status(status));
6823 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
6828 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6829 u8 link_type, u8 addr_type, u8 status)
6831 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6832 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
6835 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6836 u8 link_type, u8 addr_type, u8 status)
6838 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6840 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
6843 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6844 u8 link_type, u8 addr_type, u8 status)
6846 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6847 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
6850 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6851 u8 link_type, u8 addr_type, u8 status)
6853 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6855 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey and
 * how many digits have been entered on the remote side.
 */
6858 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6859 u8 link_type, u8 addr_type, u32 passkey,
6862 struct mgmt_ev_passkey_notify ev;
6864 BT_DBG("%s", hdev->name);
6866 bacpy(&ev.addr.bdaddr, bdaddr);
6867 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6868 ev.passkey = __cpu_to_le32(passkey);
6869 ev.entered = entered;
6871 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: emit MGMT_EV_AUTH_FAILED (skipping
 * the socket of the pairing originator if one exists) and complete any
 * matching pairing command. NOTE(review): the ev.status assignment and
 * the guard around the cmd completion appear elided in this view.
 */
6874 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6876 struct mgmt_ev_auth_failed ev;
6877 struct pending_cmd *cmd;
6878 u8 status = mgmt_status(hci_status);
6880 bacpy(&ev.addr.bdaddr, &conn->dst);
6881 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6884 cmd = find_pairing(conn);
6886 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6887 cmd ? cmd->sk : NULL);
6890 cmd->cmd_complete(cmd, status);
6891 mgmt_pending_remove(cmd);
/* Completion of HCI_OP_WRITE_AUTH_ENABLE: on error, fail pending
 * SET_LINK_SECURITY commands; on success, sync the LINK_SECURITY flag
 * with the controller's AUTH flag and broadcast settings if it changed.
 * NOTE(review): error-path braces and the 'changed' guard appear elided
 * in this view.
 */
6895 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6897 struct cmd_lookup match = { NULL, hdev };
6901 u8 mgmt_err = mgmt_status(status);
6902 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6903 cmd_status_rsp, &mgmt_err);
6907 if (test_bit(HCI_AUTH, &hdev->flags))
6908 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6911 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6914 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6918 new_settings(hdev, match.sk);
/* Queue an HCI Write Extended Inquiry Response command that clears the
 * EIR data, and zero the cached copy. No-op when the controller lacks
 * extended inquiry support.
 */
6924 static void clear_eir(struct hci_request *req)
6926 struct hci_dev *hdev = req->hdev;
6927 struct hci_cp_write_eir cp;
6929 if (!lmp_ext_inq_capable(hdev))
6932 memset(hdev->eir, 0, sizeof(hdev->eir));
6934 memset(&cp, 0, sizeof(cp));
6936 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of the SSP mode write: reconcile the SSP_ENABLED/HS_ENABLED
 * flags with the result, answer pending SET_SSP commands, broadcast
 * settings on change, and refresh debug-key mode / EIR accordingly.
 * NOTE(review): several braces, returns and the update_eir/clear_eir
 * branch lines appear elided in this view.
 */
6939 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6941 struct cmd_lookup match = { NULL, hdev };
6942 struct hci_request req;
6943 bool changed = false;
6946 u8 mgmt_err = mgmt_status(status);
/* Failed enable: roll back the optimistically-set flag and tell
 * listeners.
 */
6948 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6949 &hdev->dev_flags)) {
6950 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6951 new_settings(hdev, NULL);
6954 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6960 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6962 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High Speed depends on SSP and is cleared along with it. */
6964 changed = test_and_clear_bit(HCI_HS_ENABLED,
6967 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6970 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6973 new_settings(hdev, match.sk);
6978 hci_req_init(&req, hdev);
6980 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6981 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6982 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6983 sizeof(enable), &enable);
6989 hci_req_run(&req, NULL);
/* Completion of the Secure Connections support write: reconcile the
 * SC_ENABLED/SC_ONLY flags, answer pending SET_SECURE_CONN commands and
 * broadcast settings on change. NOTE(review): braces, returns and the
 * enable-branch guard appear elided in this view.
 */
6992 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6994 struct cmd_lookup match = { NULL, hdev };
6995 bool changed = false;
6998 u8 mgmt_err = mgmt_status(status);
/* Failed enable: roll back both SC flags and notify listeners. */
7001 if (test_and_clear_bit(HCI_SC_ENABLED,
7003 new_settings(hdev, NULL);
7004 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
7007 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
7008 cmd_status_rsp, &mgmt_err);
7013 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
7015 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* SC-only mode cannot remain set once SC itself is disabled. */
7016 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
7019 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
7020 settings_rsp, &match);
7023 new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: capture (and hold a reference on) the
 * first pending command's socket so a later event can be addressed to it.
 */
7029 static void sk_lookup(struct pending_cmd *cmd, void *data)
7031 struct cmd_lookup *match = data;
7033 if (match->sk == NULL) {
7034 match->sk = cmd->sk;
7035 sock_hold(match->sk);
/* Class-of-device write completed: find the socket of whichever pending
 * command (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) triggered it, then
 * broadcast CLASS_OF_DEV_CHANGED to everyone else. NOTE(review): the
 * status guard and the sock_put of the captured socket appear elided in
 * this view.
 */
7039 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7042 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7044 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7045 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7046 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7049 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local-name write completed: cache the new name when no SET_LOCAL_NAME
 * command is pending and emit LOCAL_NAME_CHANGED — unless this happened
 * as part of power-on, where no signal is sent. NOTE(review): the status
 * early-return appears elided in this view.
 */
7056 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7058 struct mgmt_cp_set_local_name ev;
7059 struct pending_cmd *cmd;
7064 memset(&ev, 0, sizeof(ev));
7065 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7066 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7068 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7070 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7072 /* If this is a HCI command related to powering on the
7073 * HCI dev don't send any mgmt signals.
7075 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7079 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7080 cmd ? cmd->sk : NULL);
/* Local OOB data read completed: respond to the pending
 * READ_LOCAL_OOB_DATA command, using the extended (P-192 + P-256)
 * response when Secure Connections is enabled and 256-bit data is
 * available, the legacy response otherwise. NOTE(review): early-return
 * and response-size argument lines appear elided in this view.
 */
7083 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7084 u8 *rand192, u8 *hash256, u8 *rand256,
7087 struct pending_cmd *cmd;
7089 BT_DBG("%s status %u", hdev->name, status);
7091 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7096 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7097 mgmt_status(status));
7099 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7100 struct mgmt_rp_read_local_oob_ext_data rp;
7102 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7103 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7105 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7106 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7108 cmd_complete(cmd->sk, hdev->id,
7109 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7112 struct mgmt_rp_read_local_oob_data rp;
7114 memcpy(rp.hash, hash192, sizeof(rp.hash));
7115 memcpy(rp.rand, rand192, sizeof(rp.rand));
7117 cmd_complete(cmd->sk, hdev->id,
7118 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7123 mgmt_pending_remove(cmd);
/* Linear search: true if the 128-bit uuid appears in the uuids array. */
7126 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7130 for (i = 0; i < uuid_count; i++) {
7131 if (!memcmp(uuid, uuids[i], 16))
/* Walk EIR/advertising data and report whether any contained 16-, 32- or
 * 128-bit service UUID matches the given filter list; 16/32-bit UUIDs
 * are expanded against the Bluetooth base UUID before comparison.
 * NOTE(review): several lines (uuid buffer declaration, zero-length
 * field termination, return statements) appear elided in this view.
 */
7138 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7142 while (parsed < eir_len) {
/* field_len excludes itself; field payload starts at eir[2]. */
7143 u8 field_len = eir[0];
/* Guard against a field that claims to extend past the data. */
7150 if (eir_len - parsed < field_len + 1)
7154 case EIR_UUID16_ALL:
7155 case EIR_UUID16_SOME:
7156 for (i = 0; i + 3 <= field_len; i += 2) {
7157 memcpy(uuid, bluetooth_base_uuid, 16);
/* 16-bit UUID occupies bytes 12-13 of the base UUID. */
7158 uuid[13] = eir[i + 3];
7159 uuid[12] = eir[i + 2];
7160 if (has_uuid(uuid, uuid_count, uuids))
7164 case EIR_UUID32_ALL:
7165 case EIR_UUID32_SOME:
7166 for (i = 0; i + 5 <= field_len; i += 4) {
7167 memcpy(uuid, bluetooth_base_uuid, 16);
/* 32-bit UUID occupies bytes 12-15 of the base UUID. */
7168 uuid[15] = eir[i + 5];
7169 uuid[14] = eir[i + 4];
7170 uuid[13] = eir[i + 3];
7171 uuid[12] = eir[i + 2];
7172 if (has_uuid(uuid, uuid_count, uuids))
7176 case EIR_UUID128_ALL:
7177 case EIR_UUID128_SOME:
7178 for (i = 0; i + 17 <= field_len; i += 16) {
7179 memcpy(uuid, eir + i + 2, 16);
7180 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past length byte plus field contents. */
7186 parsed += field_len + 1;
7187 eir += field_len + 1;
/* Emit MGMT_EV_DEVICE_FOUND for a discovery/scan result, applying the
 * active service-discovery filters: RSSI threshold and UUID list (checked
 * against both EIR/advertising data and scan response). NOTE(review):
 * several declarations (buf, match), returns and braces appear elided in
 * this view.
 */
7193 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7194 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7195 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7198 struct mgmt_ev_device_found *ev = (void *) buf;
7202 /* Don't send events for a non-kernel initiated discovery. With
7203 * LE one exception is if we have pend_le_reports > 0 in which
7204 * case we're doing passive scanning and want these events.
7206 if (!hci_discovery_active(hdev)) {
7207 if (link_type == ACL_LINK)
7209 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7213 /* When using service discovery with a RSSI threshold, then check
7214 * if such a RSSI threshold is specified. If a RSSI threshold has
7215 * been specified, then all results with a RSSI smaller than the
7216 * RSSI threshold will be dropped.
7218 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7219 * the results are also dropped.
7221 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7222 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7225 /* Make sure that the buffer is big enough. The 5 extra bytes
7226 * are for the potential CoD field.
7228 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7231 memset(buf, 0, sizeof(buf));
7233 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7234 * RSSI value was reported as 0 when not available. This behavior
7235 * is kept when using device discovery. This is required for full
7236 * backwards compatibility with the API.
7238 * However when using service discovery, the value 127 will be
7239 * returned when the RSSI is not available.
7241 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
7244 bacpy(&ev->addr.bdaddr, bdaddr);
7245 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7247 ev->flags = cpu_to_le32(flags);
7250 /* When using service discovery and a list of UUID is
7251 * provided, results with no matching UUID should be
7252 * dropped. In case there is a match the result is
7253 * kept and checking possible scan response data
7256 if (hdev->discovery.uuid_count > 0)
7257 match = eir_has_uuids(eir, eir_len,
7258 hdev->discovery.uuid_count,
7259 hdev->discovery.uuids);
7263 if (!match && !scan_rsp_len)
7266 /* Copy EIR or advertising data into event */
7267 memcpy(ev->eir, eir, eir_len);
7269 /* When using service discovery and a list of UUID is
7270 * provided, results with empty EIR or advertising data
7271 * should be dropped since they do not match any UUID.
7273 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
/* Append the class of device only if the EIR does not already
 * carry one.
 */
7279 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7280 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7283 if (scan_rsp_len > 0) {
7284 /* When using service discovery and a list of UUID is
7285 * provided, results with no matching UUID should be
7286 * dropped if there is no previous match from the
7289 if (hdev->discovery.uuid_count > 0) {
7290 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7291 hdev->discovery.uuid_count,
7292 hdev->discovery.uuids))
7296 /* Append scan response data to event */
7297 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7299 /* When using service discovery and a list of UUID is
7300 * provided, results with empty scan response and no
7301 * previous matched advertising data should be dropped.
7303 if (hdev->discovery.uuid_count > 0 && !match)
7307 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7308 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7310 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit MGMT_EV_DEVICE_FOUND carrying only the remote device's complete
 * name, used when the name arrives after the initial discovery result.
 * NOTE(review): the rssi assignment line appears elided in this view.
 */
7313 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7314 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7316 struct mgmt_ev_device_found *ev;
/* +2 for the EIR field's length and type bytes. */
7317 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7320 ev = (struct mgmt_ev_device_found *) buf;
7322 memset(buf, 0, sizeof(buf));
7324 bacpy(&ev->addr.bdaddr, bdaddr);
7325 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7328 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7331 ev->eir_len = cpu_to_le16(eir_len);
7333 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_DISCOVERING announcing the discovery state (and type)
 * change.
 */
7336 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7338 struct mgmt_ev_discovering ev;
7340 BT_DBG("%s discovering %u", hdev->name, discovering);
7342 memset(&ev, 0, sizeof(ev));
7343 ev.type = hdev->discovery.type;
7344 ev.discovering = discovering;
7346 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for re-enabling advertising; only logs the status. */
7349 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7351 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable LE advertising (e.g. after it was implicitly stopped) when
 * the ADVERTISING setting is still on; no-op otherwise.
 */
7354 void mgmt_reenable_advertising(struct hci_dev *hdev)
7356 struct hci_request req;
7358 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7361 hci_req_init(&req, hdev);
7362 enable_advertising(&req);
7363 hci_req_run(&req, adv_enable_complete);