2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
41 static const u16 mgmt_commands[] = {
42 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_SET_DISCOVERABLE,
46 MGMT_OP_SET_CONNECTABLE,
47 MGMT_OP_SET_FAST_CONNECTABLE,
49 MGMT_OP_SET_LINK_SECURITY,
53 MGMT_OP_SET_DEV_CLASS,
54 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_LOAD_LINK_KEYS,
58 MGMT_OP_LOAD_LONG_TERM_KEYS,
60 MGMT_OP_GET_CONNECTIONS,
61 MGMT_OP_PIN_CODE_REPLY,
62 MGMT_OP_PIN_CODE_NEG_REPLY,
63 MGMT_OP_SET_IO_CAPABILITY,
65 MGMT_OP_CANCEL_PAIR_DEVICE,
66 MGMT_OP_UNPAIR_DEVICE,
67 MGMT_OP_USER_CONFIRM_REPLY,
68 MGMT_OP_USER_CONFIRM_NEG_REPLY,
69 MGMT_OP_USER_PASSKEY_REPLY,
70 MGMT_OP_USER_PASSKEY_NEG_REPLY,
71 MGMT_OP_READ_LOCAL_OOB_DATA,
72 MGMT_OP_ADD_REMOTE_OOB_DATA,
73 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
74 MGMT_OP_START_DISCOVERY,
75 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_UNBLOCK_DEVICE,
79 MGMT_OP_SET_DEVICE_ID,
80 MGMT_OP_SET_ADVERTISING,
82 MGMT_OP_SET_STATIC_ADDRESS,
83 MGMT_OP_SET_SCAN_PARAMS,
84 MGMT_OP_SET_SECURE_CONN,
85 MGMT_OP_SET_DEBUG_KEYS,
88 MGMT_OP_GET_CONN_INFO,
89 MGMT_OP_GET_CLOCK_INFO,
91 MGMT_OP_REMOVE_DEVICE,
92 MGMT_OP_LOAD_CONN_PARAM,
93 MGMT_OP_READ_UNCONF_INDEX_LIST,
94 MGMT_OP_READ_CONFIG_INFO,
95 MGMT_OP_SET_EXTERNAL_CONFIG,
96 MGMT_OP_SET_PUBLIC_ADDRESS,
97 MGMT_OP_START_SERVICE_DISCOVERY,
100 static const u16 mgmt_events[] = {
101 MGMT_EV_CONTROLLER_ERROR,
103 MGMT_EV_INDEX_REMOVED,
104 MGMT_EV_NEW_SETTINGS,
105 MGMT_EV_CLASS_OF_DEV_CHANGED,
106 MGMT_EV_LOCAL_NAME_CHANGED,
107 MGMT_EV_NEW_LINK_KEY,
108 MGMT_EV_NEW_LONG_TERM_KEY,
109 MGMT_EV_DEVICE_CONNECTED,
110 MGMT_EV_DEVICE_DISCONNECTED,
111 MGMT_EV_CONNECT_FAILED,
112 MGMT_EV_PIN_CODE_REQUEST,
113 MGMT_EV_USER_CONFIRM_REQUEST,
114 MGMT_EV_USER_PASSKEY_REQUEST,
116 MGMT_EV_DEVICE_FOUND,
118 MGMT_EV_DEVICE_BLOCKED,
119 MGMT_EV_DEVICE_UNBLOCKED,
120 MGMT_EV_DEVICE_UNPAIRED,
121 MGMT_EV_PASSKEY_NOTIFY,
124 MGMT_EV_DEVICE_ADDED,
125 MGMT_EV_DEVICE_REMOVED,
126 MGMT_EV_NEW_CONN_PARAM,
127 MGMT_EV_UNCONF_INDEX_ADDED,
128 MGMT_EV_UNCONF_INDEX_REMOVED,
129 MGMT_EV_NEW_CONFIG_OPTIONS,
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
135 struct list_head list;
142 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
145 /* HCI to MGMT error code conversion table */
146 static u8 mgmt_status_table[] = {
148 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
149 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
150 MGMT_STATUS_FAILED, /* Hardware Failure */
151 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
152 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
153 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
154 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
155 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
157 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
158 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
159 MGMT_STATUS_BUSY, /* Command Disallowed */
160 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
161 MGMT_STATUS_REJECTED, /* Rejected Security */
162 MGMT_STATUS_REJECTED, /* Rejected Personal */
163 MGMT_STATUS_TIMEOUT, /* Host Timeout */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
165 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
166 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
167 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
168 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
169 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
170 MGMT_STATUS_BUSY, /* Repeated Attempts */
171 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
172 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
173 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
174 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
175 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
176 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
177 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
178 MGMT_STATUS_FAILED, /* Unspecified Error */
179 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
180 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
181 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
182 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
183 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
184 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
185 MGMT_STATUS_FAILED, /* Unit Link Key Used */
186 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
187 MGMT_STATUS_TIMEOUT, /* Instant Passed */
188 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
189 MGMT_STATUS_FAILED, /* Transaction Collision */
190 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
191 MGMT_STATUS_REJECTED, /* QoS Rejected */
192 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
193 MGMT_STATUS_REJECTED, /* Insufficient Security */
194 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
195 MGMT_STATUS_BUSY, /* Role Switch Pending */
196 MGMT_STATUS_FAILED, /* Slot Violation */
197 MGMT_STATUS_FAILED, /* Role Switch Failed */
198 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
199 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
200 MGMT_STATUS_BUSY, /* Host Busy Pairing */
201 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
202 MGMT_STATUS_BUSY, /* Controller Busy */
203 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
204 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
205 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
206 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
207 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
210 static u8 mgmt_status(u8 hci_status)
212 if (hci_status < ARRAY_SIZE(mgmt_status_table))
213 return mgmt_status_table[hci_status];
215 return MGMT_STATUS_FAILED;
218 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
219 struct sock *skip_sk)
222 struct mgmt_hdr *hdr;
224 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
228 hdr = (void *) skb_put(skb, sizeof(*hdr));
229 hdr->opcode = cpu_to_le16(event);
231 hdr->index = cpu_to_le16(hdev->id);
233 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
234 hdr->len = cpu_to_le16(data_len);
237 memcpy(skb_put(skb, data_len), data, data_len);
240 __net_timestamp(skb);
242 hci_send_to_control(skb, skip_sk);
248 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
251 struct mgmt_hdr *hdr;
252 struct mgmt_ev_cmd_status *ev;
255 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
257 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
261 hdr = (void *) skb_put(skb, sizeof(*hdr));
263 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
264 hdr->index = cpu_to_le16(index);
265 hdr->len = cpu_to_le16(sizeof(*ev));
267 ev = (void *) skb_put(skb, sizeof(*ev));
269 ev->opcode = cpu_to_le16(cmd);
271 err = sock_queue_rcv_skb(sk, skb);
278 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
279 void *rp, size_t rp_len)
282 struct mgmt_hdr *hdr;
283 struct mgmt_ev_cmd_complete *ev;
286 BT_DBG("sock %p", sk);
288 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
292 hdr = (void *) skb_put(skb, sizeof(*hdr));
294 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
295 hdr->index = cpu_to_le16(index);
296 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
298 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
299 ev->opcode = cpu_to_le16(cmd);
303 memcpy(ev->data, rp, rp_len);
305 err = sock_queue_rcv_skb(sk, skb);
312 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
315 struct mgmt_rp_read_version rp;
317 BT_DBG("sock %p", sk);
319 rp.version = MGMT_VERSION;
320 rp.revision = cpu_to_le16(MGMT_REVISION);
322 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
326 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_commands *rp;
330 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
331 const u16 num_events = ARRAY_SIZE(mgmt_events);
336 BT_DBG("sock %p", sk);
338 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
340 rp = kmalloc(rp_size, GFP_KERNEL);
344 rp->num_commands = cpu_to_le16(num_commands);
345 rp->num_events = cpu_to_le16(num_events);
347 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
348 put_unaligned_le16(mgmt_commands[i], opcode);
350 for (i = 0; i < num_events; i++, opcode++)
351 put_unaligned_le16(mgmt_events[i], opcode);
353 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
360 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
363 struct mgmt_rp_read_index_list *rp;
369 BT_DBG("sock %p", sk);
371 read_lock(&hci_dev_list_lock);
374 list_for_each_entry(d, &hci_dev_list, list) {
375 if (d->dev_type == HCI_BREDR &&
376 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
380 rp_len = sizeof(*rp) + (2 * count);
381 rp = kmalloc(rp_len, GFP_ATOMIC);
383 read_unlock(&hci_dev_list_lock);
388 list_for_each_entry(d, &hci_dev_list, list) {
389 if (test_bit(HCI_SETUP, &d->dev_flags) ||
390 test_bit(HCI_CONFIG, &d->dev_flags) ||
391 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
394 /* Devices marked as raw-only are neither configured
395 * nor unconfigured controllers.
397 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
400 if (d->dev_type == HCI_BREDR &&
401 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
402 rp->index[count++] = cpu_to_le16(d->id);
403 BT_DBG("Added hci%u", d->id);
407 rp->num_controllers = cpu_to_le16(count);
408 rp_len = sizeof(*rp) + (2 * count);
410 read_unlock(&hci_dev_list_lock);
412 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
420 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
421 void *data, u16 data_len)
423 struct mgmt_rp_read_unconf_index_list *rp;
429 BT_DBG("sock %p", sk);
431 read_lock(&hci_dev_list_lock);
434 list_for_each_entry(d, &hci_dev_list, list) {
435 if (d->dev_type == HCI_BREDR &&
436 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
440 rp_len = sizeof(*rp) + (2 * count);
441 rp = kmalloc(rp_len, GFP_ATOMIC);
443 read_unlock(&hci_dev_list_lock);
448 list_for_each_entry(d, &hci_dev_list, list) {
449 if (test_bit(HCI_SETUP, &d->dev_flags) ||
450 test_bit(HCI_CONFIG, &d->dev_flags) ||
451 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
454 /* Devices marked as raw-only are neither configured
455 * nor unconfigured controllers.
457 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
460 if (d->dev_type == HCI_BREDR &&
461 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
462 rp->index[count++] = cpu_to_le16(d->id);
463 BT_DBG("Added hci%u", d->id);
467 rp->num_controllers = cpu_to_le16(count);
468 rp_len = sizeof(*rp) + (2 * count);
470 read_unlock(&hci_dev_list_lock);
472 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
480 static bool is_configured(struct hci_dev *hdev)
482 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
483 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
486 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
487 !bacmp(&hdev->public_addr, BDADDR_ANY))
493 static __le32 get_missing_options(struct hci_dev *hdev)
497 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
498 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
499 options |= MGMT_OPTION_EXTERNAL_CONFIG;
501 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
502 !bacmp(&hdev->public_addr, BDADDR_ANY))
503 options |= MGMT_OPTION_PUBLIC_ADDRESS;
505 return cpu_to_le32(options);
508 static int new_options(struct hci_dev *hdev, struct sock *skip)
510 __le32 options = get_missing_options(hdev);
512 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
513 sizeof(options), skip);
516 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
518 __le32 options = get_missing_options(hdev);
520 return cmd_complete(sk, hdev->id, opcode, 0, &options,
524 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
525 void *data, u16 data_len)
527 struct mgmt_rp_read_config_info rp;
530 BT_DBG("sock %p %s", sk, hdev->name);
534 memset(&rp, 0, sizeof(rp));
535 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
537 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
538 options |= MGMT_OPTION_EXTERNAL_CONFIG;
540 if (hdev->set_bdaddr)
541 options |= MGMT_OPTION_PUBLIC_ADDRESS;
543 rp.supported_options = cpu_to_le32(options);
544 rp.missing_options = get_missing_options(hdev);
546 hci_dev_unlock(hdev);
548 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
552 static u32 get_supported_settings(struct hci_dev *hdev)
556 settings |= MGMT_SETTING_POWERED;
557 settings |= MGMT_SETTING_BONDABLE;
558 settings |= MGMT_SETTING_DEBUG_KEYS;
559 settings |= MGMT_SETTING_CONNECTABLE;
560 settings |= MGMT_SETTING_DISCOVERABLE;
562 if (lmp_bredr_capable(hdev)) {
563 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
564 settings |= MGMT_SETTING_FAST_CONNECTABLE;
565 settings |= MGMT_SETTING_BREDR;
566 settings |= MGMT_SETTING_LINK_SECURITY;
568 if (lmp_ssp_capable(hdev)) {
569 settings |= MGMT_SETTING_SSP;
570 settings |= MGMT_SETTING_HS;
573 if (lmp_sc_capable(hdev))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
586 settings |= MGMT_SETTING_CONFIGURATION;
591 static u32 get_current_settings(struct hci_dev *hdev)
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
640 #define PNP_INFO_SVCLASS_ID 0x1200
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
650 list_for_each_entry(uuid, &hdev->uuids, list) {
653 if (uuid->size != 16)
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
666 uuids_start[1] = EIR_UUID16_ALL;
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
684 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
686 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
699 uuids_start[1] = EIR_UUID32_ALL;
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
711 uuids_start[0] += sizeof(u32);
717 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
719 u8 *ptr = data, *uuids_start = NULL;
720 struct bt_uuid *uuid;
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
732 uuids_start[1] = EIR_UUID128_ALL;
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
742 memcpy(ptr, uuid->uuid, 16);
744 uuids_start[0] += 16;
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 struct pending_cmd *cmd;
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
766 struct pending_cmd *cmd;
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
771 if (cmd->opcode == opcode)
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
783 name_len = strlen(hdev->dev_name);
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
787 if (name_len > max_len) {
789 ptr[1] = EIR_NAME_SHORT;
791 ptr[1] = EIR_NAME_COMPLETE;
793 ptr[0] = name_len + 1;
795 memcpy(ptr + 2, hdev->dev_name, name_len);
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
804 static void update_scan_rsp_data(struct hci_request *req)
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
813 memset(&cp, 0, sizeof(cp));
815 len = create_scan_rsp_data(hdev, cp.data);
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
831 struct pending_cmd *cmd;
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
838 struct mgmt_mode *cp = cmd->param;
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
855 u8 ad_len = 0, flags = 0;
857 flags |= get_adv_discov_flags(hdev);
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
863 BT_DBG("adv flags 0x%02x", flags);
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
885 static void update_adv_data(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
894 memset(&cp, 0, sizeof(cp));
896 len = create_adv_data(hdev, cp.data);
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
910 int mgmt_update_adv_data(struct hci_dev *hdev)
912 struct hci_request req;
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
917 return hci_req_run(&req, NULL);
920 static void create_eir(struct hci_dev *hdev, u8 *data)
925 name_len = strlen(hdev->dev_name);
931 ptr[1] = EIR_NAME_SHORT;
933 ptr[1] = EIR_NAME_COMPLETE;
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
938 memcpy(ptr + 2, hdev->dev_name, name_len);
940 ptr += (name_len + 2);
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
951 if (hdev->devid_source > 0) {
953 ptr[1] = EIR_DEVICE_ID;
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
968 static void update_eir(struct hci_request *req)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
973 if (!hdev_is_powered(hdev))
976 if (!lmp_ext_inq_capable(hdev))
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
985 memset(&cp, 0, sizeof(cp));
987 create_eir(hdev, cp.data);
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
997 static u8 get_service_classes(struct hci_dev *hdev)
999 struct bt_uuid *uuid;
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
1008 static void update_class(struct hci_request *req)
1010 struct hci_dev *hdev = req->hdev;
1013 BT_DBG("%s", hdev->name);
1015 if (!hdev_is_powered(hdev))
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1037 static bool get_connectable(struct hci_dev *hdev)
1039 struct pending_cmd *cmd;
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 struct mgmt_mode *cp = cmd->param;
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1053 static void disable_advertising(struct hci_request *req)
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1060 static void enable_advertising(struct hci_request *req)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req);
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1080 connectable = get_connectable(hdev);
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1101 static void service_cache_off(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1110 hci_req_init(&req, hdev);
1117 hci_dev_unlock(hdev);
1119 hci_req_run(&req, NULL);
1122 static void rpa_expired(struct work_struct *work)
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 struct hci_request req;
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1159 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1160 void *data, u16 data_len)
1162 struct mgmt_rp_read_info rp;
1164 BT_DBG("sock %p %s", sk, hdev->name);
1168 memset(&rp, 0, sizeof(rp));
1170 bacpy(&rp.bdaddr, &hdev->bdaddr);
1172 rp.version = hdev->hci_ver;
1173 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1175 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1176 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1178 memcpy(rp.dev_class, hdev->dev_class, 3);
1180 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1181 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1183 hci_dev_unlock(hdev);
1185 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 struct hci_dev *hdev, void *data,
1200 struct pending_cmd *cmd;
1202 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1206 cmd->opcode = opcode;
1207 cmd->index = hdev->id;
1209 cmd->param = kmemdup(data, len, GFP_KERNEL);
1215 cmd->param_len = len;
1220 list_add(&cmd->list, &hdev->mgmt_pending);
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1264 static bool hci_stop_discovery(struct hci_request *req)
1266 struct hci_dev *hdev = req->hdev;
1267 struct hci_cp_remote_name_req_cancel cp;
1268 struct inquiry_entry *e;
1270 switch (hdev->discovery.state) {
1271 case DISCOVERY_FINDING:
1272 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1273 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1275 cancel_delayed_work(&hdev->le_scan_disable);
1276 hci_req_add_le_scan_disable(req);
1281 case DISCOVERY_RESOLVING:
1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1296 hci_req_add_le_scan_disable(req);
1306 static int clean_up_hci_state(struct hci_dev *hdev)
1308 struct hci_request req;
1309 struct hci_conn *conn;
1310 bool discov_stopped;
1313 hci_req_init(&req, hdev);
1315 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1316 test_bit(HCI_PSCAN, &hdev->flags)) {
1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1322 disable_advertising(&req);
1324 discov_stopped = hci_stop_discovery(&req);
1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1327 struct hci_cp_disconnect dc;
1328 struct hci_cp_reject_conn_req rej;
1330 switch (conn->state) {
1333 dc.handle = cpu_to_le16(conn->handle);
1334 dc.reason = 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1338 if (conn->type == LE_LINK)
1339 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1341 else if (conn->type == ACL_LINK)
1342 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1346 bacpy(&rej.bdaddr, &conn->dst);
1347 rej.reason = 0x15; /* Terminated due to Power Off */
1348 if (conn->type == ACL_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1351 else if (conn->type == SCO_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1358 err = hci_req_run(&req, clean_up_hci_complete);
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1372 BT_DBG("request for %s", hdev->name);
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1392 err = mgmt_powered(hdev, 1);
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1427 hci_dev_unlock(hdev);
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1440 int mgmt_new_settings(struct hci_dev *hdev)
1442 return new_settings(hdev, NULL);
1447 struct hci_dev *hdev;
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
/* Like cmd_status_rsp(), but prefer the command's own cmd_complete
 * callback when one is set; otherwise fall back to a plain status
 * response.
 */
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 if (cmd->cmd_complete) {
1480 cmd->cmd_complete(cmd, *status);
1481 mgmt_pending_remove(cmd);
1486 cmd_status_rsp(cmd, data);
/* Default cmd_complete implementation: echo the stored request
 * parameters back as the Command Complete payload.
 */
1489 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1491 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1492 cmd->param, cmd->param_len);
/* cmd_complete implementation for address-based commands: reply with
 * just the leading mgmt_addr_info from the stored parameters.
 */
1495 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1497 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1498 sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled by the
 * host, SUCCESS otherwise.
 */
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 if (!lmp_bredr_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 return MGMT_STATUS_REJECTED;
1508 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status, mirroring
 * mgmt_bredr_support() for the LE transport.
 */
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 if (!lmp_le_capable(hdev))
1514 return MGMT_STATUS_NOT_SUPPORTED;
1515 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 return MGMT_STATUS_REJECTED;
1518 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable: on error, fail
 * the pending command and clear limited-discoverable; on success,
 * update the HCI_DISCOVERABLE flag, (re)arm the discoverable timeout,
 * answer the pending command and broadcast New Settings.
 * NOTE(review): interior lines (else-branches, braces) are missing
 * from this listing.
 */
1521 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1524 struct pending_cmd *cmd;
1525 struct mgmt_mode *cp;
1526 struct hci_request req;
1529 BT_DBG("status 0x%02x", status);
1533 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1538 u8 mgmt_err = mgmt_status(status);
1539 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1540 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1546 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the timer that turns discoverable off after discov_timeout s. */
1549 if (hdev->discov_timeout > 0) {
1550 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1551 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1555 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1559 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1562 new_settings(hdev, cmd->sk);
1564 /* When the discoverable mode gets changed, make sure
1565 * that class of device has the limited discoverable
1566 * bit correctly set. Also update page scan based on whitelist
1569 hci_req_init(&req, hdev);
1570 __hci_update_page_scan(&req);
1572 hci_req_run(&req, NULL);
1575 mgmt_pending_remove(cmd);
1578 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DISCOVERABLE: validates the requested mode
 * (0x00 off, 0x01 general, 0x02 limited) and its timeout, handles the
 * powered-off and no-HCI-change fast paths, then builds an HCI request
 * (IAC LAP + scan enable for BR/EDR, advertising data for LE-only)
 * completed by set_discoverable_complete().
 * NOTE(review): interior lines (labels, braces, some statements) are
 * missing from this listing.
 */
1581 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1584 struct mgmt_cp_set_discoverable *cp = data;
1585 struct pending_cmd *cmd;
1586 struct hci_request req;
1591 BT_DBG("request for %s", hdev->name);
/* At least one transport must be enabled for discoverability. */
1593 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1594 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1595 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1596 MGMT_STATUS_REJECTED);
1598 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1599 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_INVALID_PARAMS);
1602 timeout = __le16_to_cpu(cp->timeout);
1604 /* Disabling discoverable requires that no timeout is set,
1605 * and enabling limited discoverable requires a timeout.
1607 if ((cp->val == 0x00 && timeout > 0) ||
1608 (cp->val == 0x02 && timeout == 0))
1609 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1610 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while the controller is powered off. */
1614 if (!hdev_is_powered(hdev) && timeout > 0) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1616 MGMT_STATUS_NOT_POWERED);
1620 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1621 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1622 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be on. */
1627 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1628 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1629 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the flag; no HCI traffic needed. */
1633 if (!hdev_is_powered(hdev)) {
1634 bool changed = false;
1636 /* Setting limited discoverable when powered off is
1637 * not a valid operation since it requires a timeout
1638 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1640 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1641 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1645 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1650 err = new_settings(hdev, sk);
1655 /* If the current mode is the same, then just update the timeout
1656 * value with the new value. And if only the timeout gets updated,
1657 * then no need for any HCI transactions.
1659 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1660 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1661 &hdev->dev_flags)) {
1662 cancel_delayed_work(&hdev->discov_off);
1663 hdev->discov_timeout = timeout;
1665 if (cp->val && hdev->discov_timeout > 0) {
1666 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1667 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1671 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1675 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1681 /* Cancel any potential discoverable timeout that might be
1682 * still active and store new timeout value. The arming of
1683 * the timeout happens in the complete handler.
1685 cancel_delayed_work(&hdev->discov_off);
1686 hdev->discov_timeout = timeout;
1688 /* Limited discoverable mode */
1689 if (cp->val == 0x02)
1690 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1692 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1694 hci_req_init(&req, hdev);
1696 /* The procedure for LE-only controllers is much simpler - just
1697 * update the advertising data.
1699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1705 struct hci_cp_write_current_iac_lap hci_cp;
1707 if (cp->val == 0x02) {
1708 /* Limited discoverable mode */
/* 0x9e8b00 is the LIAC, 0x9e8b33 the GIAC (little-endian bytes). */
1709 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1710 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1711 hci_cp.iac_lap[1] = 0x8b;
1712 hci_cp.iac_lap[2] = 0x9e;
1713 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1714 hci_cp.iac_lap[4] = 0x8b;
1715 hci_cp.iac_lap[5] = 0x9e;
1717 /* General discoverable mode */
1719 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1720 hci_cp.iac_lap[1] = 0x8b;
1721 hci_cp.iac_lap[2] = 0x9e;
1724 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1725 (hci_cp.num_iac * 3) + 1, &hci_cp);
1727 scan |= SCAN_INQUIRY;
1729 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1732 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1735 update_adv_data(&req);
1737 err = hci_req_run(&req, set_discoverable_complete);
1739 mgmt_pending_remove(cmd);
1742 hci_dev_unlock(hdev);
/* Append page-scan activity/type HCI commands to @req to toggle fast
 * connectable: interlaced scan with a 160 ms interval when enabled,
 * standard scan with the 1.28 s default otherwise. Commands are only
 * queued when the values actually differ from the current ones.
 * Requires BR/EDR enabled and HCI version >= 1.2.
 */
1746 static void write_fast_connectable(struct hci_request *req, bool enable)
1748 struct hci_dev *hdev = req->hdev;
1749 struct hci_cp_write_page_scan_activity acp;
1752 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Interlaced page scan needs Bluetooth 1.2 or later. */
1755 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1759 type = PAGE_SCAN_TYPE_INTERLACED;
1761 /* 160 msec page scan interval */
1762 acp.interval = cpu_to_le16(0x0100);
1764 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1766 /* default 1.28 sec page scan */
1767 acp.interval = cpu_to_le16(0x0800);
1770 acp.window = cpu_to_le16(0x0012);
/* Skip the HCI round-trips when nothing would change. */
1772 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1773 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1774 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1777 if (hdev->page_scan_type != type)
1778 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable: on error, fail
 * the pending command; on success, update HCI_CONNECTABLE (and clear
 * HCI_DISCOVERABLE when turning connectable off), reply, and — if
 * anything changed — broadcast New Settings and refresh page scan,
 * advertising data and background scan state.
 * NOTE(review): interior lines (braces, else-branches) are missing
 * from this listing.
 */
1781 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1784 struct pending_cmd *cmd;
1785 struct mgmt_mode *cp;
1786 bool conn_changed, discov_changed;
1788 BT_DBG("status 0x%02x", status);
1792 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1797 u8 mgmt_err = mgmt_status(status);
1798 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1804 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1806 discov_changed = false;
/* Turning connectable off also drops discoverable. */
1808 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1810 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1814 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1816 if (conn_changed || discov_changed) {
1817 new_settings(hdev, cmd->sk);
1818 hci_update_page_scan(hdev);
1820 mgmt_update_adv_data(hdev);
1821 hci_update_background_scan(hdev);
1825 mgmt_pending_remove(cmd);
1828 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (used when powered off or when
 * no HCI commands were needed): set/clear HCI_CONNECTABLE, clear
 * HCI_DISCOVERABLE on disable, reply, and on change refresh scan
 * state and broadcast New Settings.
 */
1831 static int set_connectable_update_settings(struct hci_dev *hdev,
1832 struct sock *sk, u8 val)
1834 bool changed = false;
1837 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1841 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1843 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Non-connectable implies non-discoverable. */
1844 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1847 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1852 hci_update_page_scan(hdev);
1853 hci_update_background_scan(hdev);
1854 return new_settings(hdev, sk);
/* Handler for MGMT_OP_SET_CONNECTABLE: validates input, takes the
 * flag-only path when powered off, otherwise builds an HCI request
 * updating scan enable / fast connectable / advertising, completed by
 * set_connectable_complete(). -ENODATA from hci_req_run means nothing
 * needed sending and the settings-only path is used instead.
 * NOTE(review): interior lines (braces, labels) are missing from this
 * listing.
 */
1860 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1863 struct mgmt_mode *cp = data;
1864 struct pending_cmd *cmd;
1865 struct hci_request req;
1869 BT_DBG("request for %s", hdev->name);
1871 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1872 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1873 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1874 MGMT_STATUS_REJECTED);
1876 if (cp->val != 0x00 && cp->val != 0x01)
1877 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1878 MGMT_STATUS_INVALID_PARAMS);
1882 if (!hdev_is_powered(hdev)) {
1883 err = set_connectable_update_settings(hdev, sk, cp->val);
1887 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1888 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1889 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1900 hci_req_init(&req, hdev);
1902 /* If BR/EDR is not enabled and we disable advertising as a
1903 * by-product of disabling connectable, we need to update the
1904 * advertising flags.
1906 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1908 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1909 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1911 update_adv_data(&req);
1912 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1916 /* If we don't have any whitelist entries just
1917 * disable all scanning. If there are entries
1918 * and we had both page and inquiry scanning
1919 * enabled then fall back to only page scanning.
1920 * Otherwise no changes are needed.
1922 if (list_empty(&hdev->whitelist))
1923 scan = SCAN_DISABLED;
1924 else if (test_bit(HCI_ISCAN, &hdev->flags))
1927 goto no_scan_update;
/* Dropping inquiry scan makes the discoverable timeout moot. */
1929 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1930 hdev->discov_timeout > 0)
1931 cancel_delayed_work(&hdev->discov_off);
1934 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1938 /* If we're going from non-connectable to connectable or
1939 * vice-versa when fast connectable is enabled ensure that fast
1940 * connectable gets disabled. write_fast_connectable won't do
1941 * anything if the page scan parameters are already what they
1944 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1945 write_fast_connectable(&req, false);
1947 /* Update the advertising parameters if necessary */
1948 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1949 enable_advertising(&req);
1951 err = hci_req_run(&req, set_connectable_complete);
1953 mgmt_pending_remove(cmd);
/* No HCI commands were queued: settle via the flag-only path. */
1954 if (err == -ENODATA)
1955 err = set_connectable_update_settings(hdev, sk,
1961 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BONDABLE: pure host-side flag, no HCI
 * traffic — toggle HCI_BONDABLE, reply, and broadcast New Settings
 * when the value actually changed.
 */
1965 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1968 struct mgmt_mode *cp = data;
1972 BT_DBG("request for %s", hdev->name);
1974 if (cp->val != 0x00 && cp->val != 0x01)
1975 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1976 MGMT_STATUS_INVALID_PARAMS);
1981 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1983 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1985 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1990 err = new_settings(hdev, sk);
1993 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY: requires BR/EDR support.
 * When powered off only the HCI_LINK_SECURITY flag is toggled; when
 * powered, sends HCI Write Authentication Enable unless the value
 * already matches the HCI_AUTH state.
 * NOTE(review): interior lines (braces, labels) are missing from this
 * listing.
 */
1997 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2000 struct mgmt_mode *cp = data;
2001 struct pending_cmd *cmd;
2005 BT_DBG("request for %s", hdev->name);
2007 status = mgmt_bredr_support(hdev);
2009 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2012 if (cp->val != 0x00 && cp->val != 0x01)
2013 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2014 MGMT_STATUS_INVALID_PARAMS);
2018 if (!hdev_is_powered(hdev)) {
2019 bool changed = false;
2021 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2022 &hdev->dev_flags)) {
2023 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2027 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2032 err = new_settings(hdev, sk);
2037 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2038 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Requested value already matches controller state: just reply. */
2045 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2046 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2050 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2056 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2058 mgmt_pending_remove(cmd);
2063 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing): requires
 * BR/EDR and SSP-capable hardware. Powered off: toggle flags only
 * (disabling SSP also drops High Speed). Powered: send HCI Write
 * Simple Pairing Mode, first turning off SSP debug mode if it was on.
 * NOTE(review): interior lines (braces, labels) are missing from this
 * listing.
 */
2067 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2069 struct mgmt_mode *cp = data;
2070 struct pending_cmd *cmd;
2074 BT_DBG("request for %s", hdev->name);
2076 status = mgmt_bredr_support(hdev);
2078 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2080 if (!lmp_ssp_capable(hdev))
2081 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2082 MGMT_STATUS_NOT_SUPPORTED);
2084 if (cp->val != 0x00 && cp->val != 0x01)
2085 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2086 MGMT_STATUS_INVALID_PARAMS);
2090 if (!hdev_is_powered(hdev)) {
2094 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2097 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* High Speed depends on SSP; disabling SSP clears it too. */
2100 changed = test_and_clear_bit(HCI_HS_ENABLED,
2103 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2106 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2111 err = new_settings(hdev, sk);
2116 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2117 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2118 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2123 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2124 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2128 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP while debug keys were in use: drop debug mode first. */
2134 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2135 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2136 sizeof(cp->val), &cp->val);
2138 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2140 mgmt_pending_remove(cmd);
2145 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed / AMP): host-side flag only.
 * Requires BR/EDR, SSP capability and SSP enabled. Disabling while
 * powered is rejected (visible in the listing); otherwise toggles
 * HCI_HS_ENABLED, replies, and broadcasts New Settings on change.
 */
2149 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2151 struct mgmt_mode *cp = data;
2156 BT_DBG("request for %s", hdev->name);
2158 status = mgmt_bredr_support(hdev);
2160 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2162 if (!lmp_ssp_capable(hdev))
2163 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2164 MGMT_STATUS_NOT_SUPPORTED);
/* HS is only meaningful with SSP active. */
2166 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2167 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2168 MGMT_STATUS_REJECTED);
2170 if (cp->val != 0x00 && cp->val != 0x01)
2171 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2172 MGMT_STATUS_INVALID_PARAMS);
2177 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2179 if (hdev_is_powered(hdev)) {
2180 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2181 MGMT_STATUS_REJECTED);
2185 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2188 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2193 err = new_settings(hdev, sk);
2196 hci_dev_unlock(hdev);
/* HCI request completion handler for Set LE: on error, fail all
 * pending SET_LE commands; on success, answer them, broadcast New
 * Settings, and — if LE ended up enabled — refresh advertising data,
 * scan response data and background scan in one follow-up request.
 */
2200 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2202 struct cmd_lookup match = { NULL, hdev };
2207 u8 mgmt_err = mgmt_status(status);
2209 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2214 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2216 new_settings(hdev, match.sk);
2221 /* Make sure the controller has a good default for
2222 * advertising data. Restrict the update to when LE
2223 * has actually been enabled. During power on, the
2224 * update in powered_update_hci will take care of it.
2226 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2227 struct hci_request req;
2229 hci_req_init(&req, hdev);
2230 update_adv_data(&req);
2231 update_scan_rsp_data(&req);
2232 __hci_update_background_scan(&req);
2233 hci_req_run(&req, NULL);
2237 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE: toggles LE host support. Dual-mode only
 * (LE-only controllers reject toggling). Flag-only path when powered
 * off or when the host-LE state already matches; otherwise sends HCI
 * Write LE Host Supported (disabling advertising first when turning
 * LE off), completed by le_enable_complete().
 * NOTE(review): interior lines (braces, labels) are missing from this
 * listing.
 */
2240 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2242 struct mgmt_mode *cp = data;
2243 struct hci_cp_write_le_host_supported hci_cp;
2244 struct pending_cmd *cmd;
2245 struct hci_request req;
2249 BT_DBG("request for %s", hdev->name);
2251 if (!lmp_le_capable(hdev))
2252 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2253 MGMT_STATUS_NOT_SUPPORTED);
2255 if (cp->val != 0x00 && cp->val != 0x01)
2256 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2257 MGMT_STATUS_INVALID_PARAMS);
2259 /* LE-only devices do not allow toggling LE on/off */
2260 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2261 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2262 MGMT_STATUS_REJECTED);
2267 enabled = lmp_host_le_capable(hdev);
2269 if (!hdev_is_powered(hdev) || val == enabled) {
2270 bool changed = false;
2272 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2273 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* LE advertising cannot stay on once LE itself is off. */
2277 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2278 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2282 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2287 err = new_settings(hdev, sk);
2292 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2293 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2294 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2299 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2305 hci_req_init(&req, hdev);
2307 memset(&hci_cp, 0, sizeof(hci_cp));
2311 hci_cp.simul = 0x00;
2313 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2314 disable_advertising(&req);
2317 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2320 err = hci_req_run(&req, le_enable_complete);
2322 mgmt_pending_remove(cmd);
2325 hci_dev_unlock(hdev);
2329 /* This is a helper function to test for pending mgmt commands that can
2330 * cause CoD or EIR HCI commands. We can only allow one such pending
2331 * mgmt command at a time since otherwise we cannot easily track what
2332 * the current values are, will be, and based on that calculate if a new
2333 * HCI command needs to be sent and if yes with what value.
2335 static bool pending_eir_or_class(struct hci_dev *hdev)
2337 struct pending_cmd *cmd;
/* Scan the pending list for any opcode that touches EIR/CoD state. */
2339 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2340 switch (cmd->opcode) {
2341 case MGMT_OP_ADD_UUID:
2342 case MGMT_OP_REMOVE_UUID:
2343 case MGMT_OP_SET_DEV_CLASS:
2344 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short UUIDs.
 */
2352 static const u8 bluetooth_base_uuid[] = {
2353 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2354 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: anything not built on the Bluetooth Base
 * UUID is a full 128-bit UUID; otherwise the high 32 bits decide
 * between a 16- and 32-bit short form (tail of function not visible
 * in this listing).
 */
2357 static u8 get_uuid_size(const u8 *uuid)
2361 if (memcmp(uuid, bluetooth_base_uuid, 12))
2364 val = get_unaligned_le32(&uuid[12]);
/* Shared completion path for Add/Remove UUID and Set Device Class:
 * answer the pending command of @mgmt_op with the translated status
 * and the current 3-byte class of device.
 */
2371 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2373 struct pending_cmd *cmd;
2377 cmd = mgmt_pending_find(mgmt_op, hdev);
2381 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2382 hdev->dev_class, 3);
2384 mgmt_pending_remove(cmd);
2387 hci_dev_unlock(hdev);
/* HCI request completion callback for Add UUID. */
2390 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2392 BT_DBG("status 0x%02x", status);
2394 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID: rejects if another EIR/CoD command is
 * pending, stores the UUID on hdev->uuids, and runs an HCI request to
 * refresh CoD/EIR. -ENODATA means nothing needed sending, so reply
 * immediately; otherwise a pending command waits for
 * add_uuid_complete().
 * NOTE(review): interior lines (error labels, braces) are missing
 * from this listing.
 */
2397 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2399 struct mgmt_cp_add_uuid *cp = data;
2400 struct pending_cmd *cmd;
2401 struct hci_request req;
2402 struct bt_uuid *uuid;
2405 BT_DBG("request for %s", hdev->name);
2409 if (pending_eir_or_class(hdev)) {
2410 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2415 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2421 memcpy(uuid->uuid, cp->uuid, 16);
2422 uuid->svc_hint = cp->svc_hint;
2423 uuid->size = get_uuid_size(cp->uuid);
2425 list_add_tail(&uuid->list, &hdev->uuids);
2427 hci_req_init(&req, hdev);
2432 err = hci_req_run(&req, add_uuid_complete);
2434 if (err != -ENODATA)
/* No HCI update required: answer with the current dev class. */
2437 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2438 hdev->dev_class, 3);
2442 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2451 hci_dev_unlock(hdev);
/* Arm the service-cache: when the device is powered and the cache bit
 * was not already set, schedule the service_cache work (return value
 * indicates whether the caller can defer the EIR/CoD update).
 */
2455 static bool enable_service_cache(struct hci_dev *hdev)
2457 if (!hdev_is_powered(hdev))
2460 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2461 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion callback for Remove UUID. */
2469 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2471 BT_DBG("status 0x%02x", status);
2473 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID: an all-zero UUID clears the whole
 * list (optionally deferring the HCI update via the service cache);
 * otherwise remove the matching entries, failing with INVALID_PARAMS
 * if none matched, then refresh CoD/EIR via an HCI request completed
 * by remove_uuid_complete().
 * NOTE(review): interior lines (labels, braces) are missing from this
 * listing.
 */
2476 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2479 struct mgmt_cp_remove_uuid *cp = data;
2480 struct pending_cmd *cmd;
2481 struct bt_uuid *match, *tmp;
2482 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2483 struct hci_request req;
2486 BT_DBG("request for %s", hdev->name);
2490 if (pending_eir_or_class(hdev)) {
2491 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID is the wildcard: drop every stored UUID. */
2496 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2497 hci_uuids_clear(hdev);
2499 if (enable_service_cache(hdev)) {
2500 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2501 0, hdev->dev_class, 3);
2510 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2511 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2514 list_del(&match->list);
2520 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2521 MGMT_STATUS_INVALID_PARAMS);
2526 hci_req_init(&req, hdev);
2531 err = hci_req_run(&req, remove_uuid_complete);
2533 if (err != -ENODATA)
2536 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2537 hdev->dev_class, 3);
2541 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2550 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Device Class. */
2554 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2556 BT_DBG("status 0x%02x", status);
2558 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS: validates major/minor class
 * bits, stores them, and — when powered — flushes any pending service
 * cache and runs an HCI request to write CoD/EIR, completed by
 * set_class_complete(). Powered off: reply immediately with the
 * current class.
 * NOTE(review): interior lines (labels, braces) are missing from this
 * listing.
 */
2561 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2564 struct mgmt_cp_set_dev_class *cp = data;
2565 struct pending_cmd *cmd;
2566 struct hci_request req;
2569 BT_DBG("request for %s", hdev->name);
2571 if (!lmp_bredr_capable(hdev))
2572 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2573 MGMT_STATUS_NOT_SUPPORTED);
2577 if (pending_eir_or_class(hdev)) {
2578 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low two minor bits and high three major bits are reserved. */
2583 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2584 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2585 MGMT_STATUS_INVALID_PARAMS);
2589 hdev->major_class = cp->major;
2590 hdev->minor_class = cp->minor;
2592 if (!hdev_is_powered(hdev)) {
2593 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2594 hdev->dev_class, 3);
2598 hci_req_init(&req, hdev);
/* Drop the lock to safely cancel_sync the service-cache work. */
2600 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2601 hci_dev_unlock(hdev);
2602 cancel_delayed_work_sync(&hdev->service_cache);
2609 err = hci_req_run(&req, set_class_complete);
2611 if (err != -ENODATA)
2614 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2615 hdev->dev_class, 3);
2619 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2628 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS: validates key_count against the
 * maximum that fits in a u16-sized payload and against the actual
 * message length, validates each key's address type, then replaces
 * the stored link keys wholesale (debug-combination keys are skipped
 * on purpose) and updates HCI_KEEP_DEBUG_KEYS.
 * NOTE(review): interior lines (braces, some statements) are missing
 * from this listing.
 */
2632 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2635 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so expected_len below cannot overflow u16. */
2636 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2637 sizeof(struct mgmt_link_key_info));
2638 u16 key_count, expected_len;
2642 BT_DBG("request for %s", hdev->name);
2644 if (!lmp_bredr_capable(hdev))
2645 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2646 MGMT_STATUS_NOT_SUPPORTED);
2648 key_count = __le16_to_cpu(cp->key_count);
2649 if (key_count > max_key_count) {
2650 BT_ERR("load_link_keys: too big key_count value %u",
2652 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2653 MGMT_STATUS_INVALID_PARAMS);
2656 expected_len = sizeof(*cp) + key_count *
2657 sizeof(struct mgmt_link_key_info);
2658 if (expected_len != len) {
2659 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2661 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2662 MGMT_STATUS_INVALID_PARAMS);
2665 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2666 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2667 MGMT_STATUS_INVALID_PARAMS);
2669 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the existing key store. */
2672 for (i = 0; i < key_count; i++) {
2673 struct mgmt_link_key_info *key = &cp->keys[i];
2675 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2676 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2677 MGMT_STATUS_INVALID_PARAMS);
2682 hci_link_keys_clear(hdev);
2685 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2688 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2692 new_settings(hdev, NULL);
2694 for (i = 0; i < key_count; i++) {
2695 struct mgmt_link_key_info *key = &cp->keys[i];
2697 /* Always ignore debug keys and require a new pairing if
2698 * the user wants to use them.
2700 if (key->type == HCI_LK_DEBUG_COMBINATION)
2703 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2704 key->type, key->pin_len, NULL);
2707 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2709 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk.
 */
2714 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2715 u8 addr_type, struct sock *skip_sk)
2717 struct mgmt_ev_device_unpaired ev;
2719 bacpy(&ev.addr.bdaddr, bdaddr);
2720 ev.addr.type = addr_type;
2722 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE: removes the link key (BR/EDR) or
 * IRK+LTK (LE) for the given address. When cp->disconnect is set and
 * a live connection exists, issues HCI Disconnect and defers the
 * reply via a pending command; otherwise replies immediately and
 * emits Device Unpaired.
 * NOTE(review): interior lines (braces, labels, some statements) are
 * missing from this listing.
 */
2726 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct mgmt_cp_unpair_device *cp = data;
2730 struct mgmt_rp_unpair_device rp;
2731 struct hci_cp_disconnect dc;
2732 struct pending_cmd *cmd;
2733 struct hci_conn *conn;
2736 memset(&rp, 0, sizeof(rp));
2737 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2738 rp.addr.type = cp->addr.type;
2740 if (!bdaddr_type_is_valid(cp->addr.type))
2741 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2742 MGMT_STATUS_INVALID_PARAMS,
2745 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2746 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2747 MGMT_STATUS_INVALID_PARAMS,
2752 if (!hdev_is_powered(hdev)) {
2753 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2754 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2758 if (cp->addr.type == BDADDR_BREDR) {
2759 /* If disconnection is requested, then look up the
2760 * connection. If the remote device is connected, it
2761 * will be later used to terminate the link.
2763 * Setting it to NULL explicitly will cause no
2764 * termination of the link.
2767 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2772 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2776 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2779 /* Defer clearing up the connection parameters
2780 * until closing to give a chance of keeping
2781 * them if a repairing happens.
2783 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2785 /* If disconnection is not requested, then
2786 * clear the connection variable so that the
2787 * link is not terminated.
2789 if (!cp->disconnect)
2793 if (cp->addr.type == BDADDR_LE_PUBLIC)
2794 addr_type = ADDR_LE_DEV_PUBLIC;
2796 addr_type = ADDR_LE_DEV_RANDOM;
2798 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2800 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No keys were stored for this address: nothing was paired. */
2804 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2805 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2809 /* If the connection variable is set, then termination of the
2810 * link is requested.
2813 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2815 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2819 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2826 cmd->cmd_complete = addr_cmd_complete;
2828 dc.handle = cpu_to_le16(conn->handle);
2829 dc.reason = 0x13; /* Remote User Terminated Connection */
2830 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2832 mgmt_pending_remove(cmd);
2835 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT: look up the ACL (BR/EDR) or LE
 * connection for the given address, reject if the adapter is down, a
 * disconnect is already pending, or the device is not connected, then
 * initiate hci_disconnect() with a pending command answered through
 * generic_cmd_complete.
 */
2839 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2842 struct mgmt_cp_disconnect *cp = data;
2843 struct mgmt_rp_disconnect rp;
2844 struct pending_cmd *cmd;
2845 struct hci_conn *conn;
2850 memset(&rp, 0, sizeof(rp));
2851 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2852 rp.addr.type = cp->addr.type;
2854 if (!bdaddr_type_is_valid(cp->addr.type))
2855 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2856 MGMT_STATUS_INVALID_PARAMS,
2861 if (!test_bit(HCI_UP, &hdev->flags)) {
2862 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2863 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be outstanding at a time. */
2867 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2868 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2869 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2873 if (cp->addr.type == BDADDR_BREDR)
2874 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2877 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2879 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2880 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2881 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2885 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2891 cmd->cmd_complete = generic_cmd_complete;
2893 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2895 mgmt_pending_remove(cmd);
2898 hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type into the mgmt BDADDR_*
 * address type; unknown combinations fall back to LE Random or
 * BR/EDR as noted below.
 */
2902 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2904 switch (link_type) {
2906 switch (addr_type) {
2907 case ADDR_LE_DEV_PUBLIC:
2908 return BDADDR_LE_PUBLIC;
2911 /* Fallback to LE Random address type */
2912 return BDADDR_LE_RANDOM;
2916 /* Fallback to BR/EDR type */
2917 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: counts mgmt-visible
 * connections, allocates a response sized for that count, fills in
 * the address of every non-SCO connection, and replies with the
 * (possibly smaller) recalculated length.
 * NOTE(review): interior lines (braces, continue statements) are
 * missing from this listing.
 */
2921 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2924 struct mgmt_rp_get_connections *rp;
2934 if (!hdev_is_powered(hdev)) {
2935 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2936 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections that mgmt should report. */
2941 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2942 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2946 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2947 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill addresses, skipping SCO/eSCO links. */
2954 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2955 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2957 bacpy(&rp->addr[i].bdaddr, &c->dst);
2958 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2959 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2964 rp->conn_count = cpu_to_le16(i);
2966 /* Recalculate length in case of filtered SCO connections, etc */
2967 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2969 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2975 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Neg Reply command and send the
 * corresponding HCI command; the pending entry is dropped if sending
 * fails.
 */
2979 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2980 struct mgmt_cp_pin_code_neg_reply *cp)
2982 struct pending_cmd *cmd;
2985 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2990 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2991 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2993 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: requires a powered adapter and
 * an existing ACL connection. A high-security pairing with a PIN
 * shorter than 16 bytes is converted into a negative reply; otherwise
 * the PIN is forwarded via HCI PIN Code Reply with a pending command
 * answered through addr_cmd_complete.
 */
2998 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3001 struct hci_conn *conn;
3002 struct mgmt_cp_pin_code_reply *cp = data;
3003 struct hci_cp_pin_code_reply reply;
3004 struct pending_cmd *cmd;
3011 if (!hdev_is_powered(hdev)) {
3012 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3013 MGMT_STATUS_NOT_POWERED);
3017 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3019 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3020 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; refuse shorter ones. */
3024 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3025 struct mgmt_cp_pin_code_neg_reply ncp;
3027 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3029 BT_ERR("PIN code is not 16 bytes long");
3031 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3033 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3034 MGMT_STATUS_INVALID_PARAMS);
3039 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3045 cmd->cmd_complete = addr_cmd_complete;
3047 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3048 reply.pin_len = cp->pin_len;
3049 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3051 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3053 mgmt_pending_remove(cmd);
3056 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: stores the requested IO capability in
 * hdev->io_capability (under the dev lock) for use in future pairings.
 * Values above SMP_IO_KEYBOARD_DISPLAY are rejected with INVALID_PARAMS.
 */
3060 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3063 struct mgmt_cp_set_io_capability *cp = data;
3067 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3068 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3069 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3073 hdev->io_capability = cp->io_capability;
3075 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3076 hdev->io_capability);
3078 hci_dev_unlock(hdev);
3080 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Scan hdev->mgmt_pending for the MGMT_OP_PAIR_DEVICE command whose
 * user_data points at this connection; used by the pairing completion
 * callbacks to locate their pending command. Returns NULL if none
 * (return statements elided in this excerpt).
 */
3084 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3086 struct hci_dev *hdev = conn->hdev;
3087 struct pending_cmd *cmd;
3089 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3090 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3093 if (cmd->user_data != conn)
/* cmd_complete callback for MGMT_OP_PAIR_DEVICE: send the final
 * Pair Device reply (peer address + status), detach the connection
 * callbacks so no further events reach this command, and release the
 * connection reference taken when the pairing started.
 */
3102 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3104 struct mgmt_rp_pair_device rp;
3105 struct hci_conn *conn = cmd->user_data;
3108 bacpy(&rp.addr.bdaddr, &conn->dst);
3109 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3111 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3114 /* So we don't get further callbacks for this connection */
3115 conn->connect_cfm_cb = NULL;
3116 conn->security_cfm_cb = NULL;
3117 conn->disconn_cfm_cb = NULL;
3119 hci_conn_drop(conn);
3121 /* The device is paired so there is no need to remove
3122 * its connection parameters anymore.
3124 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over SMP finishes; translates the
 * boolean outcome into a mgmt status and completes + removes the pending
 * Pair Device command, if one is found for this connection.
 */
3131 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3133 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3134 struct pending_cmd *cmd;
3136 cmd = find_pairing(conn);
3138 cmd->cmd_complete(cmd, status);
3139 mgmt_pending_remove(cmd);
/* hci_conn callback (connect/security/disconnect) used for BR/EDR pairing:
 * finds the pending Pair Device command for this connection and completes
 * it with the HCI status mapped to a mgmt status.
 */
3143 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3145 struct pending_cmd *cmd;
3147 BT_DBG("status %u", status);
3149 cmd = find_pairing(conn);
3151 BT_DBG("Unable to find a pending command");
3155 cmd->cmd_complete(cmd, mgmt_status(status));
3156 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb. NOTE(review): the original has an
 * extra early-return condition between the BT_DBG and find_pairing() that is
 * elided in this excerpt (for LE a successful connect alone does not mean
 * pairing finished — see comment in pair_device()).
 */
3159 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3161 struct pending_cmd *cmd;
3163 BT_DBG("status %u", status);
3168 cmd = find_pairing(conn);
3170 BT_DBG("Unable to find a pending command");
3174 cmd->cmd_complete(cmd, mgmt_status(status));
3175 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * device over BR/EDR or LE.
 *
 * Flow visible in this excerpt:
 *  - validate address type and io_cap, require a powered adapter;
 *  - create the connection (hci_connect_acl for BDADDR_BREDR, otherwise
 *    store connection parameters and hci_connect_le);
 *  - reject with BUSY if a pairing callback is already attached;
 *  - queue a pending command, install pairing completion callbacks
 *    (BR/EDR vs LE variants), and hold a connection reference in
 *    cmd->user_data;
 *  - if already connected, kick off security directly and complete.
 */
3178 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3181 struct mgmt_cp_pair_device *cp = data;
3182 struct mgmt_rp_pair_device rp;
3183 struct pending_cmd *cmd;
3184 u8 sec_level, auth_type;
3185 struct hci_conn *conn;
/* Reply always echoes the requested peer address */
3190 memset(&rp, 0, sizeof(rp));
3191 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3192 rp.addr.type = cp->addr.type;
3194 if (!bdaddr_type_is_valid(cp->addr.type))
3195 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3196 MGMT_STATUS_INVALID_PARAMS,
3199 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3200 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3201 MGMT_STATUS_INVALID_PARAMS,
3206 if (!hdev_is_powered(hdev)) {
3207 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3208 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3212 sec_level = BT_SECURITY_MEDIUM;
3213 auth_type = HCI_AT_DEDICATED_BONDING;
3215 if (cp->addr.type == BDADDR_BREDR) {
3216 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3221 /* Convert from L2CAP channel address type to HCI address type
3223 if (cp->addr.type == BDADDR_LE_PUBLIC)
3224 addr_type = ADDR_LE_DEV_PUBLIC;
3226 addr_type = ADDR_LE_DEV_RANDOM;
3228 /* When pairing a new device, it is expected to remember
3229 * this device for future connections. Adding the connection
3230 * parameter information ahead of time allows tracking
3231 * of the slave preferred values and will speed up any
3232 * further connection establishment.
3234 * If connection parameters already exist, then they
3235 * will be kept and this function does nothing.
3237 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3239 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3240 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection creation failed: map -EBUSY to BUSY, everything else
 * to CONNECT_FAILED (IS_ERR check elided in this excerpt).
 */
3247 if (PTR_ERR(conn) == -EBUSY)
3248 status = MGMT_STATUS_BUSY;
3250 status = MGMT_STATUS_CONNECT_FAILED;
3252 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing is in progress */
3258 if (conn->connect_cfm_cb) {
3259 hci_conn_drop(conn);
3260 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3261 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3265 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3268 hci_conn_drop(conn);
3272 cmd->cmd_complete = pairing_complete;
3274 /* For LE, just connecting isn't a proof that the pairing finished */
3275 if (cp->addr.type == BDADDR_BREDR) {
3276 conn->connect_cfm_cb = pairing_complete_cb;
3277 conn->security_cfm_cb = pairing_complete_cb;
3278 conn->disconn_cfm_cb = pairing_complete_cb;
3280 conn->connect_cfm_cb = le_pairing_complete_cb;
3281 conn->security_cfm_cb = le_pairing_complete_cb;
3282 conn->disconn_cfm_cb = le_pairing_complete_cb;
3285 conn->io_capability = cp->io_cap;
/* Hold a reference for the lifetime of the pending command */
3286 cmd->user_data = hci_conn_get(conn);
3288 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3289 hci_conn_security(conn, sec_level, auth_type, true)) {
3290 cmd->cmd_complete(cmd, 0);
3291 mgmt_pending_remove(cmd);
3297 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command. Requires a powered adapter, an outstanding MGMT_OP_PAIR_DEVICE
 * pending command, and a matching peer address; the pending pairing is
 * completed with MGMT_STATUS_CANCELLED before acknowledging the cancel.
 */
3301 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3304 struct mgmt_addr_info *addr = data;
3305 struct pending_cmd *cmd;
3306 struct hci_conn *conn;
3313 if (!hdev_is_powered(hdev)) {
3314 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3315 MGMT_STATUS_NOT_POWERED);
3319 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3321 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3322 MGMT_STATUS_INVALID_PARAMS);
3326 conn = cmd->user_data;
/* The address in the cancel request must match the ongoing pairing */
3328 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3329 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3330 MGMT_STATUS_INVALID_PARAMS);
3334 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3335 mgmt_pending_remove(cmd);
3337 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3338 addr, sizeof(*addr));
3340 hci_dev_unlock(hdev);
/* Common backend for the user confirmation / passkey / PIN reply family of
 * mgmt commands.
 *
 * @mgmt_op: the mgmt opcode being answered (used in all replies).
 * @hci_op:  the HCI command to forward the response with.
 * @passkey: only used when hci_op is HCI_OP_USER_PASSKEY_REPLY.
 *
 * For LE connections the response is handed to the SMP layer via
 * smp_user_confirm_reply() instead of going out over HCI.
 */
3344 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3345 struct mgmt_addr_info *addr, u16 mgmt_op,
3346 u16 hci_op, __le32 passkey)
3348 struct pending_cmd *cmd;
3349 struct hci_conn *conn;
3354 if (!hdev_is_powered(hdev)) {
3355 err = cmd_complete(sk, hdev->id, mgmt_op,
3356 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport implied by the address type */
3361 if (addr->type == BDADDR_BREDR)
3362 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3364 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3367 err = cmd_complete(sk, hdev->id, mgmt_op,
3368 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: answer through SMP, reply immediately with success/failure */
3373 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3374 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3376 err = cmd_complete(sk, hdev->id, mgmt_op,
3377 MGMT_STATUS_SUCCESS, addr,
3380 err = cmd_complete(sk, hdev->id, mgmt_op,
3381 MGMT_STATUS_FAILED, addr,
3387 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3393 cmd->cmd_complete = addr_cmd_complete;
3395 /* Continue with pairing via HCI */
3396 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3397 struct hci_cp_user_passkey_reply cp;
3399 bacpy(&cp.bdaddr, &addr->bdaddr);
3400 cp.passkey = passkey;
3401 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3403 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3407 mgmt_pending_remove(cmd);
3410 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler — thin wrapper delegating to
 * user_pairing_resp() with HCI_OP_PIN_CODE_NEG_REPLY (no passkey).
 */
3414 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3415 void *data, u16 len)
3417 struct mgmt_cp_pin_code_neg_reply *cp = data;
3421 return user_pairing_resp(sk, hdev, &cp->addr,
3422 MGMT_OP_PIN_CODE_NEG_REPLY,
3423 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler — validates the fixed-size command
 * payload, then delegates to user_pairing_resp() with
 * HCI_OP_USER_CONFIRM_REPLY (no passkey).
 */
3426 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3429 struct mgmt_cp_user_confirm_reply *cp = data;
3433 if (len != sizeof(*cp))
3434 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3435 MGMT_STATUS_INVALID_PARAMS);
3437 return user_pairing_resp(sk, hdev, &cp->addr,
3438 MGMT_OP_USER_CONFIRM_REPLY,
3439 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler — thin wrapper delegating to
 * user_pairing_resp() with HCI_OP_USER_CONFIRM_NEG_REPLY (no passkey).
 */
3442 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3443 void *data, u16 len)
3445 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3449 return user_pairing_resp(sk, hdev, &cp->addr,
3450 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3451 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler — thin wrapper delegating to
 * user_pairing_resp(); this is the one variant that actually carries a
 * passkey, forwarded via HCI_OP_USER_PASSKEY_REPLY.
 */
3454 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3457 struct mgmt_cp_user_passkey_reply *cp = data;
3461 return user_pairing_resp(sk, hdev, &cp->addr,
3462 MGMT_OP_USER_PASSKEY_REPLY,
3463 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler — thin wrapper delegating to
 * user_pairing_resp() with HCI_OP_USER_PASSKEY_NEG_REPLY (no passkey).
 */
3466 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3467 void *data, u16 len)
3469 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3473 return user_pairing_resp(sk, hdev, &cp->addr,
3474 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3475 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (carrying hdev->dev_name) to the
 * given request; the request is executed later by the caller.
 */
3478 static void update_name(struct hci_request *req)
3480 struct hci_dev *hdev = req->hdev;
3481 struct hci_cp_write_local_name cp;
3483 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3485 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: locates the pending
 * MGMT_OP_SET_LOCAL_NAME command and answers it with either an error
 * status or a command-complete carrying the (elided here) name payload.
 */
3488 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3490 struct mgmt_cp_set_local_name *cp;
3491 struct pending_cmd *cmd;
3493 BT_DBG("status 0x%02x", status);
3497 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3504 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3505 mgmt_status(status));
3507 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3510 mgmt_pending_remove(cmd);
3513 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device's long and short names.
 *
 * Fast paths: if both names are unchanged, reply immediately; if the
 * adapter is not powered, store the names, reply, and emit
 * MGMT_EV_LOCAL_NAME_CHANGED without touching the controller. Otherwise a
 * pending command is queued and an HCI request is built (update_name for
 * BR/EDR, update_scan_rsp_data for LE — the name lives in the scan
 * response data) and run with set_name_complete as callback.
 */
3516 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3519 struct mgmt_cp_set_local_name *cp = data;
3520 struct pending_cmd *cmd;
3521 struct hci_request req;
3528 /* If the old values are the same as the new ones just return a
3529 * direct command complete event.
3531 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3532 !memcmp(hdev->short_name, cp->short_name,
3533 sizeof(hdev->short_name))) {
3534 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3539 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3541 if (!hdev_is_powered(hdev)) {
3542 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3544 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3549 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3555 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3561 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3563 hci_req_init(&req, hdev);
3565 if (lmp_bredr_capable(hdev)) {
3570 /* The name is stored in the scan response data and so
3571 * no need to udpate the advertising data here.
3573 if (lmp_le_capable(hdev))
3574 update_scan_rsp_data(&req);
3576 err = hci_req_run(&req, set_name_complete);
/* Failure cleanup for hci_req_run() */
3578 mgmt_pending_remove(cmd);
3581 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for local OOB
 * pairing data. Requires a powered, SSP-capable adapter and no other
 * outstanding read. Uses the extended HCI variant when BR/EDR Secure
 * Connections is enabled (returns both P-192 and P-256 values).
 */
3585 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3586 void *data, u16 data_len)
3588 struct pending_cmd *cmd;
3591 BT_DBG("%s", hdev->name);
3595 if (!hdev_is_powered(hdev)) {
3596 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3597 MGMT_STATUS_NOT_POWERED);
3601 if (!lmp_ssp_capable(hdev)) {
3602 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3603 MGMT_STATUS_NOT_SUPPORTED);
3607 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3608 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3613 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3619 if (bredr_sc_enabled(hdev))
3620 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3623 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
/* Failure cleanup for hci_send_cmd() */
3626 mgmt_pending_remove(cmd);
3629 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store OOB pairing data received
 * out-of-band for a remote device.
 *
 * The command comes in two sizes: the legacy form (hash + randomizer,
 * BR/EDR only) and the extended form that additionally carries P-256
 * values; for LE addresses in the extended form the 192-bit values are
 * taken from the command (assignment paths partially elided here). Any
 * other length is rejected as INVALID_PARAMS.
 */
3633 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3634 void *data, u16 len)
3636 struct mgmt_addr_info *addr = data;
3639 BT_DBG("%s ", hdev->name);
3641 if (!bdaddr_type_is_valid(addr->type))
3642 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3643 MGMT_STATUS_INVALID_PARAMS, addr,
/* Legacy (P-192 only) variant */
3648 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3649 struct mgmt_cp_add_remote_oob_data *cp = data;
3652 if (cp->addr.type != BDADDR_BREDR) {
3653 err = cmd_complete(sk, hdev->id,
3654 MGMT_OP_ADD_REMOTE_OOB_DATA,
3655 MGMT_STATUS_INVALID_PARAMS,
3656 &cp->addr, sizeof(cp->addr));
3660 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3661 cp->addr.type, cp->hash,
3662 cp->rand, NULL, NULL);
3664 status = MGMT_STATUS_FAILED;
3666 status = MGMT_STATUS_SUCCESS;
3668 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3669 status, &cp->addr, sizeof(cp->addr));
/* Extended (P-192 + P-256) variant */
3670 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3671 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3672 u8 *rand192, *hash192;
3675 if (bdaddr_type_is_le(cp->addr.type)) {
3679 rand192 = cp->rand192;
3680 hash192 = cp->hash192;
3683 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3684 cp->addr.type, hash192, rand192,
3685 cp->hash256, cp->rand256);
3687 status = MGMT_STATUS_FAILED;
3689 status = MGMT_STATUS_SUCCESS;
3691 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3692 status, &cp->addr, sizeof(cp->addr));
3694 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3695 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3696 MGMT_STATUS_INVALID_PARAMS);
3700 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for one
 * BR/EDR device, or clear all stored OOB data when the address is
 * BDADDR_ANY. Non-BR/EDR address types are rejected.
 */
3704 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3705 void *data, u16 len)
3707 struct mgmt_cp_remove_remote_oob_data *cp = data;
3711 BT_DBG("%s", hdev->name);
3713 if (cp->addr.type != BDADDR_BREDR)
3714 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3715 MGMT_STATUS_INVALID_PARAMS,
3716 &cp->addr, sizeof(cp->addr));
/* Wildcard address means "clear everything" */
3720 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3721 hci_remote_oob_data_clear(hdev);
3722 status = MGMT_STATUS_SUCCESS;
3726 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3728 status = MGMT_STATUS_INVALID_PARAMS;
3730 status = MGMT_STATUS_SUCCESS;
3733 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3734 status, &cp->addr, sizeof(cp->addr));
3736 hci_dev_unlock(hdev);
/* Build the HCI commands needed to start discovery of the configured type
 * into @req. Returns false (with *status set to a mgmt error) when
 * discovery cannot be started; the early-return lines are elided in this
 * excerpt.
 *
 * BREDR:       flush the inquiry cache and issue HCI Inquiry with the
 *              general inquiry access code.
 * LE /
 * INTERLEAVED: requires LE support (and BR/EDR enabled for interleaved);
 *              stops advertising — unless an outgoing directed-advertising
 *              connection attempt would be aborted, which is rejected —
 *              pauses any running background scan, sets up an active scan
 *              with a resolvable/non-resolvable private own address, and
 *              enables scanning with duplicate filtering.
 */
3740 static bool trigger_discovery(struct hci_request *req, u8 *status)
3742 struct hci_dev *hdev = req->hdev;
3743 struct hci_cp_le_set_scan_param param_cp;
3744 struct hci_cp_le_set_scan_enable enable_cp;
3745 struct hci_cp_inquiry inq_cp;
3746 /* General inquiry access code (GIAC) */
3747 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3751 switch (hdev->discovery.type) {
3752 case DISCOV_TYPE_BREDR:
3753 *status = mgmt_bredr_support(hdev);
3757 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3758 *status = MGMT_STATUS_BUSY;
3762 hci_inquiry_cache_flush(hdev);
3764 memset(&inq_cp, 0, sizeof(inq_cp));
3765 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3766 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3767 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3770 case DISCOV_TYPE_LE:
3771 case DISCOV_TYPE_INTERLEAVED:
3772 *status = mgmt_le_support(hdev);
3776 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3777 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3778 *status = MGMT_STATUS_NOT_SUPPORTED;
3782 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3783 /* Don't let discovery abort an outgoing
3784 * connection attempt that's using directed
3787 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3789 *status = MGMT_STATUS_REJECTED;
3793 disable_advertising(req);
3796 /* If controller is scanning, it means the background scanning
3797 * is running. Thus, we should temporarily stop it in order to
3798 * set the discovery scanning parameters.
3800 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3801 hci_req_add_le_scan_disable(req);
3803 memset(&param_cp, 0, sizeof(param_cp));
3805 /* All active scans will be done with either a resolvable
3806 * private address (when privacy feature has been enabled)
3807 * or non-resolvable private address.
3809 err = hci_update_random_address(req, true, &own_addr_type);
3811 *status = MGMT_STATUS_FAILED;
3815 param_cp.type = LE_SCAN_ACTIVE;
3816 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3817 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3818 param_cp.own_address_type = own_addr_type;
3819 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3822 memset(&enable_cp, 0, sizeof(enable_cp));
3823 enable_cp.enable = LE_SCAN_ENABLE;
3824 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3825 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type */
3830 *status = MGMT_STATUS_INVALID_PARAMS;
/* Request-completion callback shared by Start Discovery and Start Service
 * Discovery: completes the pending command with the mapped status, updates
 * the discovery state (STOPPED on error, FINDING on success), and — for
 * LE/interleaved discovery — arms the delayed le_scan_disable work with the
 * appropriate timeout (BR/EDR inquiry has its own HCI-level timeout).
 */
3837 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3840 struct pending_cmd *cmd;
3841 unsigned long timeout;
3843 BT_DBG("status %d", status);
3847 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3849 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3852 cmd->cmd_complete(cmd, mgmt_status(status));
3853 mgmt_pending_remove(cmd);
3857 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3861 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3863 switch (hdev->discovery.type) {
3864 case DISCOV_TYPE_LE:
3865 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3867 case DISCOV_TYPE_INTERLEAVED:
3868 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3870 case DISCOV_TYPE_BREDR:
3874 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3880 queue_delayed_work(hdev->workqueue,
3881 &hdev->le_scan_disable, timeout);
3884 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: begin device discovery of the requested
 * type. Requires a powered adapter and no discovery/periodic inquiry in
 * progress. Clears any previous discovery filter, records the type, builds
 * the HCI commands via trigger_discovery(), and on success transitions the
 * discovery state machine to DISCOVERY_STARTING.
 */
3887 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3888 void *data, u16 len)
3890 struct mgmt_cp_start_discovery *cp = data;
3891 struct pending_cmd *cmd;
3892 struct hci_request req;
3896 BT_DBG("%s", hdev->name);
3900 if (!hdev_is_powered(hdev)) {
3901 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3902 MGMT_STATUS_NOT_POWERED,
3903 &cp->type, sizeof(cp->type));
3907 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3908 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3909 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3910 MGMT_STATUS_BUSY, &cp->type,
3915 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3921 cmd->cmd_complete = generic_cmd_complete;
3923 /* Clear the discovery filter first to free any previously
3924 * allocated memory for the UUID list.
3926 hci_discovery_filter_clear(hdev);
3928 hdev->discovery.type = cp->type;
3929 hdev->discovery.report_invalid_rssi = false;
3931 hci_req_init(&req, hdev);
3933 if (!trigger_discovery(&req, &status)) {
3934 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3935 status, &cp->type, sizeof(cp->type));
3936 mgmt_pending_remove(cmd);
3940 err = hci_req_run(&req, start_discovery_complete);
3942 mgmt_pending_remove(cmd);
3946 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3949 hci_dev_unlock(hdev);
/* cmd_complete callback for Start Service Discovery: reply with the
 * command's own stored parameters (payload arguments elided here).
 */
3953 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3955 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery but with a
 * result filter — an RSSI threshold and a list of service UUIDs.
 *
 * Validation: powered adapter, no discovery/periodic inquiry running,
 * uuid_count bounded by what fits in a u16-sized command (each UUID is 16
 * bytes), and the total length must match sizeof(*cp) + uuid_count * 16
 * exactly. The UUID list is kmemdup'd into hdev->discovery.uuids; from
 * there the flow mirrors start_discovery().
 */
3959 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3960 void *data, u16 len)
3962 struct mgmt_cp_start_service_discovery *cp = data;
3963 struct pending_cmd *cmd;
3964 struct hci_request req;
3965 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3966 u16 uuid_count, expected_len;
3970 BT_DBG("%s", hdev->name);
3974 if (!hdev_is_powered(hdev)) {
3975 err = cmd_complete(sk, hdev->id,
3976 MGMT_OP_START_SERVICE_DISCOVERY,
3977 MGMT_STATUS_NOT_POWERED,
3978 &cp->type, sizeof(cp->type));
3982 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3983 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3984 err = cmd_complete(sk, hdev->id,
3985 MGMT_OP_START_SERVICE_DISCOVERY,
3986 MGMT_STATUS_BUSY, &cp->type,
/* Bound the UUID count before computing expected_len (overflow guard) */
3991 uuid_count = __le16_to_cpu(cp->uuid_count);
3992 if (uuid_count > max_uuid_count) {
3993 BT_ERR("service_discovery: too big uuid_count value %u",
3995 err = cmd_complete(sk, hdev->id,
3996 MGMT_OP_START_SERVICE_DISCOVERY,
3997 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4002 expected_len = sizeof(*cp) + uuid_count * 16;
4003 if (expected_len != len) {
4004 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4006 err = cmd_complete(sk, hdev->id,
4007 MGMT_OP_START_SERVICE_DISCOVERY,
4008 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4013 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4020 cmd->cmd_complete = service_discovery_cmd_complete;
4022 /* Clear the discovery filter first to free any previously
4023 * allocated memory for the UUID list.
4025 hci_discovery_filter_clear(hdev);
4027 hdev->discovery.type = cp->type;
4028 hdev->discovery.rssi = cp->rssi;
4029 hdev->discovery.uuid_count = uuid_count;
4031 if (uuid_count > 0) {
4032 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4034 if (!hdev->discovery.uuids) {
4035 err = cmd_complete(sk, hdev->id,
4036 MGMT_OP_START_SERVICE_DISCOVERY,
4038 &cp->type, sizeof(cp->type));
4039 mgmt_pending_remove(cmd);
4044 hci_req_init(&req, hdev);
4046 if (!trigger_discovery(&req, &status)) {
4047 err = cmd_complete(sk, hdev->id,
4048 MGMT_OP_START_SERVICE_DISCOVERY,
4049 status, &cp->type, sizeof(cp->type));
4050 mgmt_pending_remove(cmd);
4054 err = hci_req_run(&req, start_discovery_complete);
4056 mgmt_pending_remove(cmd);
4060 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4063 hci_dev_unlock(hdev);
/* Request-completion callback for Stop Discovery: complete the pending
 * MGMT_OP_STOP_DISCOVERY command and move the discovery state machine to
 * DISCOVERY_STOPPED.
 */
4067 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4069 struct pending_cmd *cmd;
4071 BT_DBG("status %d", status);
4075 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4077 cmd->cmd_complete(cmd, mgmt_status(status));
4078 mgmt_pending_remove(cmd);
4082 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4084 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: abort an active discovery session.
 * Rejected when no discovery is running, or when the requested type does
 * not match the type in progress. Builds a stop request; if running it
 * succeeds the state goes to DISCOVERY_STOPPING, and if no HCI commands
 * were needed (-ENODATA) the command is completed immediately and the
 * state set to DISCOVERY_STOPPED.
 */
4087 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4090 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4091 struct pending_cmd *cmd;
4092 struct hci_request req;
4095 BT_DBG("%s", hdev->name);
4099 if (!hci_discovery_active(hdev)) {
4100 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4101 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4102 sizeof(mgmt_cp->type));
4106 if (hdev->discovery.type != mgmt_cp->type) {
4107 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4108 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4109 sizeof(mgmt_cp->type));
4113 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4119 cmd->cmd_complete = generic_cmd_complete;
4121 hci_req_init(&req, hdev);
4123 hci_stop_discovery(&req);
4125 err = hci_req_run(&req, stop_discovery_complete);
4127 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4131 mgmt_pending_remove(cmd);
4133 /* If no HCI commands were sent we're done */
4134 if (err == -ENODATA) {
4135 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4136 &mgmt_cp->type, sizeof(mgmt_cp->type));
4137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4141 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, userspace confirms
 * whether a device's name is already known. Looks the device up in the
 * inquiry cache; NAME_KNOWN skips remote name resolution, NAME_NEEDED
 * re-queues the entry for name resolution.
 */
4145 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4148 struct mgmt_cp_confirm_name *cp = data;
4149 struct inquiry_entry *e;
4152 BT_DBG("%s", hdev->name);
4156 if (!hci_discovery_active(hdev)) {
4157 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4158 MGMT_STATUS_FAILED, &cp->addr,
4163 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4165 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4166 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4171 if (cp->name_known) {
4172 e->name_state = NAME_KNOWN;
4175 e->name_state = NAME_NEEDED;
4176 hci_inquiry_cache_update_resolve(hdev, e);
4179 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4183 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the adapter blacklist.
 * On success a MGMT_EV_DEVICE_BLOCKED event is emitted to other mgmt
 * sockets; a failed list insert maps to MGMT_STATUS_FAILED.
 */
4187 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4190 struct mgmt_cp_block_device *cp = data;
4194 BT_DBG("%s", hdev->name);
4196 if (!bdaddr_type_is_valid(cp->addr.type))
4197 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4198 MGMT_STATUS_INVALID_PARAMS,
4199 &cp->addr, sizeof(cp->addr));
4203 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4206 status = MGMT_STATUS_FAILED;
4210 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4212 status = MGMT_STATUS_SUCCESS;
4215 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4216 &cp->addr, sizeof(cp->addr));
4218 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device() — remove the
 * address from the blacklist and emit MGMT_EV_DEVICE_UNBLOCKED. A failed
 * delete (address not on the list) maps to INVALID_PARAMS.
 */
4223 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4226 struct mgmt_cp_unblock_device *cp = data;
4230 BT_DBG("%s", hdev->name);
4232 if (!bdaddr_type_is_valid(cp->addr.type))
4233 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4234 MGMT_STATUS_INVALID_PARAMS,
4235 &cp->addr, sizeof(cp->addr));
4239 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4242 status = MGMT_STATUS_INVALID_PARAMS;
4246 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4248 status = MGMT_STATUS_SUCCESS;
4251 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4252 &cp->addr, sizeof(cp->addr));
4254 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (source, vendor,
 * product, version) used in the DI profile. Source must be 0x0000-0x0002;
 * after storing, an HCI request is built (EIR update elided here) and run
 * so the controller advertises the new values.
 */
4259 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4262 struct mgmt_cp_set_device_id *cp = data;
4263 struct hci_request req;
4267 BT_DBG("%s", hdev->name);
4269 source = __le16_to_cpu(cp->source);
4271 if (source > 0x0002)
4272 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4273 MGMT_STATUS_INVALID_PARAMS);
4277 hdev->devid_source = source;
4278 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4279 hdev->devid_product = __le16_to_cpu(cp->product);
4280 hdev->devid_version = __le16_to_cpu(cp->version);
4282 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4284 hci_req_init(&req, hdev);
4286 hci_req_run(&req, NULL);
4288 hci_dev_unlock(hdev);
/* Request-completion callback for Set Advertising: on error, fail all
 * pending SET_ADVERTISING commands with the mapped status; on success,
 * sync the HCI_ADVERTISING mgmt flag with the controller's HCI_LE_ADV
 * state, answer the pending commands, and broadcast new settings.
 */
4293 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4296 struct cmd_lookup match = { NULL, hdev };
4301 u8 mgmt_err = mgmt_status(status);
4303 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4304 cmd_status_rsp, &mgmt_err);
4308 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4309 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4311 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4313 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4316 new_settings(hdev, match.sk);
4322 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: toggle LE advertising.
 *
 * Requires LE support; val must be 0 or 1. When the adapter is off, the
 * setting is unchanged, an LE connection exists, or an active LE scan is
 * running, only the HCI_ADVERTISING flag is toggled and settings are
 * reported — no HCI traffic. Otherwise (after checking no conflicting
 * SET_ADVERTISING/SET_LE command is pending) an enable/disable advertising
 * request is queued and run.
 */
4325 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4328 struct mgmt_mode *cp = data;
4329 struct pending_cmd *cmd;
4330 struct hci_request req;
4331 u8 val, enabled, status;
4334 BT_DBG("request for %s", hdev->name);
4336 status = mgmt_le_support(hdev);
4338 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4341 if (cp->val != 0x00 && cp->val != 0x01)
4342 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4343 MGMT_STATUS_INVALID_PARAMS,
4348 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4350 /* The following conditions are ones which mean that we should
4351 * not do any HCI communication but directly send a mgmt
4352 * response to user space (after toggling the flag if
4355 if (!hdev_is_powered(hdev) || val == enabled ||
4356 hci_conn_num(hdev, LE_LINK) > 0 ||
4357 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4358 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4359 bool changed = false;
4361 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4362 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4366 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4371 err = new_settings(hdev, sk);
4376 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4377 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4378 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4383 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4389 hci_req_init(&req, hdev);
4392 enable_advertising(&req);
4394 disable_advertising(&req);
4396 err = hci_req_run(&req, set_advertising_complete);
4398 mgmt_pending_remove(cmd);
4401 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed on LE-capable, powered-off adapters. A non-ANY
 * address must not be BDADDR_NONE and must have its two most significant
 * bits set (static random address format per the LE spec).
 */
4405 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4406 void *data, u16 len)
4408 struct mgmt_cp_set_static_address *cp = data;
4411 BT_DBG("%s", hdev->name);
4413 if (!lmp_le_capable(hdev))
4414 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4415 MGMT_STATUS_NOT_SUPPORTED);
4417 if (hdev_is_powered(hdev))
4418 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4419 MGMT_STATUS_REJECTED);
4421 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4422 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4423 return cmd_status(sk, hdev->id,
4424 MGMT_OP_SET_STATIC_ADDRESS,
4425 MGMT_STATUS_INVALID_PARAMS);
4427 /* Two most significant bits shall be set */
4428 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4429 return cmd_status(sk, hdev->id,
4430 MGMT_OP_SET_STATIC_ADDRESS,
4431 MGMT_STATUS_INVALID_PARAMS);
4436 bacpy(&hdev->static_addr, &cp->bdaddr);
4438 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4440 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set LE scan interval and window.
 * Both must be within 0x0004-0x4000 and window must not exceed interval.
 * If a background scan is running (and no discovery is active), it is
 * restarted so the new parameters take effect immediately.
 */
4445 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4446 void *data, u16 len)
4448 struct mgmt_cp_set_scan_params *cp = data;
4449 __u16 interval, window;
4452 BT_DBG("%s", hdev->name);
4454 if (!lmp_le_capable(hdev))
4455 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4456 MGMT_STATUS_NOT_SUPPORTED);
4458 interval = __le16_to_cpu(cp->interval);
4460 if (interval < 0x0004 || interval > 0x4000)
4461 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4462 MGMT_STATUS_INVALID_PARAMS);
4464 window = __le16_to_cpu(cp->window);
4466 if (window < 0x0004 || window > 0x4000)
4467 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4468 MGMT_STATUS_INVALID_PARAMS);
4470 if (window > interval)
4471 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4472 MGMT_STATUS_INVALID_PARAMS);
4476 hdev->le_scan_interval = interval;
4477 hdev->le_scan_window = window;
4479 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4481 /* If background scan is running, restart it so new parameters are
4484 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4485 hdev->discovery.state == DISCOVERY_STOPPED) {
4486 struct hci_request req;
4488 hci_req_init(&req, hdev);
4490 hci_req_add_le_scan_disable(&req);
4491 hci_req_add_le_passive_scan(&req);
4493 hci_req_run(&req, NULL);
4496 hci_dev_unlock(hdev);
/* Request-completion callback for Set Fast Connectable: on error, reply
 * with the mapped status; on success, set/clear HCI_FAST_CONNECTABLE per
 * the pending command's requested value, send the settings response and
 * broadcast new settings.
 */
4501 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4504 struct pending_cmd *cmd;
4506 BT_DBG("status 0x%02x", status);
4510 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4515 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4516 mgmt_status(status));
4518 struct mgmt_mode *cp = cmd->param;
4521 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4523 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4525 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4526 new_settings(hdev, cmd->sk);
4529 mgmt_pending_remove(cmd);
4532 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page scan
 * parameters. Requires BR/EDR enabled and controller version >= 1.2, a
 * powered and connectable adapter, val of 0/1, and no conflicting pending
 * command. A no-op request (flag already at the requested value) is
 * answered directly; otherwise a write_fast_connectable request is run
 * with fast_connectable_complete as callback.
 */
4535 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4536 void *data, u16 len)
4538 struct mgmt_mode *cp = data;
4539 struct pending_cmd *cmd;
4540 struct hci_request req;
4543 BT_DBG("%s", hdev->name);
4545 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4546 hdev->hci_ver < BLUETOOTH_VER_1_2)
4547 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4548 MGMT_STATUS_NOT_SUPPORTED);
4550 if (cp->val != 0x00 && cp->val != 0x01)
4551 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4552 MGMT_STATUS_INVALID_PARAMS);
4554 if (!hdev_is_powered(hdev))
4555 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4556 MGMT_STATUS_NOT_POWERED);
4558 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4559 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4560 MGMT_STATUS_REJECTED);
4564 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4565 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4570 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4571 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4576 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4583 hci_req_init(&req, hdev);
4585 write_fast_connectable(&req, cp->val);
4587 err = hci_req_run(&req, fast_connectable_complete);
4589 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4590 MGMT_STATUS_FAILED);
4591 mgmt_pending_remove(cmd);
4595 hci_dev_unlock(hdev);
/* Completion callback for the HCI request issued by set_bredr().
 * set_bredr() sets HCI_BREDR_ENABLED optimistically before running the
 * request, so on failure the flag is cleared again and an error status
 * is sent; on success the settings response and New Settings event are
 * emitted. (Some lines are elided in this extract.)
 */
4600 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4602 struct pending_cmd *cmd;
4604 BT_DBG("status 0x%02x", status);
4608 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4613 u8 mgmt_err = mgmt_status(status);
4615 /* We need to restore the flag if related HCI commands
4618 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4620 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4622 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4623 new_settings(hdev, cmd->sk);
4626 mgmt_pending_remove(cmd);
4629 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR: toggle BR/EDR support on a dual-mode
 * (BR/EDR + LE) controller while LE stays enabled.
 *
 * Powered off: the flag is just flipped (and, when disabling, all
 * BR/EDR-only flags are cleared). Powered on: disabling is rejected,
 * and re-enabling is rejected when a static LE address or secure
 * connections is in use; otherwise the flag is set optimistically and
 * an HCI request (fast connectable off, page scan update, advertising
 * data update) is run, completing in set_bredr_complete().
 * NOTE(review): several lines are elided in this extract (locks, goto
 * labels, err declaration, braces).
 */
4632 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4634 struct mgmt_mode *cp = data;
4635 struct pending_cmd *cmd;
4636 struct hci_request req;
4639 BT_DBG("request for %s", hdev->name);
/* Only meaningful for dual-mode controllers with LE enabled */
4641 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4642 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4643 MGMT_STATUS_NOT_SUPPORTED);
4645 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4646 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4647 MGMT_STATUS_REJECTED);
4649 if (cp->val != 0x00 && cp->val != 0x01)
4650 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4651 MGMT_STATUS_INVALID_PARAMS);
4655 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4656 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4660 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR invalidates all BR/EDR-specific settings */
4662 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4663 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4664 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4665 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4666 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4669 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4671 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4675 err = new_settings(hdev, sk);
4679 /* Reject disabling when powered on */
4681 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4682 MGMT_STATUS_REJECTED);
4685 /* When configuring a dual-mode controller to operate
4686 * with LE only and using a static address, then switching
4687 * BR/EDR back on is not allowed.
4689 * Dual-mode controllers shall operate with the public
4690 * address as its identity address for BR/EDR and LE. So
4691 * reject the attempt to create an invalid configuration.
4693 * The same restrictions applies when secure connections
4694 * has been enabled. For BR/EDR this is a controller feature
4695 * while for LE it is a host stack feature. This means that
4696 * switching BR/EDR back on when secure connections has been
4697 * enabled is not a supported transaction.
4699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4700 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4701 test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
4702 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4703 MGMT_STATUS_REJECTED);
4708 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4709 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4714 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4720 /* We need to flip the bit already here so that update_adv_data
4721 * generates the correct flags.
4723 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4725 hci_req_init(&req, hdev);
4727 write_fast_connectable(&req, false);
4728 __hci_update_page_scan(&req);
4730 /* Since only the advertising data flags will change, there
4731 * is no need to update the scan response data.
4733 update_adv_data(&req);
4735 err = hci_req_run(&req, set_bredr_complete);
4737 mgmt_pending_remove(cmd);
4740 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN: 0x00 = off, 0x01 = secure
 * connections enabled, 0x02 = SC-only mode (HCI_SC_ONLY).
 * When the adapter is powered off, not SC capable, or BR/EDR is
 * disabled, only the host flags are updated; otherwise a pending
 * command is queued and HCI Write Secure Connections Host Support is
 * sent. (Some lines are elided in this extract: locks, goto labels,
 * err/val/changed declarations, braces.)
 */
4744 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4745 void *data, u16 len)
4747 struct mgmt_mode *cp = data;
4748 struct pending_cmd *cmd;
4752 BT_DBG("request for %s", hdev->name);
/* SC needs either controller support or LE enabled on the host */
4754 if (!lmp_sc_capable(hdev) &&
4755 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4756 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4757 MGMT_STATUS_NOT_SUPPORTED);
/* For BR/EDR, SC is layered on top of SSP */
4759 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4760 !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4761 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4762 MGMT_STATUS_REJECTED);
4764 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4765 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4766 MGMT_STATUS_INVALID_PARAMS);
/* Host-flag-only path: no HCI command can or needs to be sent */
4770 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4771 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4775 changed = !test_and_set_bit(HCI_SC_ENABLED,
4777 if (cp->val == 0x02)
4778 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4780 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4782 changed = test_and_clear_bit(HCI_SC_ENABLED,
4784 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4787 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4792 err = new_settings(hdev, sk);
4797 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4798 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (including SC-only sub-state) */
4805 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4806 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4807 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4811 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4817 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4819 mgmt_pending_remove(cmd);
4823 if (cp->val == 0x02)
4824 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4826 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4829 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS: 0x00 = discard debug keys,
 * 0x01 = keep debug keys (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally
 * use SSP debug mode (HCI_USE_DEBUG_KEYS). When the use-flag actually
 * changes on a powered adapter with SSP enabled, the controller's SSP
 * debug mode is toggled via HCI. (Some lines elided in this extract:
 * locks, err declaration, braces.)
 */
4833 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4834 void *data, u16 len)
4836 struct mgmt_mode *cp = data;
4837 bool changed, use_changed;
4840 BT_DBG("request for %s", hdev->name);
4842 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4843 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4844 MGMT_STATUS_INVALID_PARAMS);
4849 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4852 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4855 if (cp->val == 0x02)
4856 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4859 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Push the new debug mode to the controller only when it changed */
4862 if (hdev_is_powered(hdev) && use_changed &&
4863 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4864 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4865 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4866 sizeof(mode), &mode);
4869 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4874 err = new_settings(hdev, sk);
4877 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY: enables/disables LE privacy
 * (resolvable private addresses). Only allowed while powered off.
 * Enabling stores the supplied IRK and marks the RPA as expired so a
 * fresh one is generated; disabling wipes the IRK. Support for this
 * command implies the caller handles IRKs, hence HCI_RPA_RESOLVING is
 * always set. (Some lines elided: locks, err/changed declarations.)
 */
4881 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4884 struct mgmt_cp_set_privacy *cp = cp_data;
4888 BT_DBG("request for %s", hdev->name);
4890 if (!lmp_le_capable(hdev))
4891 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4892 MGMT_STATUS_NOT_SUPPORTED);
4894 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4895 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4896 MGMT_STATUS_INVALID_PARAMS);
/* Changing the identity key while powered would confuse peers */
4898 if (hdev_is_powered(hdev))
4899 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4900 MGMT_STATUS_REJECTED);
4904 /* If user space supports this command it is also expected to
4905 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4907 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4910 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4911 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA on next power on */
4912 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4914 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4915 memset(hdev->irk, 0, sizeof(hdev->irk));
4916 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4919 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4924 err = new_settings(hdev, sk);
4927 hci_dev_unlock(hdev);
4931 static bool irk_is_valid(struct mgmt_irk_info *irk)
4933 switch (irk->addr.type) {
4934 case BDADDR_LE_PUBLIC:
4937 case BDADDR_LE_RANDOM:
4938 /* Two most significant bits shall be set */
4939 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: replaces the stored set of Identity
 * Resolving Keys with the list supplied by user space.
 * Validates the declared count against both the theoretical maximum
 * (so count * entry size cannot overflow u16) and the actual payload
 * length, and validates each IRK address before touching state. Also
 * sets HCI_RPA_RESOLVING since a caller using this command handles
 * IRKs. (Some lines elided: locks, i/err declarations, braces.)
 */
4947 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4950 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound that keeps expected_len below U16_MAX */
4951 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4952 sizeof(struct mgmt_irk_info));
4953 u16 irk_count, expected_len;
4956 BT_DBG("request for %s", hdev->name);
4958 if (!lmp_le_capable(hdev))
4959 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4960 MGMT_STATUS_NOT_SUPPORTED);
4962 irk_count = __le16_to_cpu(cp->irk_count);
4963 if (irk_count > max_irk_count) {
4964 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4965 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4966 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload size exactly */
4969 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4970 if (expected_len != len) {
4971 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4973 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4974 MGMT_STATUS_INVALID_PARAMS);
4977 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries up front so the load is all-or-nothing */
4979 for (i = 0; i < irk_count; i++) {
4980 struct mgmt_irk_info *key = &cp->irks[i];
4982 if (!irk_is_valid(key))
4983 return cmd_status(sk, hdev->id,
4985 MGMT_STATUS_INVALID_PARAMS);
4990 hci_smp_irks_clear(hdev);
4992 for (i = 0; i < irk_count; i++) {
4993 struct mgmt_irk_info *irk = &cp->irks[i];
4996 if (irk->addr.type == BDADDR_LE_PUBLIC)
4997 addr_type = ADDR_LE_DEV_PUBLIC;
4999 addr_type = ADDR_LE_DEV_RANDOM;
5001 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5005 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5007 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5009 hci_dev_unlock(hdev);
5014 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5016 if (key->master != 0x00 && key->master != 0x01)
5019 switch (key->addr.type) {
5020 case BDADDR_LE_PUBLIC:
5023 case BDADDR_LE_RANDOM:
5024 /* Two most significant bits shall be set */
5025 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replaces all stored SMP
 * LTKs with the supplied list. The declared key_count is checked
 * against an overflow-safe maximum and against the payload length,
 * and every entry is validated before the existing keys are cleared.
 * Each mgmt LTK type is mapped to an SMP key type plus an
 * authenticated flag. (Some lines elided: locks, i/err declarations,
 * break statements, default case, braces.)
 */
5033 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5034 void *cp_data, u16 len)
5036 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound that keeps expected_len below U16_MAX */
5037 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5038 sizeof(struct mgmt_ltk_info));
5039 u16 key_count, expected_len;
5042 BT_DBG("request for %s", hdev->name);
5044 if (!lmp_le_capable(hdev))
5045 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5046 MGMT_STATUS_NOT_SUPPORTED);
5048 key_count = __le16_to_cpu(cp->key_count);
5049 if (key_count > max_key_count) {
5050 BT_ERR("load_ltks: too big key_count value %u", key_count);
5051 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5052 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload size exactly */
5055 expected_len = sizeof(*cp) + key_count *
5056 sizeof(struct mgmt_ltk_info);
5057 if (expected_len != len) {
5058 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5060 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5061 MGMT_STATUS_INVALID_PARAMS);
5064 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all entries up front so the load is all-or-nothing */
5066 for (i = 0; i < key_count; i++) {
5067 struct mgmt_ltk_info *key = &cp->keys[i];
5069 if (!ltk_is_valid(key))
5070 return cmd_status(sk, hdev->id,
5071 MGMT_OP_LOAD_LONG_TERM_KEYS,
5072 MGMT_STATUS_INVALID_PARAMS);
5077 hci_smp_ltks_clear(hdev);
5079 for (i = 0; i < key_count; i++) {
5080 struct mgmt_ltk_info *key = &cp->keys[i];
5081 u8 type, addr_type, authenticated;
5083 if (key->addr.type == BDADDR_LE_PUBLIC)
5084 addr_type = ADDR_LE_DEV_PUBLIC;
5086 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt key type to SMP key type + authenticated flag */
5088 switch (key->type) {
5089 case MGMT_LTK_UNAUTHENTICATED:
5090 authenticated = 0x00;
5091 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5093 case MGMT_LTK_AUTHENTICATED:
5094 authenticated = 0x01;
5095 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5097 case MGMT_LTK_P256_UNAUTH:
5098 authenticated = 0x00;
5099 type = SMP_LTK_P256;
5101 case MGMT_LTK_P256_AUTH:
5102 authenticated = 0x01;
5103 type = SMP_LTK_P256;
5105 case MGMT_LTK_P256_DEBUG:
5106 authenticated = 0x00;
5107 type = SMP_LTK_P256_DEBUG;
5112 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5113 authenticated, key->val, key->enc_size, key->ediv,
5117 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5120 hci_dev_unlock(hdev);
/* cmd_complete handler for GET_CONN_INFO pending commands: builds the
 * reply from the values cached on the hci_conn on success, or with
 * the HCI "invalid" sentinels on failure, then drops the connection
 * reference taken when the command was queued. (Some lines elided:
 * hci_conn_put, return, braces.)
 */
5125 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5127 struct hci_conn *conn = cmd->user_data;
5128 struct mgmt_rp_get_conn_info rp;
/* The original request's address block is echoed back verbatim */
5131 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5133 if (status == MGMT_STATUS_SUCCESS) {
5134 rp.rssi = conn->rssi;
5135 rp.tx_power = conn->tx_power;
5136 rp.max_tx_power = conn->max_tx_power;
5138 rp.rssi = HCI_RSSI_INVALID;
5139 rp.tx_power = HCI_TX_POWER_INVALID;
5140 rp.max_tx_power = HCI_TX_POWER_INVALID;
5143 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
/* Release the reference held for the duration of the request */
5146 hci_conn_drop(conn);
/* HCI request completion for the Read RSSI / Read TX Power request
 * built by get_conn_info(). Recovers the connection handle from
 * whichever command was sent last, looks up the hci_conn and the
 * matching pending GET_CONN_INFO command, and finishes it. (Some
 * lines elided: locks, goto labels, unlock path, braces.)
 */
5152 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5155 struct hci_cp_read_rssi *cp;
5156 struct pending_cmd *cmd;
5157 struct hci_conn *conn;
5161 BT_DBG("status 0x%02x", hci_status);
5165 /* Commands sent in request are either Read RSSI or Read Transmit Power
5166 * Level so we check which one was last sent to retrieve connection
5167 * handle. Both commands have handle as first parameter so it's safe to
5168 * cast data on the same command struct.
5170 * First command sent is always Read RSSI and we fail only if it fails.
5171 * In other case we simply override error to indicate success as we
5172 * already remembered if TX power value is actually valid.
5174 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5176 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5177 status = MGMT_STATUS_SUCCESS;
5179 status = mgmt_status(hci_status);
5183 BT_ERR("invalid sent_cmd in conn_info response");
5187 handle = __le16_to_cpu(cp->handle);
5188 conn = hci_conn_hash_lookup_handle(hdev, handle);
5190 BT_ERR("unknown handle (%d) in conn_info response", handle);
/* Match the pending command by its stored hci_conn (user_data) */
5194 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5198 cmd->cmd_complete(cmd, status);
5199 mgmt_pending_remove(cmd);
5202 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI / TX power /
 * max TX power for an existing connection.
 * Cached values on the hci_conn are returned directly while they are
 * younger than a randomized age between conn_info_min_age and
 * conn_info_max_age; otherwise an HCI request (Read RSSI, plus Read
 * TX Power where still unknown) is run and the reply is sent from
 * conn_info_refresh_complete(). The hci_conn is pinned with
 * hci_conn_hold/hci_conn_get while the request is in flight. (Some
 * lines elided: locks, goto labels, err declaration, braces.)
 */
5205 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5208 struct mgmt_cp_get_conn_info *cp = data;
5209 struct mgmt_rp_get_conn_info rp;
5210 struct hci_conn *conn;
5211 unsigned long conn_info_age;
5214 BT_DBG("%s", hdev->name);
5216 memset(&rp, 0, sizeof(rp));
5217 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5218 rp.addr.type = cp->addr.type;
5220 if (!bdaddr_type_is_valid(cp->addr.type))
5221 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5222 MGMT_STATUS_INVALID_PARAMS,
5227 if (!hdev_is_powered(hdev)) {
5228 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5229 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Address type selects the BR/EDR (ACL) or LE connection table */
5233 if (cp->addr.type == BDADDR_BREDR)
5234 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5237 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5239 if (!conn || conn->state != BT_CONNECTED) {
5240 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5241 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Only one GET_CONN_INFO per connection may be in flight */
5245 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5246 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5247 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5251 /* To avoid client trying to guess when to poll again for information we
5252 * calculate conn info age as random value between min/max set in hdev.
5254 conn_info_age = hdev->conn_info_min_age +
5255 prandom_u32_max(hdev->conn_info_max_age -
5256 hdev->conn_info_min_age);
5258 /* Query controller to refresh cached values if they are too old or were
5261 if (time_after(jiffies, conn->conn_info_timestamp +
5262 msecs_to_jiffies(conn_info_age)) ||
5263 !conn->conn_info_timestamp) {
5264 struct hci_request req;
5265 struct hci_cp_read_tx_power req_txp_cp;
5266 struct hci_cp_read_rssi req_rssi_cp;
5267 struct pending_cmd *cmd;
5269 hci_req_init(&req, hdev);
5270 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5271 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5274 /* For LE links TX power does not change thus we don't need to
5275 * query for it once value is known.
5277 if (!bdaddr_type_is_le(cp->addr.type) ||
5278 conn->tx_power == HCI_TX_POWER_INVALID) {
5279 req_txp_cp.handle = cpu_to_le16(conn->handle);
5280 req_txp_cp.type = 0x00;
5281 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5282 sizeof(req_txp_cp), &req_txp_cp);
5285 /* Max TX power needs to be read only once per connection */
5286 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5287 req_txp_cp.handle = cpu_to_le16(conn->handle);
5288 req_txp_cp.type = 0x01;
5289 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5290 sizeof(req_txp_cp), &req_txp_cp);
5293 err = hci_req_run(&req, conn_info_refresh_complete);
5297 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Pin the connection until the async reply is delivered */
5304 hci_conn_hold(conn);
5305 cmd->user_data = hci_conn_get(conn);
5306 cmd->cmd_complete = conn_info_cmd_complete;
5308 conn->conn_info_timestamp = jiffies;
5310 /* Cache is valid, just reply with values cached in hci_conn */
5311 rp.rssi = conn->rssi;
5312 rp.tx_power = conn->tx_power;
5313 rp.max_tx_power = conn->max_tx_power;
5315 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5316 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5320 hci_dev_unlock(hdev);
/* cmd_complete handler for GET_CLOCK_INFO pending commands: fills in
 * the local clock from hdev and, for a connection-specific query, the
 * piconet clock and accuracy from the hci_conn, then sends the reply
 * and drops the held references. (Some lines elided: status checks,
 * hci_dev_put/hci_conn_put, return, braces.)
 */
5324 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5326 struct hci_conn *conn = cmd->user_data;
5327 struct mgmt_rp_get_clock_info rp;
5328 struct hci_dev *hdev;
5331 memset(&rp, 0, sizeof(rp));
5332 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5337 hdev = hci_dev_get(cmd->index);
5339 rp.local_clock = cpu_to_le32(hdev->clock);
/* Piconet clock/accuracy only apply when a connection was queried */
5344 rp.piconet_clock = cpu_to_le32(conn->clock);
5345 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5349 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5353 hci_conn_drop(conn);
/* HCI request completion for the Read Clock request built by
 * get_clock_info(). The "which" field of the sent command decides
 * whether a piconet clock (connection handle) or only the local clock
 * was read; the matching pending command is then completed. (Some
 * lines elided: locks, goto labels, conn = NULL path, braces.)
 */
5360 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5362 struct hci_cp_read_clock *hci_cp;
5363 struct pending_cmd *cmd;
5364 struct hci_conn *conn;
5366 BT_DBG("%s status %u", hdev->name, status);
5370 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet clock read for a specific handle */
5374 if (hci_cp->which) {
5375 u16 handle = __le16_to_cpu(hci_cp->handle);
5376 conn = hci_conn_hash_lookup_handle(hdev, handle);
5381 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5385 cmd->cmd_complete(cmd, mgmt_status(status));
5386 mgmt_pending_remove(cmd);
5389 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO (BR/EDR only): reads the local
 * clock, and additionally the piconet clock of a specific connection
 * when a non-zero address is given. Queues a pending command, builds
 * a Read Clock request (one command for local, a second for the
 * piconet clock) and completes asynchronously in
 * get_clock_info_complete(). (Some lines elided: locks, goto labels,
 * conn = NULL branch, err declaration, braces.)
 */
5392 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5395 struct mgmt_cp_get_clock_info *cp = data;
5396 struct mgmt_rp_get_clock_info rp;
5397 struct hci_cp_read_clock hci_cp;
5398 struct pending_cmd *cmd;
5399 struct hci_request req;
5400 struct hci_conn *conn;
5403 BT_DBG("%s", hdev->name);
5405 memset(&rp, 0, sizeof(rp));
5406 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5407 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept */
5409 if (cp->addr.type != BDADDR_BREDR)
5410 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5411 MGMT_STATUS_INVALID_PARAMS,
5416 if (!hdev_is_powered(hdev)) {
5417 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5418 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-zero address: the query also targets a specific connection */
5422 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5423 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5425 if (!conn || conn->state != BT_CONNECTED) {
5426 err = cmd_complete(sk, hdev->id,
5427 MGMT_OP_GET_CLOCK_INFO,
5428 MGMT_STATUS_NOT_CONNECTED,
5436 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5442 cmd->cmd_complete = clock_info_cmd_complete;
5444 hci_req_init(&req, hdev);
/* First command: local clock (which = 0x00 via memset) */
5446 memset(&hci_cp, 0, sizeof(hci_cp));
5447 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Pin the connection and add the piconet clock read */
5450 hci_conn_hold(conn);
5451 cmd->user_data = hci_conn_get(conn);
5453 hci_cp.handle = cpu_to_le16(conn->handle);
5454 hci_cp.which = 0x01; /* Piconet clock */
5455 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5458 err = hci_req_run(&req, get_clock_info_complete);
5460 mgmt_pending_remove(cmd);
5463 hci_dev_unlock(hdev);
5467 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5469 struct hci_conn *conn;
5471 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5475 if (conn->dst_type != type)
5478 if (conn->state != BT_CONNECTED)
5484 /* This function requires the caller holds hdev->lock */
/* Create-or-update the connection parameters for an LE address and
 * set its auto-connect policy, moving the entry onto the matching
 * action list (pend_le_reports / pend_le_conns) and scheduling a
 * background scan update in the supplied request. Returns 0 on
 * success. (Some lines elided: -EIO return, early-exit when the
 * policy is unchanged, break statements, braces.)
 */
5485 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5486 u8 addr_type, u8 auto_connect)
5488 struct hci_dev *hdev = req->hdev;
5489 struct hci_conn_params *params;
5491 params = hci_conn_params_add(hdev, addr, addr_type);
5495 if (params->auto_connect == auto_connect)
/* Detach from any previous action list before re-filing */
5498 list_del_init(&params->action);
5500 switch (auto_connect) {
5501 case HCI_AUTO_CONN_DISABLED:
5502 case HCI_AUTO_CONN_LINK_LOSS:
5503 __hci_update_background_scan(req);
5505 case HCI_AUTO_CONN_REPORT:
5506 list_add(&params->action, &hdev->pend_le_reports);
5507 __hci_update_background_scan(req);
5509 case HCI_AUTO_CONN_DIRECT:
5510 case HCI_AUTO_CONN_ALWAYS:
/* Only queue for connection if not already connected */
5511 if (!is_connected(hdev, addr, addr_type)) {
5512 list_add(&params->action, &hdev->pend_le_conns);
5513 __hci_update_background_scan(req);
5518 params->auto_connect = auto_connect;
5520 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
5526 static void device_added(struct sock *sk, struct hci_dev *hdev,
5527 bdaddr_t *bdaddr, u8 type, u8 action)
5529 struct mgmt_ev_device_added ev;
5531 bacpy(&ev.addr.bdaddr, bdaddr);
5532 ev.addr.type = type;
5535 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion for add_device(): finds the pending
 * ADD_DEVICE command and finishes it with the mapped status. (Some
 * lines elided: hci_dev_lock, early return, unlock label, braces.)
 */
5538 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5540 struct pending_cmd *cmd;
5542 BT_DBG("status 0x%02x", status);
5546 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5550 cmd->cmd_complete(cmd, mgmt_status(status));
5551 mgmt_pending_remove(cmd);
5554 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_DEVICE: register a device for automatic
 * connection handling. action 0x00 = background scan + Device Found
 * reports, 0x01 = allow incoming connection / direct connect,
 * 0x02 = auto-connect always. BR/EDR addresses go onto the whitelist
 * (incoming-only); LE addresses get conn params with the mapped
 * auto-connect policy. Completes via add_device_complete() unless no
 * HCI commands were needed. (Some lines elided: locks, goto labels,
 * err declaration, braces.)
 */
5557 static int add_device(struct sock *sk, struct hci_dev *hdev,
5558 void *data, u16 len)
5560 struct mgmt_cp_add_device *cp = data;
5561 struct pending_cmd *cmd;
5562 struct hci_request req;
5563 u8 auto_conn, addr_type;
5566 BT_DBG("%s", hdev->name);
5568 if (!bdaddr_type_is_valid(cp->addr.type) ||
5569 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5570 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5571 MGMT_STATUS_INVALID_PARAMS,
5572 &cp->addr, sizeof(cp->addr));
5574 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5575 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5576 MGMT_STATUS_INVALID_PARAMS,
5577 &cp->addr, sizeof(cp->addr));
5579 hci_req_init(&req, hdev);
5583 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5589 cmd->cmd_complete = addr_cmd_complete;
5591 if (cp->addr.type == BDADDR_BREDR) {
5592 /* Only incoming connections action is supported for now */
5593 if (cp->action != 0x01) {
5594 err = cmd->cmd_complete(cmd,
5595 MGMT_STATUS_INVALID_PARAMS);
5596 mgmt_pending_remove(cmd);
5600 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Whitelist changed, so page scan parameters may need updating */
5605 __hci_update_page_scan(&req);
5610 if (cp->addr.type == BDADDR_LE_PUBLIC)
5611 addr_type = ADDR_LE_DEV_PUBLIC;
5613 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt action onto the internal auto-connect policy */
5615 if (cp->action == 0x02)
5616 auto_conn = HCI_AUTO_CONN_ALWAYS;
5617 else if (cp->action == 0x01)
5618 auto_conn = HCI_AUTO_CONN_DIRECT;
5620 auto_conn = HCI_AUTO_CONN_REPORT;
5622 /* If the connection parameters don't exist for this device,
5623 * they will be created and configured with defaults.
5625 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5627 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5628 mgmt_pending_remove(cmd);
5633 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5635 err = hci_req_run(&req, add_device_complete);
5637 /* ENODATA means no HCI commands were needed (e.g. if
5638 * the adapter is powered off).
5640 if (err == -ENODATA)
5641 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5642 mgmt_pending_remove(cmd);
5646 hci_dev_unlock(hdev);
5650 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5651 bdaddr_t *bdaddr, u8 type)
5653 struct mgmt_ev_device_removed ev;
5655 bacpy(&ev.addr.bdaddr, bdaddr);
5656 ev.addr.type = type;
5658 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion for remove_device(): finds the pending
 * REMOVE_DEVICE command and finishes it with the mapped status.
 * (Some lines elided: hci_dev_lock, early return, unlock label.)
 */
5661 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5663 struct pending_cmd *cmd;
5665 BT_DBG("status 0x%02x", status);
5669 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5673 cmd->cmd_complete(cmd, mgmt_status(status));
5674 mgmt_pending_remove(cmd);
5677 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_DEVICE: undo add_device(). A specific
 * BR/EDR address is removed from the whitelist; a specific LE address
 * has its (non-disabled) conn params deleted. BDADDR_ANY with type 0
 * clears the whole whitelist and all non-disabled LE conn params.
 * Completes via remove_device_complete() unless no HCI commands were
 * needed. (Some lines elided: locks, goto labels, addr_type/err
 * declarations, list_del/kfree of params, braces.)
 */
5680 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5681 void *data, u16 len)
5683 struct mgmt_cp_remove_device *cp = data;
5684 struct pending_cmd *cmd;
5685 struct hci_request req;
5688 BT_DBG("%s", hdev->name);
5690 hci_req_init(&req, hdev);
5694 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5700 cmd->cmd_complete = addr_cmd_complete;
/* Non-zero address: remove one specific device */
5702 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5703 struct hci_conn_params *params;
5706 if (!bdaddr_type_is_valid(cp->addr.type)) {
5707 err = cmd->cmd_complete(cmd,
5708 MGMT_STATUS_INVALID_PARAMS);
5709 mgmt_pending_remove(cmd);
5713 if (cp->addr.type == BDADDR_BREDR) {
5714 err = hci_bdaddr_list_del(&hdev->whitelist,
5718 err = cmd->cmd_complete(cmd,
5719 MGMT_STATUS_INVALID_PARAMS);
5720 mgmt_pending_remove(cmd);
5724 __hci_update_page_scan(&req);
5726 device_removed(sk, hdev, &cp->addr.bdaddr,
5731 if (cp->addr.type == BDADDR_LE_PUBLIC)
5732 addr_type = ADDR_LE_DEV_PUBLIC;
5734 addr_type = ADDR_LE_DEV_RANDOM;
5736 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5739 err = cmd->cmd_complete(cmd,
5740 MGMT_STATUS_INVALID_PARAMS);
5741 mgmt_pending_remove(cmd);
/* Disabled entries were never added via ADD_DEVICE */
5745 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5746 err = cmd->cmd_complete(cmd,
5747 MGMT_STATUS_INVALID_PARAMS);
5748 mgmt_pending_remove(cmd);
5752 list_del(&params->action);
5753 list_del(&params->list);
5755 __hci_update_background_scan(&req);
5757 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe everything; only type 0 is valid here */
5759 struct hci_conn_params *p, *tmp;
5760 struct bdaddr_list *b, *btmp;
5762 if (cp->addr.type) {
5763 err = cmd->cmd_complete(cmd,
5764 MGMT_STATUS_INVALID_PARAMS);
5765 mgmt_pending_remove(cmd);
5769 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5770 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5775 __hci_update_page_scan(&req);
5777 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5778 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5780 device_removed(sk, hdev, &p->addr, p->addr_type);
5781 list_del(&p->action);
5786 BT_DBG("All LE connection parameters were removed");
5788 __hci_update_background_scan(&req);
5792 err = hci_req_run(&req, remove_device_complete);
5794 /* ENODATA means no HCI commands were needed (e.g. if
5795 * the adapter is powered off).
5797 if (err == -ENODATA)
5798 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5799 mgmt_pending_remove(cmd);
5803 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace all stored LE
 * connection parameters (interval min/max, latency, supervision
 * timeout) with the supplied list. The declared count is checked
 * against an overflow-safe maximum and the payload length. Unlike the
 * key loaders, invalid individual entries are logged and skipped
 * rather than failing the whole command. (Some lines elided: locks,
 * i/addr_type declarations, continue statements, braces.)
 */
5807 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5810 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound that keeps expected_len below U16_MAX */
5811 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5812 sizeof(struct mgmt_conn_param));
5813 u16 param_count, expected_len;
5816 if (!lmp_le_capable(hdev))
5817 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5818 MGMT_STATUS_NOT_SUPPORTED);
5820 param_count = __le16_to_cpu(cp->param_count);
5821 if (param_count > max_param_count) {
5822 BT_ERR("load_conn_param: too big param_count value %u",
5824 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5825 MGMT_STATUS_INVALID_PARAMS);
5828 expected_len = sizeof(*cp) + param_count *
5829 sizeof(struct mgmt_conn_param);
5830 if (expected_len != len) {
5831 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5833 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5834 MGMT_STATUS_INVALID_PARAMS);
5837 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop stale entries that are not tied to an auto-connect action */
5841 hci_conn_params_clear_disabled(hdev);
5843 for (i = 0; i < param_count; i++) {
5844 struct mgmt_conn_param *param = &cp->params[i];
5845 struct hci_conn_params *hci_param;
5846 u16 min, max, latency, timeout;
5849 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5852 if (param->addr.type == BDADDR_LE_PUBLIC) {
5853 addr_type = ADDR_LE_DEV_PUBLIC;
5854 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5855 addr_type = ADDR_LE_DEV_RANDOM;
5857 BT_ERR("Ignoring invalid connection parameters");
5861 min = le16_to_cpu(param->min_interval);
5862 max = le16_to_cpu(param->max_interval);
5863 latency = le16_to_cpu(param->latency);
5864 timeout = le16_to_cpu(param->timeout);
5866 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5867 min, max, latency, timeout);
/* Range-check against the spec limits before storing */
5869 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5870 BT_ERR("Ignoring invalid connection parameters");
5874 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5877 BT_ERR("Failed to add connection parameters");
5881 hci_param->conn_min_interval = min;
5882 hci_param->conn_max_interval = max;
5883 hci_param->conn_latency = latency;
5884 hci_param->supervision_timeout = timeout;
5887 hci_dev_unlock(hdev);
5889 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: mark the controller as
 * externally configured (or not). Only valid while powered off and
 * only on controllers with the EXTERNAL_CONFIG quirk. When the change
 * flips the configured/unconfigured state, the mgmt index is
 * re-registered so it moves between the configured and unconfigured
 * index lists, powering on via the req_workqueue when it becomes
 * configured. (Some lines elided: locks, err/changed declarations,
 * braces.)
 */
5892 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5893 void *data, u16 len)
5895 struct mgmt_cp_set_external_config *cp = data;
5899 BT_DBG("%s", hdev->name);
5901 if (hdev_is_powered(hdev))
5902 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5903 MGMT_STATUS_REJECTED);
5905 if (cp->config != 0x00 && cp->config != 0x01)
5906 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5907 MGMT_STATUS_INVALID_PARAMS);
5909 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5910 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5911 MGMT_STATUS_NOT_SUPPORTED);
5916 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5919 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5922 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5929 err = new_options(hdev, sk);
/* Re-register the index if the configured state flipped */
5931 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5932 mgmt_index_removed(hdev);
5934 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5935 set_bit(HCI_CONFIG, &hdev->dev_flags);
5936 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5938 queue_work(hdev->req_workqueue, &hdev->power_on);
5940 set_bit(HCI_RAW, &hdev->flags);
5941 mgmt_index_added(hdev);
5946 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: configure the public
 * Bluetooth address of a controller whose driver provides a
 * set_bdaddr hook. Only valid while powered off and with a non-zero
 * address. If setting the address makes an unconfigured controller
 * fully configured, its mgmt index is re-registered as configured and
 * it is powered up via the req_workqueue. (Some lines elided: locks,
 * err/changed declarations, goto label, braces.)
 */
5950 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5951 void *data, u16 len)
5953 struct mgmt_cp_set_public_address *cp = data;
5957 BT_DBG("%s", hdev->name);
5959 if (hdev_is_powered(hdev))
5960 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5961 MGMT_STATUS_REJECTED);
5963 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5964 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5965 MGMT_STATUS_INVALID_PARAMS);
/* Only possible when the driver can program the address */
5967 if (!hdev->set_bdaddr)
5968 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5969 MGMT_STATUS_NOT_SUPPORTED);
5973 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5974 bacpy(&hdev->public_addr, &cp->bdaddr);
5976 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5983 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5984 err = new_options(hdev, sk);
/* Address may have been the last missing piece of configuration */
5986 if (is_configured(hdev)) {
5987 mgmt_index_removed(hdev);
5989 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5991 set_bit(HCI_CONFIG, &hdev->dev_flags);
5992 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5994 queue_work(hdev->req_workqueue, &hdev->power_on);
5998 hci_dev_unlock(hdev);
/* Dispatch table for MGMT commands, indexed directly by opcode.
 *
 * Each entry carries the handler function, a flag for variable-length
 * payloads (length may exceed the stated minimum) and the expected
 * (or minimum) parameter size; mgmt_control() validates the payload
 * length against these before calling func.
 *
 * NOTE(review): struct field names for the flag/size members are on
 * elided lines (6004-6006) — inferred from the initializers; confirm
 * against the full source.
 */
6002 static const struct mgmt_handler {
6003 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
6007 } mgmt_handlers[] = {
6008 { NULL }, /* 0x0000 (no command) */
6009 { read_version, false, MGMT_READ_VERSION_SIZE },
6010 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
6011 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
6012 { read_controller_info, false, MGMT_READ_INFO_SIZE },
6013 { set_powered, false, MGMT_SETTING_SIZE },
6014 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
6015 { set_connectable, false, MGMT_SETTING_SIZE },
6016 { set_fast_connectable, false, MGMT_SETTING_SIZE },
6017 { set_bondable, false, MGMT_SETTING_SIZE },
6018 { set_link_security, false, MGMT_SETTING_SIZE },
6019 { set_ssp, false, MGMT_SETTING_SIZE },
6020 { set_hs, false, MGMT_SETTING_SIZE },
6021 { set_le, false, MGMT_SETTING_SIZE },
6022 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
6023 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
6024 { add_uuid, false, MGMT_ADD_UUID_SIZE },
6025 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
6026 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
6027 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
6028 { disconnect, false, MGMT_DISCONNECT_SIZE },
6029 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
6030 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
6031 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6032 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
6033 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
6034 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6035 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
6036 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
6037 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6038 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
6039 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6040 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6041 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
6042 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6043 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
6044 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
6045 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
6046 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
6047 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
6048 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
6049 { set_advertising, false, MGMT_SETTING_SIZE },
6050 { set_bredr, false, MGMT_SETTING_SIZE },
6051 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
6052 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
6053 { set_secure_conn, false, MGMT_SETTING_SIZE },
6054 { set_debug_keys, false, MGMT_SETTING_SIZE },
6055 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
6056 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
6057 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
6058 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
6059 { add_device, false, MGMT_ADD_DEVICE_SIZE },
6060 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
6061 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
6062 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
6063 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
6064 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
6065 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
6066 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for MGMT messages received from userspace.
 *
 * Copies the message, parses the mgmt_hdr (opcode/index/len), resolves
 * and validates the target controller, enforces the opcode/index
 * pairing rules and payload-length rules from mgmt_handlers[], and
 * finally dispatches to the handler function.
 *
 * NOTE(review): error-path labels, kfree/hci_dev_put cleanup and some
 * declarations fall on elided lines; comments cover the visible flow.
 */
6069 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6073 struct mgmt_hdr *hdr;
6074 u16 opcode, index, len;
6075 struct hci_dev *hdev = NULL;
6076 const struct mgmt_handler *handler;
6079 BT_DBG("got %zu bytes", msglen);
/* Must at least contain a full management header. */
6081 if (msglen < sizeof(*hdr))
6084 buf = kmalloc(msglen, GFP_KERNEL);
6088 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields arrive in little-endian wire order. */
6094 opcode = __le16_to_cpu(hdr->opcode);
6095 index = __le16_to_cpu(hdr->index);
6096 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must match the actual payload. */
6098 if (len != msglen - sizeof(*hdr)) {
6103 if (index != MGMT_INDEX_NONE) {
6104 hdev = hci_dev_get(index);
6106 err = cmd_status(sk, index, opcode,
6107 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup/config, or claimed by a user channel,
 * are not addressable over the management interface.
 */
6111 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6112 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6113 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6114 err = cmd_status(sk, index, opcode,
6115 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers only accept the three configuration
 * related opcodes.
 */
6119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6120 opcode != MGMT_OP_READ_CONFIG_INFO &&
6121 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
6122 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
6123 err = cmd_status(sk, index, opcode,
6124 MGMT_STATUS_INVALID_INDEX);
/* Unknown or unimplemented opcode. */
6129 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
6130 mgmt_handlers[opcode].func == NULL) {
6131 BT_DBG("Unknown op %u", opcode);
6132 err = cmd_status(sk, index, opcode,
6133 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global (index-less) commands must not carry a controller index... */
6137 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6138 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6139 err = cmd_status(sk, index, opcode,
6140 MGMT_STATUS_INVALID_INDEX);
/* ...and controller-specific commands require one. */
6144 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
6145 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6146 err = cmd_status(sk, index, opcode,
6147 MGMT_STATUS_INVALID_INDEX);
6151 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed-length
 * commands need exactly data_len bytes.
 */
6153 if ((handler->var_len && len < handler->data_len) ||
6154 (!handler->var_len && len != handler->data_len)) {
6155 err = cmd_status(sk, index, opcode,
6156 MGMT_STATUS_INVALID_PARAMS);
6161 mgmt_init_hdev(sk, hdev);
6163 cp = buf + sizeof(*hdr);
6165 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller to management listeners.
 *
 * AMP-only and raw-quirk devices are not exposed; unconfigured
 * controllers get the UNCONF variant of the index-added event.
 */
6179 void mgmt_index_added(struct hci_dev *hdev)
6181 if (hdev->dev_type != HCI_BREDR)
6184 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6187 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6188 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6190 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce controller removal to management listeners.
 *
 * All pending commands (opcode 0 == match any) are completed with
 * INVALID_INDEX before the index-removed event is emitted; mirrors
 * the filtering rules of mgmt_index_added().
 */
6193 void mgmt_index_removed(struct hci_dev *hdev)
6195 u8 status = MGMT_STATUS_INVALID_INDEX;
6197 if (hdev->dev_type != HCI_BREDR)
6200 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Fail every outstanding command for this controller. */
6203 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6205 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6206 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6208 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6211 /* This function requires the caller holds hdev->lock */
/* Re-sort every known LE connection parameter entry onto the proper
 * pending-action list (connect vs. report) after a power cycle, then
 * kick the background scan to act on the rebuilt lists.
 */
6212 static void restart_le_actions(struct hci_request *req)
6214 struct hci_dev *hdev = req->hdev;
6215 struct hci_conn_params *p;
6217 list_for_each_entry(p, &hdev->le_conn_params, list) {
6218 /* Needed for AUTO_OFF case where might not "really"
6219 * have been powered off.
/* Detach from whatever action list the entry was on before
 * re-adding it below.
 */
6221 list_del_init(&p->action);
6223 switch (p->auto_connect) {
6224 case HCI_AUTO_CONN_DIRECT:
6225 case HCI_AUTO_CONN_ALWAYS:
6226 list_add(&p->action, &hdev->pend_le_conns);
6228 case HCI_AUTO_CONN_REPORT:
6229 list_add(&p->action, &hdev->pend_le_reports);
6236 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci(): responds to pending Set Powered commands and
 * broadcasts the new settings.
 */
6239 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6241 struct cmd_lookup match = { NULL, hdev };
6243 BT_DBG("status 0x%02x", status);
6246 /* Register the available SMP channels (BR/EDR and LE) only
6247 * when successfully powering on the controller. This late
6248 * registration is required so that LE SMP can clearly
6249 * decide if the public address or static address is used.
6256 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6258 new_settings(hdev, match.sk);
6260 hci_dev_unlock(hdev);
/* Build and run an HCI request that brings the freshly powered
 * controller in sync with the management-level settings: SSP mode,
 * Secure Connections support, LE host support, advertising data,
 * pending LE actions, authentication and page-scan state.
 *
 * Returns the hci_req_run() result; powered_complete() is invoked
 * when the request finishes.
 */
6266 static int powered_update_hci(struct hci_dev *hdev)
6268 struct hci_request req;
6271 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host
 * feature bit is not yet set.
 */
6273 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6274 !lmp_host_ssp_capable(hdev)) {
6277 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6279 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6282 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6283 sizeof(support), &support);
6287 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6288 lmp_bredr_capable(hdev)) {
6289 struct hci_cp_write_le_host_supported cp;
6294 /* Check first if we already have the right
6295 * host state (host features set)
6297 if (cp.le != lmp_host_le_capable(hdev) ||
6298 cp.simul != lmp_host_le_br_capable(hdev))
6299 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6303 if (lmp_le_capable(hdev)) {
6304 /* Make sure the controller has a good default for
6305 * advertising data. This also applies to the case
6306 * where BR/EDR was toggled during the AUTO_OFF phase.
6308 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6309 update_adv_data(&req);
6310 update_scan_rsp_data(&req);
6313 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6314 enable_advertising(&req);
6316 restart_le_actions(&req);
/* Sync the link-level authentication requirement with the
 * management LINK_SECURITY setting.
 */
6319 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6320 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6321 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6322 sizeof(link_sec), &link_sec);
6324 if (lmp_bredr_capable(hdev)) {
6325 write_fast_connectable(&req, false);
6326 __hci_update_page_scan(&req);
6332 return hci_req_run(&req, powered_complete);
/* Notify the management layer about a power state change.
 *
 * Power on: run powered_update_hci(); replies to pending Set Powered
 * commands are then deferred to its completion callback. Power off:
 * reply to pending commands immediately, fail everything else with
 * NOT_POWERED (or INVALID_INDEX during unregistration), clear a
 * non-zero class of device and emit new settings.
 */
6335 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6337 struct cmd_lookup match = { NULL, hdev };
6338 u8 status, zero_cod[] = { 0, 0, 0 };
/* Nothing to do until a management socket has claimed the device. */
6341 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6345 if (powered_update_hci(hdev) == 0)
6348 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6353 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6355 /* If the power off is because of hdev unregistration let
6356 * use the appropriate INVALID_INDEX status. Otherwise use
6357 * NOT_POWERED. We cover both scenarios here since later in
6358 * mgmt_index_removed() any hci_conn callbacks will have already
6359 * been triggered, potentially causing misleading DISCONNECTED
6362 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6363 status = MGMT_STATUS_INVALID_INDEX;
6365 status = MGMT_STATUS_NOT_POWERED;
6367 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Report the class of device reset only if it was non-zero. */
6369 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6370 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6371 zero_cod, sizeof(zero_cod), NULL);
6374 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command after power-on could not be
 * performed; rfkill gets its dedicated status, anything else maps
 * to the generic FAILED.
 */
6382 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6384 struct pending_cmd *cmd;
6387 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6391 if (err == -ERFKILL)
6392 status = MGMT_STATUS_RFKILLED;
6394 status = MGMT_STATUS_FAILED;
6396 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6398 mgmt_pending_remove(cmd);
/* Timer callback for the discoverable timeout: drop both discoverable
 * flags, restore plain page scan on BR/EDR, refresh the advertising
 * data (its flags mirror the discoverable state) and broadcast the
 * resulting settings.
 */
6401 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6403 struct hci_request req;
6407 /* When discoverable timeout triggers, then just make sure
6408 * the limited discoverable flag is cleared. Even in the case
6409 * of a timeout triggered from general discoverable, it is
6410 * safe to unconditionally clear the flag.
6412 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6413 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6415 hci_req_init(&req, hdev);
/* Turn off inquiry scan but keep page scan enabled. */
6416 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
6417 u8 scan = SCAN_PAGE;
6418 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6419 sizeof(scan), &scan);
6422 update_adv_data(&req);
6423 hci_req_run(&req, NULL);
6425 hdev->discov_timeout = 0;
6427 new_settings(hdev, NULL);
6429 hci_dev_unlock(hdev);
/* Emit a New Link Key event for a BR/EDR link key, forwarding the
 * persistence decision to userspace via store_hint.
 */
6432 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6435 struct mgmt_ev_new_link_key ev;
6437 memset(&ev, 0, sizeof(ev));
6439 ev.store_hint = persistent;
6440 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6441 ev.key.addr.type = BDADDR_BREDR;
6442 ev.key.type = key->type;
6443 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6444 ev.key.pin_len = key->pin_len;
6446 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term key's type/authentication pair onto the
 * corresponding MGMT LTK type constant; unknown types fall back to
 * MGMT_LTK_UNAUTHENTICATED.
 *
 * NOTE(review): the case labels for the first two groups sit on
 * elided lines; from the kernel SMP types these are the legacy and
 * P-256 LTK cases — confirm against the full source.
 */
6449 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6451 switch (ltk->type) {
6454 if (ltk->authenticated)
6455 return MGMT_LTK_AUTHENTICATED;
6456 return MGMT_LTK_UNAUTHENTICATED;
6458 if (ltk->authenticated)
6459 return MGMT_LTK_P256_AUTH;
6460 return MGMT_LTK_P256_UNAUTH;
6461 case SMP_LTK_P256_DEBUG:
6462 return MGMT_LTK_P256_DEBUG;
6465 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. Keys tied to a non-static random
 * (i.e. resolvable/non-resolvable) address get store_hint 0 since the
 * peer address will change and the key is useless across reboots.
 */
6468 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6470 struct mgmt_ev_new_long_term_key ev;
6472 memset(&ev, 0, sizeof(ev));
6474 /* Devices using resolvable or non-resolvable random addresses
6475 * without providing an indentity resolving key don't require
6476 * to store long term keys. Their addresses will change the
6479 * Only when a remote device provides an identity address
6480 * make sure the long term key is stored. If the remote
6481 * identity is known, the long term keys are internally
6482 * mapped to the identity address. So allow static random
6483 * and public addresses here.
/* Top two bits 11 identify a static random address. */
6485 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6486 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6487 ev.store_hint = 0x00;
6489 ev.store_hint = persistent;
6491 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6492 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6493 ev.key.type = mgmt_ltk_type(key);
6494 ev.key.enc_size = key->enc_size;
6495 ev.key.ediv = key->ediv;
6496 ev.key.rand = key->rand;
6498 if (key->type == SMP_LTK)
6501 memcpy(ev.key.val, key->val, sizeof(key->val));
6503 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event; storing is only suggested when the device
 * actually used a resolvable private address (non-zero rpa).
 */
6506 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6508 struct mgmt_ev_new_irk ev;
6510 memset(&ev, 0, sizeof(ev));
6512 /* For identity resolving keys from devices that are already
6513 * using a public address or static random address, do not
6514 * ask for storing this key. The identity resolving key really
6515 * is only mandatory for devices using resovlable random
6518 * Storing all identity resolving keys has the downside that
6519 * they will be also loaded on next boot of they system. More
6520 * identity resolving keys, means more time during scanning is
6521 * needed to actually resolve these addresses.
6523 if (bacmp(&irk->rpa, BDADDR_ANY))
6524 ev.store_hint = 0x01;
6526 ev.store_hint = 0x00;
6528 bacpy(&ev.rpa, &irk->rpa);
6529 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6530 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6531 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6533 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event, with the same
 * identity-address storage rule as mgmt_new_ltk().
 */
6536 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6539 struct mgmt_ev_new_csrk ev;
6541 memset(&ev, 0, sizeof(ev));
6543 /* Devices using resolvable or non-resolvable random addresses
6544 * without providing an indentity resolving key don't require
6545 * to store signature resolving keys. Their addresses will change
6546 * the next time around.
6548 * Only when a remote device provides an identity address
6549 * make sure the signature resolving key is stored. So allow
6550 * static random and public addresses here.
/* Top two bits 11 identify a static random address. */
6552 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6553 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6554 ev.store_hint = 0x00;
6556 ev.store_hint = persistent;
6558 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6559 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6560 ev.key.master = csrk->master;
6561 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6563 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE peer, but only for
 * identity (public or static random) addresses; all interval/latency/
 * timeout fields go out little-endian.
 */
6566 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6567 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6568 u16 max_interval, u16 latency, u16 timeout)
6570 struct mgmt_ev_new_conn_param ev;
6572 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6575 memset(&ev, 0, sizeof(ev));
6576 bacpy(&ev.addr.bdaddr, bdaddr);
6577 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6578 ev.store_hint = store_hint;
6579 ev.min_interval = cpu_to_le16(min_interval);
6580 ev.max_interval = cpu_to_le16(max_interval);
6581 ev.latency = cpu_to_le16(latency);
6582 ev.timeout = cpu_to_le16(timeout);
6584 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) to the
 * buffer at offset eir_len; the caller must guarantee the buffer has
 * room for data_len + 2 more bytes. Returns the new eir length
 * (return statement on an elided line).
 */
6587 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6590 eir[eir_len++] = sizeof(type) + data_len;
6591 eir[eir_len++] = type;
6592 memcpy(&eir[eir_len], data, data_len);
6593 eir_len += data_len;
/* Emit a Device Connected event. For LE connections the cached
 * advertising data is used verbatim as the event's EIR; for BR/EDR
 * the remote name and class of device are appended as EIR fields.
 */
6598 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6599 u32 flags, u8 *name, u8 name_len)
6602 struct mgmt_ev_device_connected *ev = (void *) buf;
6605 bacpy(&ev->addr.bdaddr, &conn->dst);
6606 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6608 ev->flags = __cpu_to_le32(flags);
6610 /* We must ensure that the EIR Data fields are ordered and
6611 * unique. Keep it simple for now and avoid the problem by not
6612 * adding any BR/EDR data to the LE adv.
6614 if (conn->le_adv_data_len > 0) {
6615 memcpy(&ev->eir[eir_len],
6616 conn->le_adv_data, conn->le_adv_data_len);
6617 eir_len = conn->le_adv_data_len;
6620 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include the class of device if it is non-zero. */
6623 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6624 eir_len = eir_append_data(ev->eir, eir_len,
6626 conn->dev_class, 3);
6629 ev->eir_len = cpu_to_le16(eir_len);
6631 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6632 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and hand its socket back to the caller via the
 * struct sock ** cookie so the disconnect event can be skipped for it.
 */
6635 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6637 struct sock **sk = data;
6639 cmd->cmd_complete(cmd, 0);
6644 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: send the Device Unpaired event for
 * a pending Unpair Device command and complete it with success.
 */
6647 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6649 struct hci_dev *hdev = data;
6650 struct mgmt_cp_unpair_device *cp = cmd->param;
6652 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6654 cmd->cmd_complete(cmd, 0);
6655 mgmt_pending_remove(cmd);
/* Check whether a management-initiated power-off is in progress, i.e.
 * a pending Set Powered command exists (the visible lookup; the
 * inspection of its mode parameter is on elided lines).
 */
6658 bool mgmt_powering_down(struct hci_dev *hdev)
6660 struct pending_cmd *cmd;
6661 struct mgmt_mode *cp;
6663 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event (ACL/LE only) and complete any
 * pending Disconnect and Unpair Device commands. If this was the last
 * connection during a power-down, expedite the queued power-off work.
 */
6674 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6675 u8 link_type, u8 addr_type, u8 reason,
6676 bool mgmt_connected)
6678 struct mgmt_ev_device_disconnected ev;
6679 struct sock *sk = NULL;
6681 /* The connection is still in hci_conn_hash so test for 1
6682 * instead of 0 to know if this is the last one.
6684 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6685 cancel_delayed_work(&hdev->power_off);
6686 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6689 if (!mgmt_connected)
6692 if (link_type != ACL_LINK && link_type != LE_LINK)
/* disconnect_rsp() fills in sk so the originator is excluded from
 * the broadcast below.
 */
6695 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6697 bacpy(&ev.addr.bdaddr, bdaddr);
6698 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6701 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6706 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Handle a failed HCI Disconnect: complete pending Unpair Device
 * commands and, if the pending Disconnect command matches this
 * address/type, complete it with the mapped HCI error.
 */
6710 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6711 u8 link_type, u8 addr_type, u8 status)
6713 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6714 struct mgmt_cp_disconnect *cp;
6715 struct pending_cmd *cmd;
6717 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6720 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Ignore the pending command if it targets a different peer. */
6726 if (bacmp(bdaddr, &cp->addr.bdaddr))
6729 if (cp->addr.type != bdaddr_type)
6732 cmd->cmd_complete(cmd, mgmt_status(status));
6733 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the mapped HCI status; like the
 * disconnect path, expedite power-off if this was the last connection
 * during a management power-down.
 */
6736 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6737 u8 addr_type, u8 status)
6739 struct mgmt_ev_connect_failed ev;
6741 /* The connection is still in hci_conn_hash so test for 1
6742 * instead of 0 to know if this is the last one.
6744 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6745 cancel_delayed_work(&hdev->power_off);
6746 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6749 bacpy(&ev.addr.bdaddr, bdaddr);
6750 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6751 ev.status = mgmt_status(status);
6753 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR peer; the secure flag
 * assignment sits on an elided line.
 */
6756 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6758 struct mgmt_ev_pin_code_request ev;
6760 bacpy(&ev.addr.bdaddr, bdaddr);
6761 ev.addr.type = BDADDR_BREDR;
6764 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the mapped HCI
 * status, if one is outstanding.
 */
6767 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6770 struct pending_cmd *cmd;
6772 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6776 cmd->cmd_complete(cmd, mgmt_status(status));
6777 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the mapped
 * HCI status, if one is outstanding.
 */
6780 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6783 struct pending_cmd *cmd;
6785 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6789 cmd->cmd_complete(cmd, mgmt_status(status));
6790 mgmt_pending_remove(cmd);
/* Emit a User Confirm Request event carrying the passkey value to be
 * confirmed and the confirm_hint (whether only confirmation, not
 * comparison, is required).
 */
6793 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6794 u8 link_type, u8 addr_type, u32 value,
6797 struct mgmt_ev_user_confirm_request ev;
6799 BT_DBG("%s", hdev->name);
6801 bacpy(&ev.addr.bdaddr, bdaddr);
6802 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6803 ev.confirm_hint = confirm_hint;
6804 ev.value = cpu_to_le32(value);
6806 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace to supply a
 * passkey for the given peer.
 */
6810 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6811 u8 link_type, u8 addr_type)
6813 struct mgmt_ev_user_passkey_request ev;
6815 BT_DBG("%s", hdev->name);
6817 bacpy(&ev.addr.bdaddr, bdaddr);
6818 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6820 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey
 * (negative) reply commands: find the pending command for the given
 * opcode and complete it with the mapped HCI status.
 */
6824 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6825 u8 link_type, u8 addr_type, u8 status,
6828 struct pending_cmd *cmd;
6830 cmd = mgmt_pending_find(opcode, hdev);
6834 cmd->cmd_complete(cmd, mgmt_status(status));
6835 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply. */
6840 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6841 u8 link_type, u8 addr_type, u8 status)
6843 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6844 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Negative Reply. */
6847 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6848 u8 link_type, u8 addr_type, u8 status)
6850 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6852 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply. */
6855 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6856 u8 link_type, u8 addr_type, u8 status)
6858 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6859 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Negative Reply. */
6862 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6863 u8 link_type, u8 addr_type, u8 status)
6865 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6867 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey
 * and how many digits the remote side has entered so far.
 */
6870 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6871 u8 link_type, u8 addr_type, u32 passkey,
6874 struct mgmt_ev_passkey_notify ev;
6876 BT_DBG("%s", hdev->name);
6878 bacpy(&ev.addr.bdaddr, bdaddr);
6879 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6880 ev.passkey = __cpu_to_le32(passkey);
6881 ev.entered = entered;
6883 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: emit the Auth Failed event
 * (excluding the socket of a pending pairing command, if any) and
 * complete that pending pairing command with the mapped status.
 */
6886 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6888 struct mgmt_ev_auth_failed ev;
6889 struct pending_cmd *cmd;
6890 u8 status = mgmt_status(hci_status);
6892 bacpy(&ev.addr.bdaddr, &conn->dst);
6893 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6896 cmd = find_pairing(conn);
6898 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6899 cmd ? cmd->sk : NULL);
6902 cmd->cmd_complete(cmd, status);
6903 mgmt_pending_remove(cmd);
/* Completion handler for HCI Write Auth Enable: on error fail the
 * pending Set Link Security commands, otherwise sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state, reply
 * and broadcast new settings if something changed.
 */
6907 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6909 struct cmd_lookup match = { NULL, hdev };
6913 u8 mgmt_err = mgmt_status(status);
6914 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6915 cmd_status_rsp, &mgmt_err);
6919 if (test_bit(HCI_AUTH, &hdev->flags))
6920 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6923 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6926 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6930 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command that zeroes the extended inquiry
 * response, and clear the cached copy in hdev->eir; no-op on
 * controllers without extended inquiry support.
 */
6936 static void clear_eir(struct hci_request *req)
6938 struct hci_dev *hdev = req->hdev;
6939 struct hci_cp_write_eir cp;
6941 if (!lmp_ext_inq_capable(hdev))
6944 memset(hdev->eir, 0, sizeof(hdev->eir));
6946 memset(&cp, 0, sizeof(cp));
6948 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for HCI Write SSP Mode: on error roll back the
 * SSP (and dependent HS) flags and fail pending Set SSP commands; on
 * success sync the flags, reply, broadcast settings if changed, and
 * update SSP debug-key mode / EIR accordingly.
 */
6951 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6953 struct cmd_lookup match = { NULL, hdev };
6954 struct hci_request req;
6955 bool changed = false;
6958 u8 mgmt_err = mgmt_status(status);
/* Enabling failed: undo the optimistic flag set and announce the
 * reverted settings. HS depends on SSP, so it is cleared too.
 */
6960 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6961 &hdev->dev_flags)) {
6962 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6963 new_settings(hdev, NULL);
6966 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6972 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6974 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6976 changed = test_and_clear_bit(HCI_HS_ENABLED,
6979 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6982 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6985 new_settings(hdev, match.sk);
6990 hci_req_init(&req, hdev);
6992 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6993 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6994 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6995 sizeof(enable), &enable);
7001 hci_req_run(&req, NULL);
/* Completion handler for HCI Write SC Support: on error roll back
 * the SC_ENABLED/SC_ONLY flags and fail pending Set Secure Connections
 * commands; on success sync the flags, reply, and broadcast settings
 * if they changed.
 */
7004 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7006 struct cmd_lookup match = { NULL, hdev };
7007 bool changed = false;
7010 u8 mgmt_err = mgmt_status(status);
7013 if (test_and_clear_bit(HCI_SC_ENABLED,
7015 new_settings(hdev, NULL);
7016 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
7019 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
7020 cmd_status_rsp, &mgmt_err);
7025 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* Disabling SC also drops SC-only mode. */
7027 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
7028 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
7031 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
7032 settings_rsp, &match);
7035 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: capture (and hold a reference to)
 * the socket of the first matching pending command.
 */
7041 static void sk_lookup(struct pending_cmd *cmd, void *data)
7043 struct cmd_lookup *match = data;
7045 if (match->sk == NULL) {
7046 match->sk = cmd->sk;
7047 sock_hold(match->sk);
/* Completion handler for a class-of-device update: find the
 * originating socket among the pending dev-class/UUID commands and
 * broadcast the Class Of Device Changed event (skipping that socket).
 */
7051 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7054 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7056 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7057 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7058 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7061 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion handler for a local-name update: cache the new name,
 * then emit Local Name Changed unless the write originated from the
 * power-on sequence (pending Set Powered command present).
 */
7068 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7070 struct mgmt_cp_set_local_name ev;
7071 struct pending_cmd *cmd;
7076 memset(&ev, 0, sizeof(ev));
7077 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7078 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7080 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7082 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7084 /* If this is a HCI command related to powering on the
7085 * HCI dev don't send any mgmt signals.
7087 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7091 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7092 cmd ? cmd->sk : NULL);
/* Completion handler for Read Local OOB Data: reply to the pending
 * command with either the extended response (192+256 bit data, when
 * BR/EDR Secure Connections is enabled and 256-bit values are
 * available) or the legacy 192-bit-only response; on error reply
 * with the mapped status instead.
 */
7095 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7096 u8 *rand192, u8 *hash256, u8 *rand256,
7099 struct pending_cmd *cmd;
7101 BT_DBG("%s status %u", hdev->name, status);
7103 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7108 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7109 mgmt_status(status));
7111 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7112 struct mgmt_rp_read_local_oob_ext_data rp;
7114 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7115 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7117 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7118 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7120 cmd_complete(cmd->sk, hdev->id,
7121 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7124 struct mgmt_rp_read_local_oob_data rp;
7126 memcpy(rp.hash, hash192, sizeof(rp.hash));
7127 memcpy(rp.rand, rand192, sizeof(rp.rand));
7129 cmd_complete(cmd->sk, hdev->id,
7130 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7135 mgmt_pending_remove(cmd);
/* Linear search: return true if the 128-bit uuid appears in the
 * uuids array of uuid_count entries (return statements are on
 * elided lines).
 */
7138 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7142 for (i = 0; i < uuid_count; i++) {
7143 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data fields and return whether any
 * contained 16/32/128-bit service UUID matches the filter list.
 * 16- and 32-bit UUIDs are expanded into full 128-bit form using the
 * Bluetooth base UUID before comparison; fields are little-endian,
 * hence the reversed byte assignments.
 */
7150 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7154 while (parsed < eir_len) {
7155 u8 field_len = eir[0];
/* Malformed field extending past the buffer — stop parsing. */
7162 if (eir_len - parsed < field_len + 1)
7166 case EIR_UUID16_ALL:
7167 case EIR_UUID16_SOME:
7168 for (i = 0; i + 3 <= field_len; i += 2) {
7169 memcpy(uuid, bluetooth_base_uuid, 16);
7170 uuid[13] = eir[i + 3];
7171 uuid[12] = eir[i + 2];
7172 if (has_uuid(uuid, uuid_count, uuids))
7176 case EIR_UUID32_ALL:
7177 case EIR_UUID32_SOME:
7178 for (i = 0; i + 5 <= field_len; i += 4) {
7179 memcpy(uuid, bluetooth_base_uuid, 16);
7180 uuid[15] = eir[i + 5];
7181 uuid[14] = eir[i + 4];
7182 uuid[13] = eir[i + 3];
7183 uuid[12] = eir[i + 2];
7184 if (has_uuid(uuid, uuid_count, uuids))
7188 case EIR_UUID128_ALL:
7189 case EIR_UUID128_SOME:
7190 for (i = 0; i + 17 <= field_len; i += 16) {
7191 memcpy(uuid, eir + i + 2, 16);
7192 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: length byte + field_len bytes. */
7198 parsed += field_len + 1;
7199 eir += field_len + 1;
/* Emit a Device Found event for an inquiry/scan result, applying the
 * active discovery filters: drop results for non-kernel discovery
 * (except LE passive-scan reports), below-threshold RSSI, and — when
 * service discovery supplies a UUID list — results whose EIR and scan
 * response both fail to match any listed UUID. A CoD EIR field is
 * appended for BR/EDR results that lack one.
 */
7205 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7206 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7207 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7210 struct mgmt_ev_device_found *ev = (void *) buf;
7214 /* Don't send events for a non-kernel initiated discovery. With
7215 * LE one exception is if we have pend_le_reports > 0 in which
7216 * case we're doing passive scanning and want these events.
7218 if (!hci_discovery_active(hdev)) {
7219 if (link_type == ACL_LINK)
7221 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7225 /* When using service discovery with a RSSI threshold, then check
7226 * if such a RSSI threshold is specified. If a RSSI threshold has
7227 * been specified, then all results with a RSSI smaller than the
7228 * RSSI threshold will be dropped.
7230 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7231 * the results are also dropped.
7233 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7234 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7237 /* Make sure that the buffer is big enough. The 5 extra bytes
7238 * are for the potential CoD field.
7240 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7243 memset(buf, 0, sizeof(buf));
7245 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7246 * RSSI value was reported as 0 when not available. This behavior
7247 * is kept when using device discovery. This is required for full
7248 * backwards compatibility with the API.
7250 * However when using service discovery, the value 127 will be
7251 * returned when the RSSI is not available.
7253 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7254 link_type == ACL_LINK)
7257 bacpy(&ev->addr.bdaddr, bdaddr);
7258 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7260 ev->flags = cpu_to_le32(flags);
7263 /* When using service discovery and a list of UUID is
7264 * provided, results with no matching UUID should be
7265 * dropped. In case there is a match the result is
7266 * kept and checking possible scan response data
7269 if (hdev->discovery.uuid_count > 0)
7270 match = eir_has_uuids(eir, eir_len,
7271 hdev->discovery.uuid_count,
7272 hdev->discovery.uuids);
/* No EIR match and no scan response to check — drop. */
7276 if (!match && !scan_rsp_len)
7279 /* Copy EIR or advertising data into event */
7280 memcpy(ev->eir, eir, eir_len);
7282 /* When using service discovery and a list of UUID is
7283 * provided, results with empty EIR or advertising data
7284 * should be dropped since they do not match any UUID.
7286 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
7292 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7293 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7296 if (scan_rsp_len > 0) {
7297 /* When using service discovery and a list of UUID is
7298 * provided, results with no matching UUID should be
7299 * dropped if there is no previous match from the
7302 if (hdev->discovery.uuid_count > 0) {
7303 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7304 hdev->discovery.uuid_count,
7305 hdev->discovery.uuids))
7309 /* Append scan response data to event */
7310 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7312 /* When using service discovery and a list of UUID is
7313 * provided, results with empty scan response and no
7314 * previous matched advertising data should be dropped.
7316 if (hdev->discovery.uuid_count > 0 && !match)
7320 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7321 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7323 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7326 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7327 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7329 struct mgmt_ev_device_found *ev;
7330 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7333 ev = (struct mgmt_ev_device_found *) buf;
7335 memset(buf, 0, sizeof(buf));
7337 bacpy(&ev->addr.bdaddr, bdaddr);
7338 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7341 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7344 ev->eir_len = cpu_to_le16(eir_len);
7346 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7349 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7351 struct mgmt_ev_discovering ev;
7353 BT_DBG("%s discovering %u", hdev->name, discovering);
7355 memset(&ev, 0, sizeof(ev));
7356 ev.type = hdev->discovery.type;
7357 ev.discovering = discovering;
7359 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7362 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7364 BT_DBG("%s status %u", hdev->name, status);
7367 void mgmt_reenable_advertising(struct hci_dev *hdev)
7369 struct hci_request req;
7371 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7374 hci_req_init(&req, hdev);
7375 enable_advertising(&req);
7376 hci_req_run(&req, adv_enable_complete);