2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
38 #define MGMT_VERSION 1
39 #define MGMT_REVISION 8
41 static const u16 mgmt_commands[] = {
42 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_SET_DISCOVERABLE,
46 MGMT_OP_SET_CONNECTABLE,
47 MGMT_OP_SET_FAST_CONNECTABLE,
49 MGMT_OP_SET_LINK_SECURITY,
53 MGMT_OP_SET_DEV_CLASS,
54 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_LOAD_LINK_KEYS,
58 MGMT_OP_LOAD_LONG_TERM_KEYS,
60 MGMT_OP_GET_CONNECTIONS,
61 MGMT_OP_PIN_CODE_REPLY,
62 MGMT_OP_PIN_CODE_NEG_REPLY,
63 MGMT_OP_SET_IO_CAPABILITY,
65 MGMT_OP_CANCEL_PAIR_DEVICE,
66 MGMT_OP_UNPAIR_DEVICE,
67 MGMT_OP_USER_CONFIRM_REPLY,
68 MGMT_OP_USER_CONFIRM_NEG_REPLY,
69 MGMT_OP_USER_PASSKEY_REPLY,
70 MGMT_OP_USER_PASSKEY_NEG_REPLY,
71 MGMT_OP_READ_LOCAL_OOB_DATA,
72 MGMT_OP_ADD_REMOTE_OOB_DATA,
73 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
74 MGMT_OP_START_DISCOVERY,
75 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_UNBLOCK_DEVICE,
79 MGMT_OP_SET_DEVICE_ID,
80 MGMT_OP_SET_ADVERTISING,
82 MGMT_OP_SET_STATIC_ADDRESS,
83 MGMT_OP_SET_SCAN_PARAMS,
84 MGMT_OP_SET_SECURE_CONN,
85 MGMT_OP_SET_DEBUG_KEYS,
88 MGMT_OP_GET_CONN_INFO,
89 MGMT_OP_GET_CLOCK_INFO,
91 MGMT_OP_REMOVE_DEVICE,
92 MGMT_OP_LOAD_CONN_PARAM,
93 MGMT_OP_READ_UNCONF_INDEX_LIST,
94 MGMT_OP_READ_CONFIG_INFO,
95 MGMT_OP_SET_EXTERNAL_CONFIG,
96 MGMT_OP_SET_PUBLIC_ADDRESS,
97 MGMT_OP_START_SERVICE_DISCOVERY,
100 static const u16 mgmt_events[] = {
101 MGMT_EV_CONTROLLER_ERROR,
103 MGMT_EV_INDEX_REMOVED,
104 MGMT_EV_NEW_SETTINGS,
105 MGMT_EV_CLASS_OF_DEV_CHANGED,
106 MGMT_EV_LOCAL_NAME_CHANGED,
107 MGMT_EV_NEW_LINK_KEY,
108 MGMT_EV_NEW_LONG_TERM_KEY,
109 MGMT_EV_DEVICE_CONNECTED,
110 MGMT_EV_DEVICE_DISCONNECTED,
111 MGMT_EV_CONNECT_FAILED,
112 MGMT_EV_PIN_CODE_REQUEST,
113 MGMT_EV_USER_CONFIRM_REQUEST,
114 MGMT_EV_USER_PASSKEY_REQUEST,
116 MGMT_EV_DEVICE_FOUND,
118 MGMT_EV_DEVICE_BLOCKED,
119 MGMT_EV_DEVICE_UNBLOCKED,
120 MGMT_EV_DEVICE_UNPAIRED,
121 MGMT_EV_PASSKEY_NOTIFY,
124 MGMT_EV_DEVICE_ADDED,
125 MGMT_EV_DEVICE_REMOVED,
126 MGMT_EV_NEW_CONN_PARAM,
127 MGMT_EV_UNCONF_INDEX_ADDED,
128 MGMT_EV_UNCONF_INDEX_REMOVED,
129 MGMT_EV_NEW_CONFIG_OPTIONS,
132 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
135 struct list_head list;
142 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
145 /* HCI to MGMT error code conversion table */
146 static u8 mgmt_status_table[] = {
148 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
149 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
150 MGMT_STATUS_FAILED, /* Hardware Failure */
151 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
152 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
153 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
154 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
155 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
157 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
158 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
159 MGMT_STATUS_BUSY, /* Command Disallowed */
160 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
161 MGMT_STATUS_REJECTED, /* Rejected Security */
162 MGMT_STATUS_REJECTED, /* Rejected Personal */
163 MGMT_STATUS_TIMEOUT, /* Host Timeout */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
165 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
166 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
167 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
168 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
169 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
170 MGMT_STATUS_BUSY, /* Repeated Attempts */
171 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
172 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
173 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
174 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
175 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
176 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
177 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
178 MGMT_STATUS_FAILED, /* Unspecified Error */
179 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
180 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
181 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
182 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
183 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
184 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
185 MGMT_STATUS_FAILED, /* Unit Link Key Used */
186 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
187 MGMT_STATUS_TIMEOUT, /* Instant Passed */
188 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
189 MGMT_STATUS_FAILED, /* Transaction Collision */
190 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
191 MGMT_STATUS_REJECTED, /* QoS Rejected */
192 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
193 MGMT_STATUS_REJECTED, /* Insufficient Security */
194 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
195 MGMT_STATUS_BUSY, /* Role Switch Pending */
196 MGMT_STATUS_FAILED, /* Slot Violation */
197 MGMT_STATUS_FAILED, /* Role Switch Failed */
198 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
199 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
200 MGMT_STATUS_BUSY, /* Host Busy Pairing */
201 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
202 MGMT_STATUS_BUSY, /* Controller Busy */
203 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
204 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
205 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
206 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
207 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
210 static u8 mgmt_status(u8 hci_status)
212 if (hci_status < ARRAY_SIZE(mgmt_status_table))
213 return mgmt_status_table[hci_status];
215 return MGMT_STATUS_FAILED;
218 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
219 struct sock *skip_sk)
222 struct mgmt_hdr *hdr;
224 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
228 hdr = (void *) skb_put(skb, sizeof(*hdr));
229 hdr->opcode = cpu_to_le16(event);
231 hdr->index = cpu_to_le16(hdev->id);
233 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
234 hdr->len = cpu_to_le16(data_len);
237 memcpy(skb_put(skb, data_len), data, data_len);
240 __net_timestamp(skb);
242 hci_send_to_control(skb, skip_sk);
248 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
251 struct mgmt_hdr *hdr;
252 struct mgmt_ev_cmd_status *ev;
255 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
257 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
261 hdr = (void *) skb_put(skb, sizeof(*hdr));
263 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
264 hdr->index = cpu_to_le16(index);
265 hdr->len = cpu_to_le16(sizeof(*ev));
267 ev = (void *) skb_put(skb, sizeof(*ev));
269 ev->opcode = cpu_to_le16(cmd);
271 err = sock_queue_rcv_skb(sk, skb);
278 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
279 void *rp, size_t rp_len)
282 struct mgmt_hdr *hdr;
283 struct mgmt_ev_cmd_complete *ev;
286 BT_DBG("sock %p", sk);
288 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
292 hdr = (void *) skb_put(skb, sizeof(*hdr));
294 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
295 hdr->index = cpu_to_le16(index);
296 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
298 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
299 ev->opcode = cpu_to_le16(cmd);
303 memcpy(ev->data, rp, rp_len);
305 err = sock_queue_rcv_skb(sk, skb);
312 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
315 struct mgmt_rp_read_version rp;
317 BT_DBG("sock %p", sk);
319 rp.version = MGMT_VERSION;
320 rp.revision = cpu_to_le16(MGMT_REVISION);
322 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
326 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_commands *rp;
330 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
331 const u16 num_events = ARRAY_SIZE(mgmt_events);
336 BT_DBG("sock %p", sk);
338 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
340 rp = kmalloc(rp_size, GFP_KERNEL);
344 rp->num_commands = cpu_to_le16(num_commands);
345 rp->num_events = cpu_to_le16(num_events);
347 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
348 put_unaligned_le16(mgmt_commands[i], opcode);
350 for (i = 0; i < num_events; i++, opcode++)
351 put_unaligned_le16(mgmt_events[i], opcode);
353 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
360 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
363 struct mgmt_rp_read_index_list *rp;
369 BT_DBG("sock %p", sk);
371 read_lock(&hci_dev_list_lock);
374 list_for_each_entry(d, &hci_dev_list, list) {
375 if (d->dev_type == HCI_BREDR &&
376 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
380 rp_len = sizeof(*rp) + (2 * count);
381 rp = kmalloc(rp_len, GFP_ATOMIC);
383 read_unlock(&hci_dev_list_lock);
388 list_for_each_entry(d, &hci_dev_list, list) {
389 if (test_bit(HCI_SETUP, &d->dev_flags) ||
390 test_bit(HCI_CONFIG, &d->dev_flags) ||
391 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
394 /* Devices marked as raw-only are neither configured
395 * nor unconfigured controllers.
397 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
400 if (d->dev_type == HCI_BREDR &&
401 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
402 rp->index[count++] = cpu_to_le16(d->id);
403 BT_DBG("Added hci%u", d->id);
407 rp->num_controllers = cpu_to_le16(count);
408 rp_len = sizeof(*rp) + (2 * count);
410 read_unlock(&hci_dev_list_lock);
412 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
420 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
421 void *data, u16 data_len)
423 struct mgmt_rp_read_unconf_index_list *rp;
429 BT_DBG("sock %p", sk);
431 read_lock(&hci_dev_list_lock);
434 list_for_each_entry(d, &hci_dev_list, list) {
435 if (d->dev_type == HCI_BREDR &&
436 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
440 rp_len = sizeof(*rp) + (2 * count);
441 rp = kmalloc(rp_len, GFP_ATOMIC);
443 read_unlock(&hci_dev_list_lock);
448 list_for_each_entry(d, &hci_dev_list, list) {
449 if (test_bit(HCI_SETUP, &d->dev_flags) ||
450 test_bit(HCI_CONFIG, &d->dev_flags) ||
451 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
454 /* Devices marked as raw-only are neither configured
455 * nor unconfigured controllers.
457 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
460 if (d->dev_type == HCI_BREDR &&
461 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
462 rp->index[count++] = cpu_to_le16(d->id);
463 BT_DBG("Added hci%u", d->id);
467 rp->num_controllers = cpu_to_le16(count);
468 rp_len = sizeof(*rp) + (2 * count);
470 read_unlock(&hci_dev_list_lock);
472 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
480 static bool is_configured(struct hci_dev *hdev)
482 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
483 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
486 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
487 !bacmp(&hdev->public_addr, BDADDR_ANY))
493 static __le32 get_missing_options(struct hci_dev *hdev)
497 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
498 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
499 options |= MGMT_OPTION_EXTERNAL_CONFIG;
501 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
502 !bacmp(&hdev->public_addr, BDADDR_ANY))
503 options |= MGMT_OPTION_PUBLIC_ADDRESS;
505 return cpu_to_le32(options);
508 static int new_options(struct hci_dev *hdev, struct sock *skip)
510 __le32 options = get_missing_options(hdev);
512 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
513 sizeof(options), skip);
516 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
518 __le32 options = get_missing_options(hdev);
520 return cmd_complete(sk, hdev->id, opcode, 0, &options,
524 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
525 void *data, u16 data_len)
527 struct mgmt_rp_read_config_info rp;
530 BT_DBG("sock %p %s", sk, hdev->name);
534 memset(&rp, 0, sizeof(rp));
535 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
537 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
538 options |= MGMT_OPTION_EXTERNAL_CONFIG;
540 if (hdev->set_bdaddr)
541 options |= MGMT_OPTION_PUBLIC_ADDRESS;
543 rp.supported_options = cpu_to_le32(options);
544 rp.missing_options = get_missing_options(hdev);
546 hci_dev_unlock(hdev);
548 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
552 static u32 get_supported_settings(struct hci_dev *hdev)
556 settings |= MGMT_SETTING_POWERED;
557 settings |= MGMT_SETTING_BONDABLE;
558 settings |= MGMT_SETTING_DEBUG_KEYS;
559 settings |= MGMT_SETTING_CONNECTABLE;
560 settings |= MGMT_SETTING_DISCOVERABLE;
562 if (lmp_bredr_capable(hdev)) {
563 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
564 settings |= MGMT_SETTING_FAST_CONNECTABLE;
565 settings |= MGMT_SETTING_BREDR;
566 settings |= MGMT_SETTING_LINK_SECURITY;
568 if (lmp_ssp_capable(hdev)) {
569 settings |= MGMT_SETTING_SSP;
570 settings |= MGMT_SETTING_HS;
573 if (lmp_sc_capable(hdev))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
586 settings |= MGMT_SETTING_CONFIGURATION;
591 static u32 get_current_settings(struct hci_dev *hdev)
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
640 #define PNP_INFO_SVCLASS_ID 0x1200
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
650 list_for_each_entry(uuid, &hdev->uuids, list) {
653 if (uuid->size != 16)
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
666 uuids_start[1] = EIR_UUID16_ALL;
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
684 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
686 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
699 uuids_start[1] = EIR_UUID32_ALL;
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
711 uuids_start[0] += sizeof(u32);
717 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
719 u8 *ptr = data, *uuids_start = NULL;
720 struct bt_uuid *uuid;
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
732 uuids_start[1] = EIR_UUID128_ALL;
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
742 memcpy(ptr, uuid->uuid, 16);
744 uuids_start[0] += 16;
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 struct pending_cmd *cmd;
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
766 struct pending_cmd *cmd;
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
771 if (cmd->opcode == opcode)
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
783 name_len = strlen(hdev->dev_name);
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
787 if (name_len > max_len) {
789 ptr[1] = EIR_NAME_SHORT;
791 ptr[1] = EIR_NAME_COMPLETE;
793 ptr[0] = name_len + 1;
795 memcpy(ptr + 2, hdev->dev_name, name_len);
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
804 static void update_scan_rsp_data(struct hci_request *req)
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
813 memset(&cp, 0, sizeof(cp));
815 len = create_scan_rsp_data(hdev, cp.data);
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
831 struct pending_cmd *cmd;
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
838 struct mgmt_mode *cp = cmd->param;
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
855 u8 ad_len = 0, flags = 0;
857 flags |= get_adv_discov_flags(hdev);
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
863 BT_DBG("adv flags 0x%02x", flags);
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
885 static void update_adv_data(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
894 memset(&cp, 0, sizeof(cp));
896 len = create_adv_data(hdev, cp.data);
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
910 int mgmt_update_adv_data(struct hci_dev *hdev)
912 struct hci_request req;
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
917 return hci_req_run(&req, NULL);
920 static void create_eir(struct hci_dev *hdev, u8 *data)
925 name_len = strlen(hdev->dev_name);
931 ptr[1] = EIR_NAME_SHORT;
933 ptr[1] = EIR_NAME_COMPLETE;
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
938 memcpy(ptr + 2, hdev->dev_name, name_len);
940 ptr += (name_len + 2);
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
951 if (hdev->devid_source > 0) {
953 ptr[1] = EIR_DEVICE_ID;
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
968 static void update_eir(struct hci_request *req)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
973 if (!hdev_is_powered(hdev))
976 if (!lmp_ext_inq_capable(hdev))
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
985 memset(&cp, 0, sizeof(cp));
987 create_eir(hdev, cp.data);
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
997 static u8 get_service_classes(struct hci_dev *hdev)
999 struct bt_uuid *uuid;
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
1008 static void update_class(struct hci_request *req)
1010 struct hci_dev *hdev = req->hdev;
1013 BT_DBG("%s", hdev->name);
1015 if (!hdev_is_powered(hdev))
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1037 static bool get_connectable(struct hci_dev *hdev)
1039 struct pending_cmd *cmd;
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 struct mgmt_mode *cp = cmd->param;
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1053 static void disable_advertising(struct hci_request *req)
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1060 static void enable_advertising(struct hci_request *req)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req);
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1080 connectable = get_connectable(hdev);
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1101 static void service_cache_off(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1110 hci_req_init(&req, hdev);
1117 hci_dev_unlock(hdev);
1119 hci_req_run(&req, NULL);
1122 static void rpa_expired(struct work_struct *work)
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 struct hci_request req;
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1159 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1160 void *data, u16 data_len)
1162 struct mgmt_rp_read_info rp;
1164 BT_DBG("sock %p %s", sk, hdev->name);
1168 memset(&rp, 0, sizeof(rp));
1170 bacpy(&rp.bdaddr, &hdev->bdaddr);
1172 rp.version = hdev->hci_ver;
1173 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1175 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1176 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1178 memcpy(rp.dev_class, hdev->dev_class, 3);
1180 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1181 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1183 hci_dev_unlock(hdev);
1185 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 struct hci_dev *hdev, void *data,
1200 struct pending_cmd *cmd;
1202 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1206 cmd->opcode = opcode;
1207 cmd->index = hdev->id;
1209 cmd->param = kmemdup(data, len, GFP_KERNEL);
1215 cmd->param_len = len;
1220 list_add(&cmd->list, &hdev->mgmt_pending);
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1264 static bool hci_stop_discovery(struct hci_request *req)
1266 struct hci_dev *hdev = req->hdev;
1267 struct hci_cp_remote_name_req_cancel cp;
1268 struct inquiry_entry *e;
1270 switch (hdev->discovery.state) {
1271 case DISCOVERY_FINDING:
1272 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1273 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1275 cancel_delayed_work(&hdev->le_scan_disable);
1276 hci_req_add_le_scan_disable(req);
1281 case DISCOVERY_RESOLVING:
1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1296 hci_req_add_le_scan_disable(req);
1306 static int clean_up_hci_state(struct hci_dev *hdev)
1308 struct hci_request req;
1309 struct hci_conn *conn;
1310 bool discov_stopped;
1313 hci_req_init(&req, hdev);
1315 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1316 test_bit(HCI_PSCAN, &hdev->flags)) {
1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1322 disable_advertising(&req);
1324 discov_stopped = hci_stop_discovery(&req);
1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1327 struct hci_cp_disconnect dc;
1328 struct hci_cp_reject_conn_req rej;
1330 switch (conn->state) {
1333 dc.handle = cpu_to_le16(conn->handle);
1334 dc.reason = 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1338 if (conn->type == LE_LINK)
1339 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1341 else if (conn->type == ACL_LINK)
1342 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1346 bacpy(&rej.bdaddr, &conn->dst);
1347 rej.reason = 0x15; /* Terminated due to Power Off */
1348 if (conn->type == ACL_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1351 else if (conn->type == SCO_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1358 err = hci_req_run(&req, clean_up_hci_complete);
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1372 BT_DBG("request for %s", hdev->name);
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1392 err = mgmt_powered(hdev, 1);
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1427 hci_dev_unlock(hdev);
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1440 int mgmt_new_settings(struct hci_dev *hdev)
1442 return new_settings(hdev, NULL);
1447 struct hci_dev *hdev;
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with the status
 * pointed to by @data and remove it from the pending list.
 */
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: complete a pending command via its
 * per-command cmd_complete handler when one is set, otherwise fall back
 * to a plain command-status response.
 */
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 if (cmd->cmd_complete) {
1480 cmd->cmd_complete(cmd, *status);
1481 mgmt_pending_remove(cmd);
1486 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's stored parameters
 * back in the Command Complete response.
 */
1489 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1491 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1492 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: return only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1495 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1497 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1498 sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, otherwise
 * SUCCESS.
 */
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 if (!lmp_bredr_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 return MGMT_STATUS_REJECTED;
1508 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, otherwise SUCCESS.
 */
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 if (!lmp_le_capable(hdev))
1514 return MGMT_STATUS_NOT_SUPPORTED;
1515 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 return MGMT_STATUS_REJECTED;
1518 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable: on failure send
 * an error status and clear limited-discoverable; on success update the
 * HCI_DISCOVERABLE flag, (re)arm the discoverable timeout, reply to the
 * requester, broadcast New Settings and refresh page scan.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible logic.
 */
1521 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1524 struct pending_cmd *cmd;
1525 struct mgmt_mode *cp;
1526 struct hci_request req;
1529 BT_DBG("status 0x%02x", status);
1533 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* HCI-level failure: report it and undo the limited-discoverable bit
 * that was set optimistically when the request was issued.
 */
1538 u8 mgmt_err = mgmt_status(status);
1539 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1540 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1546 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* A non-zero timeout (stored in ms-seconds units, converted here to
 * jiffies) schedules automatic discoverable shut-off.
 */
1549 if (hdev->discov_timeout > 0) {
1550 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1551 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1555 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1559 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1562 new_settings(hdev, cmd->sk);
1564 /* When the discoverable mode gets changed, make sure
1565 * that class of device has the limited discoverable
1566 * bit correctly set. Also update page scan based on whitelist
1569 hci_req_init(&req, hdev);
1570 __hci_update_page_scan(&req);
1572 hci_req_run(&req, NULL);
1575 mgmt_pending_remove(cmd);
1578 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates the mode (0x00 off,
 * 0x01 general, 0x02 limited) and timeout combination, handles the
 * powered-off case purely in flags, short-circuits when only the
 * timeout changes, and otherwise builds an HCI request (IAC LAP +
 * scan enable for BR/EDR, advertising data update for LE) completed by
 * set_discoverable_complete.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible logic.
 */
1581 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1584 struct mgmt_cp_set_discoverable *cp = data;
1585 struct pending_cmd *cmd;
1586 struct hci_request req;
1591 BT_DBG("request for %s", hdev->name);
/* Discoverable requires at least one of BR/EDR or LE to be enabled. */
1593 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1594 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1595 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1596 MGMT_STATUS_REJECTED);
1598 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1599 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_INVALID_PARAMS);
1602 timeout = __le16_to_cpu(cp->timeout);
1604 /* Disabling discoverable requires that no timeout is set,
1605 * and enabling limited discoverable requires a timeout.
1607 if ((cp->val == 0x00 && timeout > 0) ||
1608 (cp->val == 0x02 && timeout == 0))
1609 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1610 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1614 if (!hdev_is_powered(hdev) && timeout > 0) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1616 MGMT_STATUS_NOT_POWERED);
/* Serialize against in-flight discoverable/connectable changes. */
1620 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1621 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1622 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable. */
1627 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1628 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1629 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the flag and report; no HCI traffic. */
1633 if (!hdev_is_powered(hdev)) {
1634 bool changed = false;
1636 /* Setting limited discoverable when powered off is
1637 * not a valid operation since it requires a timeout
1638 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1640 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1641 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1645 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1650 err = new_settings(hdev, sk);
1655 /* If the current mode is the same, then just update the timeout
1656 * value with the new value. And if only the timeout gets updated,
1657 * then no need for any HCI transactions.
1659 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1660 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1661 &hdev->dev_flags)) {
1662 cancel_delayed_work(&hdev->discov_off);
1663 hdev->discov_timeout = timeout;
1665 if (cp->val && hdev->discov_timeout > 0) {
1666 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1667 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1671 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1675 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1681 /* Cancel any potential discoverable timeout that might be
1682 * still active and store new timeout value. The arming of
1683 * the timeout happens in the complete handler.
1685 cancel_delayed_work(&hdev->discov_off);
1686 hdev->discov_timeout = timeout;
1688 /* Limited discoverable mode */
1689 if (cp->val == 0x02)
1690 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1692 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1694 hci_req_init(&req, hdev);
1696 /* The procedure for LE-only controllers is much simpler - just
1697 * update the advertising data.
1699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1705 struct hci_cp_write_current_iac_lap hci_cp;
1707 if (cp->val == 0x02) {
1708 /* Limited discoverable mode */
/* LIAC 0x9e8b00 followed by GIAC 0x9e8b33, little-endian LAPs. */
1709 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1710 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1711 hci_cp.iac_lap[1] = 0x8b;
1712 hci_cp.iac_lap[2] = 0x9e;
1713 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1714 hci_cp.iac_lap[4] = 0x8b;
1715 hci_cp.iac_lap[5] = 0x9e;
1717 /* General discoverable mode */
1719 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1720 hci_cp.iac_lap[1] = 0x8b;
1721 hci_cp.iac_lap[2] = 0x9e;
/* Parameter length is 1 byte (num_iac) + 3 bytes per LAP. */
1724 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1725 (hci_cp.num_iac * 3) + 1, &hci_cp);
1727 scan |= SCAN_INQUIRY;
1729 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1732 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1735 update_adv_data(&req);
1737 err = hci_req_run(&req, set_discoverable_complete);
1739 mgmt_pending_remove(cmd);
1742 hci_dev_unlock(hdev);
/* Queue page-scan activity/type HCI commands enabling or disabling
 * fast-connectable mode. No-op on non-BR/EDR controllers and on
 * pre-1.2 controllers (which lack interlaced scan). Commands are only
 * added when the requested parameters differ from the current ones.
 */
1746 static void write_fast_connectable(struct hci_request *req, bool enable)
1748 struct hci_dev *hdev = req->hdev;
1749 struct hci_cp_write_page_scan_activity acp;
1752 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1755 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1759 type = PAGE_SCAN_TYPE_INTERLACED;
1761 /* 160 msec page scan interval */
1762 acp.interval = cpu_to_le16(0x0100);
1764 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1766 /* default 1.28 sec page scan */
1767 acp.interval = cpu_to_le16(0x0800);
/* 11.25 msec page scan window in both modes. */
1770 acp.window = cpu_to_le16(0x0012);
1772 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1773 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1774 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1777 if (hdev->page_scan_type != type)
1778 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable: on failure send
 * the error; on success update HCI_CONNECTABLE (disabling connectable
 * also clears HCI_DISCOVERABLE), reply, and when something actually
 * changed broadcast New Settings and refresh page scan, advertising
 * data and background scan.
 * NOTE(review): lines are elided in this excerpt.
 */
1781 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1784 struct pending_cmd *cmd;
1785 struct mgmt_mode *cp;
1786 bool conn_changed, discov_changed;
1788 BT_DBG("status 0x%02x", status);
1792 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1797 u8 mgmt_err = mgmt_status(status);
1798 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1804 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1806 discov_changed = false;
/* Turning connectable off also drops discoverable. */
1808 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1810 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1814 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1816 if (conn_changed || discov_changed) {
1817 new_settings(hdev, cmd->sk);
1818 hci_update_page_scan(hdev);
1820 mgmt_update_adv_data(hdev);
1821 hci_update_background_scan(hdev);
1825 mgmt_pending_remove(cmd);
1828 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (used when powered off or when no
 * HCI commands end up queued): toggle HCI_CONNECTABLE (clearing
 * HCI_DISCOVERABLE when disabling), reply, and on a real change refresh
 * page scan / background scan and broadcast New Settings.
 */
1831 static int set_connectable_update_settings(struct hci_dev *hdev,
1832 struct sock *sk, u8 val)
1834 bool changed = false;
1837 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1841 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1843 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1844 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1847 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1852 hci_update_page_scan(hdev);
1853 hci_update_background_scan(hdev);
1854 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. Validates the boolean value,
 * requires BR/EDR or LE to be enabled, serializes against pending
 * discoverable/connectable changes, and builds an HCI request updating
 * scan enable / advertising; falls back to the flag-only settings path
 * when powered off or when no HCI commands were queued (-ENODATA).
 * NOTE(review): lines are elided in this excerpt.
 */
1860 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1863 struct mgmt_mode *cp = data;
1864 struct pending_cmd *cmd;
1865 struct hci_request req;
1869 BT_DBG("request for %s", hdev->name);
1871 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1872 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1873 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1874 MGMT_STATUS_REJECTED);
1876 if (cp->val != 0x00 && cp->val != 0x01)
1877 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1878 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI traffic needed, just update the flags. */
1882 if (!hdev_is_powered(hdev)) {
1883 err = set_connectable_update_settings(hdev, sk, cp->val);
1887 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1888 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1889 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1900 hci_req_init(&req, hdev);
1902 /* If BR/EDR is not enabled and we disable advertising as a
1903 * by-product of disabling connectable, we need to update the
1904 * advertising flags.
1906 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1908 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1909 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1911 update_adv_data(&req);
1912 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1916 /* If we don't have any whitelist entries just
1917 * disable all scanning. If there are entries
1918 * and we had both page and inquiry scanning
1919 * enabled then fall back to only page scanning.
1920 * Otherwise no changes are needed.
1922 if (list_empty(&hdev->whitelist))
1923 scan = SCAN_DISABLED;
1924 else if (test_bit(HCI_ISCAN, &hdev->flags))
1927 goto no_scan_update;
/* Disabling while inquiry-scanning with an active discoverable
 * timeout: stop the timeout worker.
 */
1929 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1930 hdev->discov_timeout > 0)
1931 cancel_delayed_work(&hdev->discov_off);
1934 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1938 /* If we're going from non-connectable to connectable or
1939 * vice-versa when fast connectable is enabled ensure that fast
1940 * connectable gets disabled. write_fast_connectable won't do
1941 * anything if the page scan parameters are already what they
1944 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1945 write_fast_connectable(&req, false);
1947 /* Update the advertising parameters if necessary */
1948 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1949 enable_advertising(&req);
1951 err = hci_req_run(&req, set_connectable_complete);
1953 mgmt_pending_remove(cmd);
/* -ENODATA: the request was empty; update settings directly. */
1954 if (err == -ENODATA)
1955 err = set_connectable_update_settings(hdev, sk,
1961 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: pure flag operation (no HCI traffic).
 * Toggles HCI_BONDABLE, replies with current settings, and broadcasts
 * New Settings when the flag actually changed.
 */
1965 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1968 struct mgmt_mode *cp = data;
1972 BT_DBG("request for %s", hdev->name);
1974 if (cp->val != 0x00 && cp->val != 0x01)
1975 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1976 MGMT_STATUS_INVALID_PARAMS);
1981 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1983 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1985 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1990 err = new_settings(hdev, sk);
1993 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR support; when
 * powered off only toggles HCI_LINK_SECURITY, otherwise issues
 * HCI Write Authentication Enable unless the controller already
 * matches the requested state.
 * NOTE(review): lines are elided in this excerpt.
 */
1997 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2000 struct mgmt_mode *cp = data;
2001 struct pending_cmd *cmd;
2005 BT_DBG("request for %s", hdev->name);
2007 status = mgmt_bredr_support(hdev);
2009 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2012 if (cp->val != 0x00 && cp->val != 0x01)
2013 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2014 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: update the flag only; hardware is synced at power on. */
2018 if (!hdev_is_powered(hdev)) {
2019 bool changed = false;
2021 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2022 &hdev->dev_flags)) {
2023 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2027 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2032 err = new_settings(hdev, sk);
2037 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2038 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller state already matches: no HCI command needed. */
2045 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2046 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2050 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2056 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2058 mgmt_pending_remove(cmd);
2063 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Requires BR/EDR and SSP-capable hardware. Powered off: flag-only
 * (disabling SSP also clears HS). Powered on: send HCI Write Simple
 * Pairing Mode, first turning off SSP debug mode when disabling SSP
 * while debug keys are in use.
 * NOTE(review): lines are elided in this excerpt.
 */
2067 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2069 struct mgmt_mode *cp = data;
2070 struct pending_cmd *cmd;
2074 BT_DBG("request for %s", hdev->name);
2076 status = mgmt_bredr_support(hdev);
2078 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2080 if (!lmp_ssp_capable(hdev))
2081 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2082 MGMT_STATUS_NOT_SUPPORTED);
2084 if (cp->val != 0x00 && cp->val != 0x01)
2085 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2086 MGMT_STATUS_INVALID_PARAMS);
2090 if (!hdev_is_powered(hdev)) {
2094 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2097 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* SSP off implies High Speed off as well. */
2100 changed = test_and_clear_bit(HCI_HS_ENABLED,
2103 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2106 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2111 err = new_settings(hdev, sk);
2116 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2117 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2118 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2123 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2124 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2128 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP while debug keys are active: also turn off SSP
 * debug mode (best effort; return value intentionally ignored).
 */
2134 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2135 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2136 sizeof(cp->val), &cp->val);
2138 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2140 mgmt_pending_remove(cmd);
2145 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: enable/disable High Speed (802.11 AMP)
 * support. Pure flag operation; requires BR/EDR, SSP capability and
 * SSP enabled. Disabling while powered is rejected in the visible path.
 * NOTE(review): lines are elided in this excerpt.
 */
2149 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2151 struct mgmt_mode *cp = data;
2156 BT_DBG("request for %s", hdev->name);
2158 status = mgmt_bredr_support(hdev);
2160 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2162 if (!lmp_ssp_capable(hdev))
2163 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2164 MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be enabled first. */
2166 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2167 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2168 MGMT_STATUS_REJECTED);
2170 if (cp->val != 0x00 && cp->val != 0x01)
2171 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2172 MGMT_STATUS_INVALID_PARAMS);
2177 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2179 if (hdev_is_powered(hdev)) {
2180 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2181 MGMT_STATUS_REJECTED);
2185 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2188 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2193 err = new_settings(hdev, sk);
2196 hci_dev_unlock(hdev);
/* HCI request completion handler for Set LE: on failure answer all
 * pending SET_LE commands with the error; on success answer them with
 * the new settings, broadcast New Settings, and - when LE ended up
 * enabled - refresh advertising/scan-response data and the background
 * scan.
 * NOTE(review): lines are elided in this excerpt.
 */
2200 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2202 struct cmd_lookup match = { NULL, hdev };
2207 u8 mgmt_err = mgmt_status(status);
2209 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2214 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2216 new_settings(hdev, match.sk);
2221 /* Make sure the controller has a good default for
2222 * advertising data. Restrict the update to when LE
2223 * has actually been enabled. During power on, the
2224 * update in powered_update_hci will take care of it.
2226 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2227 struct hci_request req;
2229 hci_req_init(&req, hdev);
2230 update_adv_data(&req);
2231 update_scan_rsp_data(&req);
2232 __hci_update_background_scan(&req);
2233 hci_req_run(&req, NULL);
2237 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable LE on a dual-mode controller
 * (LE-only controllers reject the toggle). When powered off or when the
 * host LE state already matches, only the flags change; otherwise an
 * HCI Write LE Host Supported request is issued (disabling advertising
 * first when turning LE off), completed by le_enable_complete.
 * NOTE(review): lines are elided in this excerpt.
 */
2240 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2242 struct mgmt_mode *cp = data;
2243 struct hci_cp_write_le_host_supported hci_cp;
2244 struct pending_cmd *cmd;
2245 struct hci_request req;
2249 BT_DBG("request for %s", hdev->name);
2251 if (!lmp_le_capable(hdev))
2252 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2253 MGMT_STATUS_NOT_SUPPORTED);
2255 if (cp->val != 0x00 && cp->val != 0x01)
2256 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2257 MGMT_STATUS_INVALID_PARAMS);
2259 /* LE-only devices do not allow toggling LE on/off */
2260 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2261 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2262 MGMT_STATUS_REJECTED);
2267 enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or host LE support already matches. */
2269 if (!hdev_is_powered(hdev) || val == enabled) {
2270 bool changed = false;
2272 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2273 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Turning LE off also drops the advertising setting. */
2277 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2278 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2282 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2287 err = new_settings(hdev, sk);
2292 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2293 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2294 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2299 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2305 hci_req_init(&req, hdev);
2307 memset(&hci_cp, 0, sizeof(hci_cp));
2311 hci_cp.simul = 0x00;
2313 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2314 disable_advertising(&req);
2317 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2320 err = hci_req_run(&req, le_enable_complete);
2322 mgmt_pending_remove(cmd);
2325 hci_dev_unlock(hdev);
2329 /* This is a helper function to test for pending mgmt commands that can
2330 * cause CoD or EIR HCI commands. We can only allow one such pending
2331 * mgmt command at a time since otherwise we cannot easily track what
2332 * the current values are, will be, and based on that calculate if a new
2333 * HCI command needs to be sent and if yes with what value.
/* Return whether a pending mgmt command that may generate Class of
 * Device or EIR HCI commands exists (see comment block above). Only the
 * matching opcodes are visible here; the returns are elided in this
 * excerpt.
 */
2335 static bool pending_eir_or_class(struct hci_dev *hdev)
2337 struct pending_cmd *cmd;
2339 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2340 switch (cmd->opcode) {
2341 case MGMT_OP_ADD_UUID:
2342 case MGMT_OP_REMOVE_UUID:
2343 case MGMT_OP_SET_DEV_CLASS:
2344 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short-form UUIDs.
 */
2352 static const u8 bluetooth_base_uuid[] = {
2353 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2354 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes match the Bluetooth Base
 * UUID it is a short-form UUID and the value at offset 12 decides the
 * size; otherwise it is a full 128-bit UUID. Return statements are
 * elided in this excerpt.
 */
2357 static u8 get_uuid_size(const u8 *uuid)
2361 if (memcmp(uuid, bluetooth_base_uuid, 12))
2364 val = get_unaligned_le32(&uuid[12]);
/* Complete a pending class-related mgmt command (@mgmt_op), returning
 * the current 3-byte device class in the response, and drop it from the
 * pending list.
 */
2371 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2373 struct pending_cmd *cmd;
2377 cmd = mgmt_pending_find(mgmt_op, hdev);
2381 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2382 hdev->dev_class, 3);
2384 mgmt_pending_remove(cmd);
2387 hci_dev_unlock(hdev);
/* HCI request completion handler for Add UUID. */
2390 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2392 BT_DBG("status 0x%02x", status);
2394 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append the UUID to hdev->uuids and run an
 * HCI request to refresh class/EIR. Rejected while another class/EIR
 * affecting command is pending; -ENODATA from the request means nothing
 * needed updating and the command completes immediately.
 * NOTE(review): lines are elided in this excerpt.
 */
2397 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2399 struct mgmt_cp_add_uuid *cp = data;
2400 struct pending_cmd *cmd;
2401 struct hci_request req;
2402 struct bt_uuid *uuid;
2405 BT_DBG("request for %s", hdev->name);
2409 if (pending_eir_or_class(hdev)) {
2410 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2415 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2421 memcpy(uuid->uuid, cp->uuid, 16);
2422 uuid->svc_hint = cp->svc_hint;
2423 uuid->size = get_uuid_size(cp->uuid);
2425 list_add_tail(&uuid->list, &hdev->uuids);
2427 hci_req_init(&req, hdev);
2432 err = hci_req_run(&req, add_uuid_complete);
2434 if (err != -ENODATA)
/* Empty request: nothing to send, reply with the current class. */
2437 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2438 hdev->dev_class, 3);
2442 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2451 hci_dev_unlock(hdev);
/* Arm the service cache: when powered and not already caching, schedule
 * the service_cache worker. Return values are elided in this excerpt;
 * presumably indicates whether caching was (re)armed - TODO confirm.
 */
2455 static bool enable_service_cache(struct hci_dev *hdev)
2457 if (!hdev_is_powered(hdev))
2460 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2461 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion handler for Remove UUID. */
2469 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2471 BT_DBG("status 0x%02x", status);
2473 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove a specific UUID, or - when the
 * all-zero wildcard UUID is given - clear the whole list (re-arming the
 * service cache when possible). Then refresh class/EIR via an HCI
 * request; -ENODATA means nothing needed updating.
 * NOTE(review): lines are elided in this excerpt.
 */
2476 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2479 struct mgmt_cp_remove_uuid *cp = data;
2480 struct pending_cmd *cmd;
2481 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as "remove all". */
2482 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2483 struct hci_request req;
2486 BT_DBG("request for %s", hdev->name);
2490 if (pending_eir_or_class(hdev)) {
2491 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2496 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2497 hci_uuids_clear(hdev);
2499 if (enable_service_cache(hdev)) {
2500 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2501 0, hdev->dev_class, 3);
2510 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2511 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2514 list_del(&match->list);
/* No matching UUID found. */
2520 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2521 MGMT_STATUS_INVALID_PARAMS);
2526 hci_req_init(&req, hdev);
2531 err = hci_req_run(&req, remove_uuid_complete);
2533 if (err != -ENODATA)
2536 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2537 hdev->dev_class, 3);
2541 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2550 hci_dev_unlock(hdev);
/* HCI request completion handler for Set Device Class. */
2554 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2556 BT_DBG("status 0x%02x", status);
2558 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: store major/minor class (validating
 * the reserved bits), flush a pending service cache if needed, and run
 * an HCI request to push the new class; completes immediately when
 * powered off or when the request is empty (-ENODATA).
 * NOTE(review): lines are elided in this excerpt.
 */
2561 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2564 struct mgmt_cp_set_dev_class *cp = data;
2565 struct pending_cmd *cmd;
2566 struct hci_request req;
2569 BT_DBG("request for %s", hdev->name);
2571 if (!lmp_bredr_capable(hdev))
2572 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2573 MGMT_STATUS_NOT_SUPPORTED);
2577 if (pending_eir_or_class(hdev)) {
2578 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low two minor bits and high three major bits are reserved. */
2583 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2584 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2585 MGMT_STATUS_INVALID_PARAMS);
2589 hdev->major_class = cp->major;
2590 hdev->minor_class = cp->minor;
2592 if (!hdev_is_powered(hdev)) {
2593 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2594 hdev->dev_class, 3);
2598 hci_req_init(&req, hdev);
/* Cancel service caching; the lock must be dropped around the
 * synchronous cancel to avoid deadlocking with the worker.
 */
2600 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2601 hci_dev_unlock(hdev);
2602 cancel_delayed_work_sync(&hdev->service_cache);
2609 err = hci_req_run(&req, set_class_complete);
2611 if (err != -ENODATA)
2614 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2615 hdev->dev_class, 3);
2619 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2628 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validate the key count against the
 * message length and per-key fields, replace the stored link keys, and
 * update the keep-debug-keys flag. Debug combination keys are never
 * stored. Replies with an empty Command Complete.
 * NOTE(review): lines are elided in this excerpt.
 */
2632 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2635 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound that keeps the total message size within u16. */
2636 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2637 sizeof(struct mgmt_link_key_info));
2638 u16 key_count, expected_len;
2642 BT_DBG("request for %s", hdev->name);
2644 if (!lmp_bredr_capable(hdev))
2645 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2646 MGMT_STATUS_NOT_SUPPORTED);
2648 key_count = __le16_to_cpu(cp->key_count);
2649 if (key_count > max_key_count) {
2650 BT_ERR("load_link_keys: too big key_count value %u",
2652 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2653 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared key count. */
2656 expected_len = sizeof(*cp) + key_count *
2657 sizeof(struct mgmt_link_key_info);
2658 if (expected_len != len) {
2659 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2661 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2662 MGMT_STATUS_INVALID_PARAMS);
2665 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2666 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2667 MGMT_STATUS_INVALID_PARAMS);
2669 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching stored state. */
2672 for (i = 0; i < key_count; i++) {
2673 struct mgmt_link_key_info *key = &cp->keys[i];
2675 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2676 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2677 MGMT_STATUS_INVALID_PARAMS);
2682 hci_link_keys_clear(hdev);
2685 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2688 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2692 new_settings(hdev, NULL);
2694 for (i = 0; i < key_count; i++) {
2695 struct mgmt_link_key_info *key = &cp->keys[i];
2697 /* Always ignore debug keys and require a new pairing if
2698 * the user wants to use them.
2700 if (key->type == HCI_LK_DEBUG_COMBINATION)
2703 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2704 key->type, key->pin_len, NULL);
2707 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2709 hci_dev_unlock(hdev);
/* Broadcast a Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk.
 */
2714 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2715 u8 addr_type, struct sock *skip_sk)
2717 struct mgmt_ev_device_unpaired ev;
2719 bacpy(&ev.addr.bdaddr, bdaddr);
2720 ev.addr.type = addr_type;
2722 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored keys for the given
 * address (link key for BR/EDR; IRK and LTK for LE) and, when requested
 * and the device is connected, terminate the link with an HCI
 * Disconnect. Replies NOT_PAIRED when no key existed.
 * NOTE(review): lines are elided in this excerpt.
 */
2726 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct mgmt_cp_unpair_device *cp = data;
2730 struct mgmt_rp_unpair_device rp;
2731 struct hci_cp_disconnect dc;
2732 struct pending_cmd *cmd;
2733 struct hci_conn *conn;
/* Response always carries the address back, even on error. */
2736 memset(&rp, 0, sizeof(rp));
2737 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2738 rp.addr.type = cp->addr.type;
2740 if (!bdaddr_type_is_valid(cp->addr.type))
2741 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2742 MGMT_STATUS_INVALID_PARAMS,
2745 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2746 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2747 MGMT_STATUS_INVALID_PARAMS,
2752 if (!hdev_is_powered(hdev)) {
2753 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2754 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2758 if (cp->addr.type == BDADDR_BREDR) {
2759 /* If disconnection is requested, then look up the
2760 * connection. If the remote device is connected, it
2761 * will be later used to terminate the link.
2763 * Setting it to NULL explicitly will cause no
2764 * termination of the link.
2767 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2772 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2776 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2779 /* Defer clearing up the connection parameters
2780 * until closing to give a chance of keeping
2781 * them if a repairing happens.
2783 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2785 /* If disconnection is not requested, then
2786 * clear the connection variable so that the
2787 * link is not terminated.
2789 if (!cp->disconnect)
2793 if (cp->addr.type == BDADDR_LE_PUBLIC)
2794 addr_type = ADDR_LE_DEV_PUBLIC;
2796 addr_type = ADDR_LE_DEV_RANDOM;
2798 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2800 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No key was stored for this address. */
2804 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2805 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2809 /* If the connection variable is set, then termination of the
2810 * link is requested.
2813 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2815 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2819 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2826 cmd->cmd_complete = addr_cmd_complete;
2828 dc.handle = cpu_to_le16(conn->handle);
2829 dc.reason = 0x13; /* Remote User Terminated Connection */
2830 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2832 mgmt_pending_remove(cmd);
2835 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: look up the ACL or LE connection for the
 * given address and terminate it with reason "remote user terminated".
 * Rejects when unpowered, when a disconnect is already pending, or when
 * no active connection exists.
 * NOTE(review): lines are elided in this excerpt.
 */
2839 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2842 struct mgmt_cp_disconnect *cp = data;
2843 struct mgmt_rp_disconnect rp;
2844 struct pending_cmd *cmd;
2845 struct hci_conn *conn;
2850 memset(&rp, 0, sizeof(rp));
2851 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2852 rp.addr.type = cp->addr.type;
2854 if (!bdaddr_type_is_valid(cp->addr.type))
2855 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2856 MGMT_STATUS_INVALID_PARAMS,
2861 if (!test_bit(HCI_UP, &hdev->flags)) {
2862 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2863 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2867 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2868 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2869 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2873 if (cp->addr.type == BDADDR_BREDR)
2874 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2877 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2879 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2880 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2881 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2885 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2891 cmd->cmd_complete = generic_cmd_complete;
2893 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2895 mgmt_pending_remove(cmd);
2898 hci_dev_unlock(hdev);
/* Translate an HCI link type + address type pair into the mgmt BDADDR_*
 * address type. LE public maps to BDADDR_LE_PUBLIC, any other LE
 * address type falls back to LE Random, and non-LE links fall back to
 * BR/EDR. (The switch labels for link types are elided in this
 * excerpt.)
 */
2902 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2904 switch (link_type) {
2906 switch (addr_type) {
2907 case ADDR_LE_DEV_PUBLIC:
2908 return BDADDR_LE_PUBLIC;
2911 /* Fallback to LE Random address type */
2912 return BDADDR_LE_RANDOM;
2916 /* Fallback to BR/EDR type */
2917 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: build a response listing the address
 * and type of every mgmt-visible connection, skipping SCO/eSCO links.
 * The buffer is sized from a first counting pass; the length is
 * recomputed afterwards since the filter may reduce the count.
 * NOTE(review): lines are elided in this excerpt.
 */
2921 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2924 struct mgmt_rp_get_connections *rp;
2934 if (!hdev_is_powered(hdev)) {
2935 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2936 MGMT_STATUS_NOT_POWERED);
/* First pass: count mgmt-connected links for the allocation. */
2941 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2942 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2946 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2947 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the entries. */
2954 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2955 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2957 bacpy(&rp->addr[i].bdaddr, &c->dst);
2958 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2959 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2964 rp->conn_count = cpu_to_le16(i);
2966 /* Recalculate length in case of filtered SCO connections, etc */
2967 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2969 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2975 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply as a pending mgmt command and send
 * the corresponding HCI command; the pending entry is dropped if the
 * send fails.
 */
2979 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2980 struct mgmt_cp_pin_code_neg_reply *cp)
2982 struct pending_cmd *cmd;
2985 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2990 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2991 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2993 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for the ACL connection with the given address. When
 * the link requires high security a full 16-byte PIN is mandatory; a
 * shorter one is converted into a negative reply.
 * NOTE(review): lines are elided in this excerpt.
 */
2998 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3001 struct hci_conn *conn;
3002 struct mgmt_cp_pin_code_reply *cp = data;
3003 struct hci_cp_pin_code_reply reply;
3004 struct pending_cmd *cmd;
3011 if (!hdev_is_powered(hdev)) {
3012 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3013 MGMT_STATUS_NOT_POWERED);
3017 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3019 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3020 MGMT_STATUS_NOT_CONNECTED);
/* High-security links require a 16-digit PIN; reject shorter ones
 * with an HCI negative reply on the user's behalf.
 */
3024 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3025 struct mgmt_cp_pin_code_neg_reply ncp;
3027 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3029 BT_ERR("PIN code is not 16 bytes long");
3031 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3033 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3034 MGMT_STATUS_INVALID_PARAMS);
3039 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3045 cmd->cmd_complete = addr_cmd_complete;
3047 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3048 reply.pin_len = cp->pin_len;
3049 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3051 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3053 mgmt_pending_remove(cmd);
3056 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler. Rejects values above
 * SMP_IO_KEYBOARD_DISPLAY, stores the new capability in
 * hdev->io_capability (under the dev lock) and returns a
 * command-complete with no response parameters.
 */
3060 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3063 struct mgmt_cp_set_io_capability *cp = data;
3067 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3068 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3069 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3073 hdev->io_capability = cp->io_capability;
3075 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3076 hdev->io_capability);
3078 hci_dev_unlock(hdev);
3080 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Walk hdev->mgmt_pending looking for the MGMT_OP_PAIR_DEVICE command
 * whose user_data points at this connection. The return paths are not
 * visible in this excerpt; presumably returns the match or NULL — verify
 * against the full source.
 */
3084 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3086 struct hci_dev *hdev = conn->hdev;
3087 struct pending_cmd *cmd;
3089 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3090 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3093 if (cmd->user_data != conn)
/* cmd_complete callback for PAIR_DEVICE: reports the final pairing
 * status together with the peer address, detaches all connection
 * callbacks so this command receives no further events, and drops the
 * connection reference taken when pairing started.
 */
3102 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3104 struct mgmt_rp_pair_device rp;
3105 struct hci_conn *conn = cmd->user_data;
3108 bacpy(&rp.addr.bdaddr, &conn->dst);
3109 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3111 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3114 /* So we don't get further callbacks for this connection */
3115 conn->connect_cfm_cb = NULL;
3116 conn->security_cfm_cb = NULL;
3117 conn->disconn_cfm_cb = NULL;
3119 hci_conn_drop(conn);
3121 /* The device is paired so there is no need to remove
3122 * its connection parameters anymore.
3124 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called when SMP pairing finishes. Maps the boolean result to
 * MGMT_STATUS_SUCCESS/MGMT_STATUS_FAILED and, if a PAIR_DEVICE command
 * is pending for this connection, completes and removes it.
 */
3131 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3133 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3134 struct pending_cmd *cmd;
3136 cmd = find_pairing(conn);
3138 cmd->cmd_complete(cmd, status);
3139 mgmt_pending_remove(cmd);
/* Connection callback used for BR/EDR pairing: finds the pending
 * PAIR_DEVICE command for this connection and completes it with the
 * HCI status translated via mgmt_status().
 */
3143 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3145 struct pending_cmd *cmd;
3147 BT_DBG("status %u", status);
3149 cmd = find_pairing(conn);
3151 BT_DBG("Unable to find a pending command");
3155 cmd->cmd_complete(cmd, mgmt_status(status));
3156 mgmt_pending_remove(cmd);
/* Connection callback used for LE pairing: same shape as
 * pairing_complete_cb — look up the pending PAIR_DEVICE command and
 * complete it with the translated HCI status. (The extra early-out
 * condition that distinguishes the LE path is not visible here.)
 */
3159 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3161 struct pending_cmd *cmd;
3163 BT_DBG("status %u", status);
3168 cmd = find_pairing(conn);
3170 BT_DBG("Unable to find a pending command");
3174 cmd->cmd_complete(cmd, mgmt_status(status));
3175 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler. Validates the address type and the
 * requested IO capability, requires a powered adapter, then initiates
 * either an ACL (BR/EDR) or LE connection with dedicated-bonding
 * authentication at medium security. For LE the connection parameters
 * are pre-registered so slave-preferred values are remembered. Failure
 * to connect, or an already in-progress pairing (connect_cfm_cb set),
 * is reported back immediately; otherwise pairing callbacks are
 * installed per transport and the command completes right away when
 * the link is already connected and secure.
 */
3178 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3181 struct mgmt_cp_pair_device *cp = data;
3182 struct mgmt_rp_pair_device rp;
3183 struct pending_cmd *cmd;
3184 u8 sec_level, auth_type;
3185 struct hci_conn *conn;
3190 memset(&rp, 0, sizeof(rp));
3191 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3192 rp.addr.type = cp->addr.type;
3194 if (!bdaddr_type_is_valid(cp->addr.type))
3195 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3196 MGMT_STATUS_INVALID_PARAMS,
3199 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3200 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3201 MGMT_STATUS_INVALID_PARAMS,
3206 if (!hdev_is_powered(hdev)) {
3207 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3208 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3212 sec_level = BT_SECURITY_MEDIUM;
3213 auth_type = HCI_AT_DEDICATED_BONDING;
3215 if (cp->addr.type == BDADDR_BREDR) {
3216 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3221 /* Convert from L2CAP channel address type to HCI address type
3223 if (cp->addr.type == BDADDR_LE_PUBLIC)
3224 addr_type = ADDR_LE_DEV_PUBLIC;
3226 addr_type = ADDR_LE_DEV_RANDOM;
3228 /* When pairing a new device, it is expected to remember
3229 * this device for future connections. Adding the connection
3230 * parameter information ahead of time allows tracking
3231 * of the slave preferred values and will speed up any
3232 * further connection establishment.
3234 * If connection parameters already exist, then they
3235 * will be kept and this function does nothing.
3237 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3239 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3240 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: distinguish busy from generic failure. */
3247 if (PTR_ERR(conn) == -EBUSY)
3248 status = MGMT_STATUS_BUSY;
3250 status = MGMT_STATUS_CONNECT_FAILED;
3252 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing owns this link. */
3258 if (conn->connect_cfm_cb) {
3259 hci_conn_drop(conn);
3260 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3261 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3265 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3268 hci_conn_drop(conn);
3272 cmd->cmd_complete = pairing_complete;
3274 /* For LE, just connecting isn't a proof that the pairing finished */
3275 if (cp->addr.type == BDADDR_BREDR) {
3276 conn->connect_cfm_cb = pairing_complete_cb;
3277 conn->security_cfm_cb = pairing_complete_cb;
3278 conn->disconn_cfm_cb = pairing_complete_cb;
3280 conn->connect_cfm_cb = le_pairing_complete_cb;
3281 conn->security_cfm_cb = le_pairing_complete_cb;
3282 conn->disconn_cfm_cb = le_pairing_complete_cb;
3285 conn->io_capability = cp->io_cap;
/* Hold a reference to the connection for the lifetime of the command. */
3286 cmd->user_data = hci_conn_get(conn);
3288 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3289 hci_conn_security(conn, sec_level, auth_type, true)) {
3290 cmd->cmd_complete(cmd, 0);
3291 mgmt_pending_remove(cmd);
3297 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler. Requires a powered adapter and an
 * in-flight PAIR_DEVICE command whose target bdaddr matches the request;
 * completes that command with MGMT_STATUS_CANCELLED and then confirms
 * the cancel back to the caller with the address echoed.
 */
3301 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3304 struct mgmt_addr_info *addr = data;
3305 struct pending_cmd *cmd;
3306 struct hci_conn *conn;
3313 if (!hdev_is_powered(hdev)) {
3314 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3315 MGMT_STATUS_NOT_POWERED);
3319 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3321 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3322 MGMT_STATUS_INVALID_PARAMS);
3326 conn = cmd->user_data;
/* The cancel must name the same peer the pending pairing targets. */
3328 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3329 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3330 MGMT_STATUS_INVALID_PARAMS);
3334 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3335 mgmt_pending_remove(cmd);
3337 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3338 addr, sizeof(*addr));
3340 hci_dev_unlock(hdev);
/* Shared backend for all user confirm/passkey (and PIN-neg) replies.
 * Looks up the connection by transport (ACL for BDADDR_BREDR, LE link
 * otherwise). LE replies are routed through SMP via
 * smp_user_confirm_reply(); BR/EDR replies are forwarded to the
 * controller as the given hci_op — HCI_OP_USER_PASSKEY_REPLY carries a
 * bdaddr+passkey struct, every other op just the bdaddr. A pending
 * entry with addr_cmd_complete is registered before sending.
 */
3344 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3345 struct mgmt_addr_info *addr, u16 mgmt_op,
3346 u16 hci_op, __le32 passkey)
3348 struct pending_cmd *cmd;
3349 struct hci_conn *conn;
3354 if (!hdev_is_powered(hdev)) {
3355 err = cmd_complete(sk, hdev->id, mgmt_op,
3356 MGMT_STATUS_NOT_POWERED, addr,
3361 if (addr->type == BDADDR_BREDR)
3362 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3364 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3367 err = cmd_complete(sk, hdev->id, mgmt_op,
3368 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies go to SMP rather than to the controller. */
3373 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3374 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3376 err = cmd_complete(sk, hdev->id, mgmt_op,
3377 MGMT_STATUS_SUCCESS, addr,
3380 err = cmd_complete(sk, hdev->id, mgmt_op,
3381 MGMT_STATUS_FAILED, addr,
3387 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3393 cmd->cmd_complete = addr_cmd_complete;
3395 /* Continue with pairing via HCI */
3396 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3397 struct hci_cp_user_passkey_reply cp;
3399 bacpy(&cp.bdaddr, &addr->bdaddr);
3400 cp.passkey = passkey;
3401 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3403 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3407 mgmt_pending_remove(cmd);
3410 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around user_pairing_resp()
 * with no passkey. */
3414 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3415 void *data, u16 len)
3417 struct mgmt_cp_pin_code_neg_reply *cp = data;
3421 return user_pairing_resp(sk, hdev, &cp->addr,
3422 MGMT_OP_PIN_CODE_NEG_REPLY,
3423 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validates the exact command length, then
 * delegates to user_pairing_resp() (no passkey). */
3426 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3429 struct mgmt_cp_user_confirm_reply *cp = data;
3433 if (len != sizeof(*cp))
3434 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3435 MGMT_STATUS_INVALID_PARAMS);
3437 return user_pairing_resp(sk, hdev, &cp->addr,
3438 MGMT_OP_USER_CONFIRM_REPLY,
3439 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around
 * user_pairing_resp() (no passkey). */
3442 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3443 void *data, u16 len)
3445 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3449 return user_pairing_resp(sk, hdev, &cp->addr,
3450 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3451 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forwards the user-entered passkey through
 * user_pairing_resp(). */
3454 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3457 struct mgmt_cp_user_passkey_reply *cp = data;
3461 return user_pairing_resp(sk, hdev, &cp->addr,
3462 MGMT_OP_USER_PASSKEY_REPLY,
3463 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around
 * user_pairing_resp() (no passkey). */
3466 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3467 void *data, u16 len)
3469 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3473 return user_pairing_resp(sk, hdev, &cp->addr,
3474 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3475 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI_OP_WRITE_LOCAL_NAME command on the request, copying the
 * current hdev->dev_name into the command parameters. */
3478 static void update_name(struct hci_request *req)
3480 struct hci_dev *hdev = req->hdev;
3481 struct hci_cp_write_local_name cp;
3483 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3485 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion handler for SET_LOCAL_NAME: looks up the
 * pending command and answers it with cmd_status on failure or
 * cmd_complete on success, then removes the pending entry.
 */
3488 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3490 struct mgmt_cp_set_local_name *cp;
3491 struct pending_cmd *cmd;
3493 BT_DBG("status 0x%02x", status);
3497 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3504 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3505 mgmt_status(status));
3507 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3510 mgmt_pending_remove(cmd);
3513 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler. If neither the long nor the short
 * name changes, returns an immediate command-complete. When the adapter
 * is powered off the names are only stored and a LOCAL_NAME_CHANGED
 * event is emitted. Otherwise an HCI request is built to write the name
 * (and, on LE-capable controllers, refresh the scan response data,
 * where the name lives) and finished by set_name_complete.
 */
3516 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3519 struct mgmt_cp_set_local_name *cp = data;
3520 struct pending_cmd *cmd;
3521 struct hci_request req;
3528 /* If the old values are the same as the new ones just return a
3529 * direct command complete event.
3531 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3532 !memcmp(hdev->short_name, cp->short_name,
3533 sizeof(hdev->short_name))) {
3534 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3539 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3541 if (!hdev_is_powered(hdev)) {
3542 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3544 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3549 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3555 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3561 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3563 hci_req_init(&req, hdev);
3565 if (lmp_bredr_capable(hdev)) {
3570 /* The name is stored in the scan response data and so
3571 * no need to udpate the advertising data here.
3573 if (lmp_le_capable(hdev))
3574 update_scan_rsp_data(&req);
3576 err = hci_req_run(&req, set_name_complete);
3578 mgmt_pending_remove(cmd);
3581 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler. Requires a powered, SSP-capable
 * adapter and at most one outstanding OOB read. Uses the extended OOB
 * read (P-256 values included) when BR/EDR secure connections are
 * enabled, the legacy read otherwise; the pending entry is removed if
 * the HCI send fails.
 */
3585 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3586 void *data, u16 data_len)
3588 struct pending_cmd *cmd;
3591 BT_DBG("%s", hdev->name);
3595 if (!hdev_is_powered(hdev)) {
3596 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3597 MGMT_STATUS_NOT_POWERED);
3601 if (!lmp_ssp_capable(hdev)) {
3602 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3603 MGMT_STATUS_NOT_SUPPORTED);
3607 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3608 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3613 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3619 if (bredr_sc_enabled(hdev))
3620 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3623 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3626 mgmt_pending_remove(cmd);
3629 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Dispatches on the command
 * length: the short form carries P-192 hash/rand only, the extended
 * form adds P-256 values (with the 192-bit pair taken from the command
 * unless the address type is LE — the assignments for that branch are
 * not fully visible here). Only BDADDR_BREDR targets are accepted in
 * both forms; any other length is rejected as invalid parameters.
 */
3633 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3634 void *data, u16 len)
3636 struct mgmt_addr_info *addr = data;
3639 BT_DBG("%s ", hdev->name);
3641 if (!bdaddr_type_is_valid(addr->type))
3642 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3643 MGMT_STATUS_INVALID_PARAMS, addr,
3648 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3649 struct mgmt_cp_add_remote_oob_data *cp = data;
3652 if (cp->addr.type != BDADDR_BREDR) {
3653 err = cmd_complete(sk, hdev->id,
3654 MGMT_OP_ADD_REMOTE_OOB_DATA,
3655 MGMT_STATUS_INVALID_PARAMS,
3656 &cp->addr, sizeof(cp->addr));
3660 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3661 cp->addr.type, cp->hash,
3662 cp->rand, NULL, NULL);
3664 status = MGMT_STATUS_FAILED;
3666 status = MGMT_STATUS_SUCCESS;
3668 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3669 status, &cp->addr, sizeof(cp->addr));
3670 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3671 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3672 u8 *rand192, *hash192;
3675 if (cp->addr.type != BDADDR_BREDR) {
3676 err = cmd_complete(sk, hdev->id,
3677 MGMT_OP_ADD_REMOTE_OOB_DATA,
3678 MGMT_STATUS_INVALID_PARAMS,
3679 &cp->addr, sizeof(cp->addr));
3683 if (bdaddr_type_is_le(cp->addr.type)) {
3687 rand192 = cp->rand192;
3688 hash192 = cp->hash192;
3691 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3692 cp->addr.type, hash192, rand192,
3693 cp->hash256, cp->rand256);
3695 status = MGMT_STATUS_FAILED;
3697 status = MGMT_STATUS_SUCCESS;
3699 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3700 status, &cp->addr, sizeof(cp->addr));
3702 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3703 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3704 MGMT_STATUS_INVALID_PARAMS);
3708 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler. Only BDADDR_BREDR targets are
 * valid; BDADDR_ANY clears all stored remote OOB data, otherwise the
 * entry for the specified address is removed (a lookup failure maps to
 * MGMT_STATUS_INVALID_PARAMS).
 */
3712 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3713 void *data, u16 len)
3715 struct mgmt_cp_remove_remote_oob_data *cp = data;
3719 BT_DBG("%s", hdev->name);
3721 if (cp->addr.type != BDADDR_BREDR)
3722 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3723 MGMT_STATUS_INVALID_PARAMS,
3724 &cp->addr, sizeof(cp->addr));
3728 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3729 hci_remote_oob_data_clear(hdev);
3730 status = MGMT_STATUS_SUCCESS;
3734 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3736 status = MGMT_STATUS_INVALID_PARAMS;
3738 status = MGMT_STATUS_SUCCESS;
3741 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3742 status, &cp->addr, sizeof(cp->addr));
3744 hci_dev_unlock(hdev);
/* Build the HCI commands that start discovery on the given request,
 * according to hdev->discovery.type:
 *  - DISCOV_TYPE_BREDR: flush the inquiry cache and queue a GIAC
 *    inquiry (rejected as busy if an inquiry is already running);
 *  - DISCOV_TYPE_LE / INTERLEAVED: stop advertising if it would not
 *    abort a directed connect attempt, disable any running background
 *    scan, pick a (non-)resolvable private own address, and queue
 *    active scan parameter + enable commands.
 * Returns false with *status set to an MGMT_STATUS_* code on error.
 */
3748 static bool trigger_discovery(struct hci_request *req, u8 *status)
3750 struct hci_dev *hdev = req->hdev;
3751 struct hci_cp_le_set_scan_param param_cp;
3752 struct hci_cp_le_set_scan_enable enable_cp;
3753 struct hci_cp_inquiry inq_cp;
3754 /* General inquiry access code (GIAC) */
3755 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3759 switch (hdev->discovery.type) {
3760 case DISCOV_TYPE_BREDR:
3761 *status = mgmt_bredr_support(hdev);
3765 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3766 *status = MGMT_STATUS_BUSY;
3770 hci_inquiry_cache_flush(hdev);
3772 memset(&inq_cp, 0, sizeof(inq_cp));
3773 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3774 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3775 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3778 case DISCOV_TYPE_LE:
3779 case DISCOV_TYPE_INTERLEAVED:
3780 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR to be enabled as well. */
3784 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3785 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3786 *status = MGMT_STATUS_NOT_SUPPORTED;
3790 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3791 /* Don't let discovery abort an outgoing
3792 * connection attempt that's using directed
3795 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3797 *status = MGMT_STATUS_REJECTED;
3801 disable_advertising(req);
3804 /* If controller is scanning, it means the background scanning
3805 * is running. Thus, we should temporarily stop it in order to
3806 * set the discovery scanning parameters.
3808 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3809 hci_req_add_le_scan_disable(req);
3811 memset(&param_cp, 0, sizeof(param_cp));
3813 /* All active scans will be done with either a resolvable
3814 * private address (when privacy feature has been enabled)
3815 * or non-resolvable private address.
3817 err = hci_update_random_address(req, true, &own_addr_type);
3819 *status = MGMT_STATUS_FAILED;
3823 param_cp.type = LE_SCAN_ACTIVE;
3824 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3825 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3826 param_cp.own_address_type = own_addr_type;
3827 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3830 memset(&enable_cp, 0, sizeof(enable_cp));
3831 enable_cp.enable = LE_SCAN_ENABLE;
3832 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3833 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type falls through to invalid-params. */
3838 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion for (service) discovery start. Completes the
 * pending START_DISCOVERY or START_SERVICE_DISCOVERY command; a failure
 * returns the state to DISCOVERY_STOPPED, success moves it to
 * DISCOVERY_FINDING and, for LE/interleaved types, schedules the
 * le_scan_disable delayed work with a type-specific timeout.
 */
3845 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3848 struct pending_cmd *cmd;
3849 unsigned long timeout;
3851 BT_DBG("status %d", status);
3855 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3857 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3860 cmd->cmd_complete(cmd, mgmt_status(status));
3861 mgmt_pending_remove(cmd);
3865 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3869 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3871 switch (hdev->discovery.type) {
3872 case DISCOV_TYPE_LE:
3873 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3875 case DISCOV_TYPE_INTERLEAVED:
3876 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3878 case DISCOV_TYPE_BREDR:
3882 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3888 queue_delayed_work(hdev->workqueue,
3889 &hdev->le_scan_disable, timeout);
3892 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler. Requires a powered adapter, no
 * discovery already running and no periodic inquiry. Clears any
 * previously-set discovery filter, stores the requested type, builds
 * the discovery HCI request via trigger_discovery() and, if commands
 * were queued successfully, moves the state to DISCOVERY_STARTING.
 */
3895 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3896 void *data, u16 len)
3898 struct mgmt_cp_start_discovery *cp = data;
3899 struct pending_cmd *cmd;
3900 struct hci_request req;
3904 BT_DBG("%s", hdev->name);
3908 if (!hdev_is_powered(hdev)) {
3909 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3910 MGMT_STATUS_NOT_POWERED,
3911 &cp->type, sizeof(cp->type));
3915 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3916 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3917 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3918 MGMT_STATUS_BUSY, &cp->type,
3923 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3929 cmd->cmd_complete = generic_cmd_complete;
3931 /* Clear the discovery filter first to free any previously
3932 * allocated memory for the UUID list.
3934 hci_discovery_filter_clear(hdev);
3936 hdev->discovery.type = cp->type;
3937 hdev->discovery.report_invalid_rssi = false;
3939 hci_req_init(&req, hdev);
3941 if (!trigger_discovery(&req, &status)) {
3942 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3943 status, &cp->type, sizeof(cp->type));
3944 mgmt_pending_remove(cmd);
3948 err = hci_req_run(&req, start_discovery_complete);
3950 mgmt_pending_remove(cmd);
3954 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3957 hci_dev_unlock(hdev);
/* cmd_complete callback for START_SERVICE_DISCOVERY; echoes the stored
 * command parameters back to the caller (payload arguments are on the
 * continuation line not shown in this excerpt). */
3961 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3963 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler. Same powered/busy gating as
 * start_discovery, plus validation of the variable-length UUID list:
 * uuid_count is bounded so that sizeof(*cp) + uuid_count * 16 cannot
 * overflow u16, and the total command length must match exactly. The
 * RSSI threshold and (kmemdup-copied) UUID filter are stored in
 * hdev->discovery before the discovery HCI request is triggered.
 */
3967 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3968 void *data, u16 len)
3970 struct mgmt_cp_start_service_discovery *cp = data;
3971 struct pending_cmd *cmd;
3972 struct hci_request req;
3973 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3974 u16 uuid_count, expected_len;
3978 BT_DBG("%s", hdev->name);
3982 if (!hdev_is_powered(hdev)) {
3983 err = cmd_complete(sk, hdev->id,
3984 MGMT_OP_START_SERVICE_DISCOVERY,
3985 MGMT_STATUS_NOT_POWERED,
3986 &cp->type, sizeof(cp->type));
3990 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3991 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3992 err = cmd_complete(sk, hdev->id,
3993 MGMT_OP_START_SERVICE_DISCOVERY,
3994 MGMT_STATUS_BUSY, &cp->type,
3999 uuid_count = __le16_to_cpu(cp->uuid_count);
4000 if (uuid_count > max_uuid_count) {
4001 BT_ERR("service_discovery: too big uuid_count value %u",
4003 err = cmd_complete(sk, hdev->id,
4004 MGMT_OP_START_SERVICE_DISCOVERY,
4005 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4010 expected_len = sizeof(*cp) + uuid_count * 16;
4011 if (expected_len != len) {
4012 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4014 err = cmd_complete(sk, hdev->id,
4015 MGMT_OP_START_SERVICE_DISCOVERY,
4016 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4021 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4028 cmd->cmd_complete = service_discovery_cmd_complete;
4030 /* Clear the discovery filter first to free any previously
4031 * allocated memory for the UUID list.
4033 hci_discovery_filter_clear(hdev);
4035 hdev->discovery.type = cp->type;
4036 hdev->discovery.rssi = cp->rssi;
4037 hdev->discovery.uuid_count = uuid_count;
4039 if (uuid_count > 0) {
4040 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4042 if (!hdev->discovery.uuids) {
4043 err = cmd_complete(sk, hdev->id,
4044 MGMT_OP_START_SERVICE_DISCOVERY,
4046 &cp->type, sizeof(cp->type));
4047 mgmt_pending_remove(cmd);
4052 hci_req_init(&req, hdev);
4054 if (!trigger_discovery(&req, &status)) {
4055 err = cmd_complete(sk, hdev->id,
4056 MGMT_OP_START_SERVICE_DISCOVERY,
4057 status, &cp->type, sizeof(cp->type));
4058 mgmt_pending_remove(cmd);
4062 err = hci_req_run(&req, start_discovery_complete);
4064 mgmt_pending_remove(cmd);
4068 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4071 hci_dev_unlock(hdev);
/* HCI request completion for STOP_DISCOVERY: completes any pending
 * command with the translated status and, on success, sets the
 * discovery state back to DISCOVERY_STOPPED.
 */
4075 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4077 struct pending_cmd *cmd;
4079 BT_DBG("status %d", status);
4083 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4085 cmd->cmd_complete(cmd, mgmt_status(status));
4086 mgmt_pending_remove(cmd);
4090 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4092 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler. Rejects the request when no discovery
 * is active or when the supplied type does not match the running one.
 * Otherwise queues hci_stop_discovery() on a request; if HCI commands
 * were issued the state becomes DISCOVERY_STOPPING, and if none were
 * needed (-ENODATA) the command completes immediately with the state
 * set straight to DISCOVERY_STOPPED.
 */
4095 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4098 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4099 struct pending_cmd *cmd;
4100 struct hci_request req;
4103 BT_DBG("%s", hdev->name);
4107 if (!hci_discovery_active(hdev)) {
4108 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4109 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4110 sizeof(mgmt_cp->type));
4114 if (hdev->discovery.type != mgmt_cp->type) {
4115 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4116 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4117 sizeof(mgmt_cp->type));
4121 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4127 cmd->cmd_complete = generic_cmd_complete;
4129 hci_req_init(&req, hdev);
4131 hci_stop_discovery(&req);
4133 err = hci_req_run(&req, stop_discovery_complete);
4135 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4139 mgmt_pending_remove(cmd);
4141 /* If no HCI commands were sent we're done */
4142 if (err == -ENODATA) {
4143 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4144 &mgmt_cp->type, sizeof(mgmt_cp->type));
4145 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4149 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler. Only valid during an active discovery;
 * looks up the inquiry-cache entry with unknown name for the given
 * address and marks it NAME_KNOWN or NAME_NEEDED (the latter also
 * re-queues the entry for name resolution).
 */
4153 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4156 struct mgmt_cp_confirm_name *cp = data;
4157 struct inquiry_entry *e;
4160 BT_DBG("%s", hdev->name);
4164 if (!hci_discovery_active(hdev)) {
4165 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4166 MGMT_STATUS_FAILED, &cp->addr,
4171 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4173 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4174 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4179 if (cp->name_known) {
4180 e->name_state = NAME_KNOWN;
4183 e->name_state = NAME_NEEDED;
4184 hci_inquiry_cache_update_resolve(hdev, e);
4187 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4191 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler. Validates the address type, adds the
 * address to hdev->blacklist and, on success, broadcasts an
 * MGMT_EV_DEVICE_BLOCKED event before replying with the echoed address.
 */
4195 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4198 struct mgmt_cp_block_device *cp = data;
4202 BT_DBG("%s", hdev->name);
4204 if (!bdaddr_type_is_valid(cp->addr.type))
4205 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4206 MGMT_STATUS_INVALID_PARAMS,
4207 &cp->addr, sizeof(cp->addr));
4211 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4214 status = MGMT_STATUS_FAILED;
4218 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4220 status = MGMT_STATUS_SUCCESS;
4223 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4224 &cp->addr, sizeof(cp->addr));
4226 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler. Mirror of block_device: removes the
 * address from hdev->blacklist (a miss maps to invalid params) and on
 * success broadcasts MGMT_EV_DEVICE_UNBLOCKED.
 */
4231 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4234 struct mgmt_cp_unblock_device *cp = data;
4238 BT_DBG("%s", hdev->name);
4240 if (!bdaddr_type_is_valid(cp->addr.type))
4241 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4242 MGMT_STATUS_INVALID_PARAMS,
4243 &cp->addr, sizeof(cp->addr));
4247 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4250 status = MGMT_STATUS_INVALID_PARAMS;
4254 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4256 status = MGMT_STATUS_SUCCESS;
4259 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4260 &cp->addr, sizeof(cp->addr));
4262 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler. Accepts source values 0x0000-0x0002
 * only, stores source/vendor/product/version on hdev and runs an HCI
 * request (contents not visible in this excerpt — presumably an EIR
 * refresh; verify against the full source).
 */
4267 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4270 struct mgmt_cp_set_device_id *cp = data;
4271 struct hci_request req;
4275 BT_DBG("%s", hdev->name);
4277 source = __le16_to_cpu(cp->source);
4279 if (source > 0x0002)
4280 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4281 MGMT_STATUS_INVALID_PARAMS);
4285 hdev->devid_source = source;
4286 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4287 hdev->devid_product = __le16_to_cpu(cp->product);
4288 hdev->devid_version = __le16_to_cpu(cp->version);
4290 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4292 hci_req_init(&req, hdev);
4294 hci_req_run(&req, NULL);
4296 hci_dev_unlock(hdev);
/* HCI request completion for SET_ADVERTISING. On failure, answers every
 * pending SET_ADVERTISING command with the translated error. On
 * success, syncs the HCI_ADVERTISING dev flag with the controller's
 * HCI_LE_ADV state, answers the pending commands with the new settings
 * and emits a new_settings event if anything changed.
 */
4301 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4304 struct cmd_lookup match = { NULL, hdev };
4309 u8 mgmt_err = mgmt_status(status);
4311 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4312 cmd_status_rsp, &mgmt_err);
4316 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4317 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4319 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4321 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4324 new_settings(hdev, match.sk);
4330 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler. Requires LE support and a 0/1 value.
 * When no HCI traffic is needed (adapter off, value unchanged, LE links
 * up, or an active LE scan running) the HCI_ADVERTISING flag is toggled
 * directly and the settings response (plus new_settings on change) is
 * sent. Otherwise — unless a SET_ADVERTISING/SET_LE command is already
 * pending — an enable/disable advertising request is queued and
 * finished by set_advertising_complete.
 */
4333 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4336 struct mgmt_mode *cp = data;
4337 struct pending_cmd *cmd;
4338 struct hci_request req;
4339 u8 val, enabled, status;
4342 BT_DBG("request for %s", hdev->name);
4344 status = mgmt_le_support(hdev);
4346 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4349 if (cp->val != 0x00 && cp->val != 0x01)
4350 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4351 MGMT_STATUS_INVALID_PARAMS);
4356 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4358 /* The following conditions are ones which mean that we should
4359 * not do any HCI communication but directly send a mgmt
4360 * response to user space (after toggling the flag if
4363 if (!hdev_is_powered(hdev) || val == enabled ||
4364 hci_conn_num(hdev, LE_LINK) > 0 ||
4365 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4366 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4367 bool changed = false;
4369 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4370 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4374 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4379 err = new_settings(hdev, sk);
4384 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4385 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4386 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4391 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4397 hci_req_init(&req, hdev);
4400 enable_advertising(&req);
4402 disable_advertising(&req);
4404 err = hci_req_run(&req, set_advertising_complete);
4406 mgmt_pending_remove(cmd);
4409 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler. Only allowed on LE-capable,
 * powered-off adapters. A non-ANY address must not be BDADDR_NONE and
 * must have its two most significant bits set (static random address
 * format). The value is stored in hdev->static_addr.
 */
4413 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4414 void *data, u16 len)
4416 struct mgmt_cp_set_static_address *cp = data;
4419 BT_DBG("%s", hdev->name);
4421 if (!lmp_le_capable(hdev))
4422 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4423 MGMT_STATUS_NOT_SUPPORTED);
4425 if (hdev_is_powered(hdev))
4426 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4427 MGMT_STATUS_REJECTED);
4429 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4430 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4431 return cmd_status(sk, hdev->id,
4432 MGMT_OP_SET_STATIC_ADDRESS,
4433 MGMT_STATUS_INVALID_PARAMS);
4435 /* Two most significant bits shall be set */
4436 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4437 return cmd_status(sk, hdev->id,
4438 MGMT_OP_SET_STATIC_ADDRESS,
4439 MGMT_STATUS_INVALID_PARAMS);
4444 bacpy(&hdev->static_addr, &cp->bdaddr);
4446 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4448 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler. Requires LE support; interval and
 * window must each be within 0x0004-0x4000 and the window must not
 * exceed the interval. Stores the values on hdev and, if a background
 * (non-discovery) scan is running, restarts passive scanning so the new
 * parameters take effect.
 */
4453 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4454 void *data, u16 len)
4456 struct mgmt_cp_set_scan_params *cp = data;
4457 __u16 interval, window;
4460 BT_DBG("%s", hdev->name);
4462 if (!lmp_le_capable(hdev))
4463 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4464 MGMT_STATUS_NOT_SUPPORTED);
4466 interval = __le16_to_cpu(cp->interval);
4468 if (interval < 0x0004 || interval > 0x4000)
4469 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4470 MGMT_STATUS_INVALID_PARAMS);
4472 window = __le16_to_cpu(cp->window);
4474 if (window < 0x0004 || window > 0x4000)
4475 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4476 MGMT_STATUS_INVALID_PARAMS);
4478 if (window > interval)
4479 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4480 MGMT_STATUS_INVALID_PARAMS);
4484 hdev->le_scan_interval = interval;
4485 hdev->le_scan_window = window;
4487 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4489 /* If background scan is running, restart it so new parameters are
4492 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4493 hdev->discovery.state == DISCOVERY_STOPPED) {
4494 struct hci_request req;
4496 hci_req_init(&req, hdev);
4498 hci_req_add_le_scan_disable(&req);
4499 hci_req_add_le_passive_scan(&req);
4501 hci_req_run(&req, NULL);
4504 hci_dev_unlock(hdev);
/* HCI request completion for SET_FAST_CONNECTABLE: reports failure via
 * cmd_status; on success syncs the HCI_FAST_CONNECTABLE flag with the
 * requested mode stored in the pending command's parameters, sends the
 * settings response and a new_settings event.
 */
4509 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4512 struct pending_cmd *cmd;
4514 BT_DBG("status 0x%02x", status);
4518 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4523 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4524 mgmt_status(status));
4526 struct mgmt_mode *cp = cmd->param;
4529 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4531 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4533 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4534 new_settings(hdev, cmd->sk);
4537 mgmt_pending_remove(cmd);
4540 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler. Requires BR/EDR enabled on a
 * controller of at least Bluetooth 1.2, a 0/1 value, a powered adapter
 * and the connectable setting on. Busy if a command is already pending;
 * a no-op change just resends the settings. Otherwise a
 * write_fast_connectable HCI request is queued and finished by
 * fast_connectable_complete.
 */
4543 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4544 void *data, u16 len)
4546 struct mgmt_mode *cp = data;
4547 struct pending_cmd *cmd;
4548 struct hci_request req;
4551 BT_DBG("%s", hdev->name);
4553 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4554 hdev->hci_ver < BLUETOOTH_VER_1_2)
4555 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4556 MGMT_STATUS_NOT_SUPPORTED);
4558 if (cp->val != 0x00 && cp->val != 0x01)
4559 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4560 MGMT_STATUS_INVALID_PARAMS);
4562 if (!hdev_is_powered(hdev))
4563 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4564 MGMT_STATUS_NOT_POWERED);
4566 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4567 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4568 MGMT_STATUS_REJECTED);
4572 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4573 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4578 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4579 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4584 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4591 hci_req_init(&req, hdev);
4593 write_fast_connectable(&req, cp->val);
4595 err = hci_req_run(&req, fast_connectable_complete);
4597 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4598 MGMT_STATUS_FAILED);
4599 mgmt_pending_remove(cmd);
4603 hci_dev_unlock(hdev);
/* Completion callback for the HCI request queued by set_bredr():
 * on failure restore HCI_BREDR_ENABLED (it was flipped optimistically
 * before sending), on success confirm the new settings to user space.
 * NOTE(review): chunk is elided -- some original lines are missing.
 */
4608 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4610 struct pending_cmd *cmd;
4612 BT_DBG("status 0x%02x", status);
4616 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4621 u8 mgmt_err = mgmt_status(status);
4623 /* We need to restore the flag if related HCI commands
4626 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4628 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4630 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4631 new_settings(hdev, cmd->sk);
4634 mgmt_pending_remove(cmd);
4637 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: toggle BR/EDR support on a dual-mode
 * (BR/EDR + LE) controller. When powered, enabling queues HCI commands
 * (page scan, advertising data) and completes in set_bredr_complete();
 * disabling while powered is rejected.
 * NOTE(review): this chunk is elided -- braces, hci_dev_lock(), goto
 * labels and some statements from the original file are not visible.
 */
4640 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4642 struct mgmt_mode *cp = data;
4643 struct pending_cmd *cmd;
4644 struct hci_request req;
4647 BT_DBG("request for %s", hdev->name);
/* Only meaningful on dual-mode controllers. */
4649 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4650 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4651 MGMT_STATUS_NOT_SUPPORTED);
/* LE must stay enabled; BR/EDR-only operation is configured elsewhere. */
4653 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4654 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4655 MGMT_STATUS_REJECTED);
4657 if (cp->val != 0x00 && cp->val != 0x01)
4658 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4659 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just reply with current settings. */
4663 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4664 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: update flags only; BR/EDR-dependent settings are
 * cleared when BR/EDR gets disabled.
 */
4668 if (!hdev_is_powered(hdev)) {
4670 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4671 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4672 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4673 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4674 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4677 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4679 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4683 err = new_settings(hdev, sk);
4687 /* Reject disabling when powered on */
4689 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4690 MGMT_STATUS_REJECTED);
4693 /* When configuring a dual-mode controller to operate
4694 * with LE only and using a static address, then switching
4695 * BR/EDR back on is not allowed.
4697 * Dual-mode controllers shall operate with the public
4698 * address as its identity address for BR/EDR and LE. So
4699 * reject the attempt to create an invalid configuration.
4701 * The same restrictions applies when secure connections
4702 * has been enabled. For BR/EDR this is a controller feature
4703 * while for LE it is a host stack feature. This means that
4704 * switching BR/EDR back on when secure connections has been
4705 * enabled is not a supported transaction.
4707 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4708 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4709 test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
4710 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4711 MGMT_STATUS_REJECTED);
4716 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4717 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4722 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4728 /* We need to flip the bit already here so that update_adv_data
4729 * generates the correct flags.
4731 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4733 hci_req_init(&req, hdev);
4735 write_fast_connectable(&req, false);
4736 __hci_update_page_scan(&req);
4738 /* Since only the advertising data flags will change, there
4739 * is no need to update the scan response data.
4741 update_adv_data(&req);
4743 err = hci_req_run(&req, set_bredr_complete);
4745 mgmt_pending_remove(cmd);
4748 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SECURE_CONN: enable/disable Secure Connections.
 * val may be 0x00 (off), 0x01 (on) or 0x02 (SC-only mode, tracked via
 * HCI_SC_ONLY). When powered with BR/EDR, HCI_OP_WRITE_SC_SUPPORT is
 * sent to the controller; otherwise only host flags are updated.
 * NOTE(review): this chunk is elided -- braces, hci_dev_lock(), the
 * declarations of 'val'/'changed' and some statements are not visible.
 */
4752 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4753 void *data, u16 len)
4755 struct mgmt_mode *cp = data;
4756 struct pending_cmd *cmd;
4760 BT_DBG("request for %s", hdev->name);
/* Need either controller SC support or LE enabled on the host. */
4762 if (!lmp_sc_capable(hdev) &&
4763 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4764 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4765 MGMT_STATUS_NOT_SUPPORTED);
/* With BR/EDR active, SC requires SSP to be enabled first. */
4767 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4768 !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4769 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4770 MGMT_STATUS_REJECTED);
4772 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4773 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4774 MGMT_STATUS_INVALID_PARAMS);
/* No controller command needed: only adjust the host-side flags. */
4778 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4779 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4783 changed = !test_and_set_bit(HCI_SC_ENABLED,
4785 if (cp->val == 0x02)
4786 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4788 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4790 changed = test_and_clear_bit(HCI_SC_ENABLED,
4792 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4795 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4800 err = new_settings(hdev, sk);
4805 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4806 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Requested state already active (including the SC-only sub-mode). */
4813 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4814 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4815 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4819 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4825 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4827 mgmt_pending_remove(cmd);
4831 if (cp->val == 0x02)
4832 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4834 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4837 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEBUG_KEYS: control storage (HCI_KEEP_DEBUG_KEYS)
 * and generation (HCI_USE_DEBUG_KEYS, val == 0x02) of SSP debug keys.
 * When generation changes on a powered device with SSP enabled, the
 * controller's SSP debug mode is updated as well.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock() and some
 * statements from the original file are not visible here.
 */
4841 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4842 void *data, u16 len)
4844 struct mgmt_mode *cp = data;
4845 bool changed, use_changed;
4848 BT_DBG("request for %s", hdev->name);
4850 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4851 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4852 MGMT_STATUS_INVALID_PARAMS);
/* Non-zero val keeps debug keys around instead of discarding them. */
4857 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4860 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
/* val == 0x02 additionally turns on debug-key generation. */
4863 if (cp->val == 0x02)
4864 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4867 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Push the new debug mode to the controller when it matters. */
4870 if (hdev_is_powered(hdev) && use_changed &&
4871 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4872 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4873 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4874 sizeof(mode), &mode);
4877 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4882 err = new_settings(hdev, sk);
4885 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PRIVACY: enable/disable LE privacy and install
 * the local IRK supplied by user space. Only allowed while powered
 * off, since it changes the identity configuration.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), the
 * declarations of 'err'/'changed' and some statements are not visible.
 */
4889 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4892 struct mgmt_cp_set_privacy *cp = cp_data;
4896 BT_DBG("request for %s", hdev->name);
4898 if (!lmp_le_capable(hdev))
4899 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4900 MGMT_STATUS_NOT_SUPPORTED);
4902 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4903 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4904 MGMT_STATUS_INVALID_PARAMS);
/* Changing privacy/IRK on a powered controller is not allowed. */
4906 if (hdev_is_powered(hdev))
4907 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4908 MGMT_STATUS_REJECTED);
4912 /* If user space supports this command it is also expected to
4913 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4915 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
/* Enabling: store the new IRK and force a fresh RPA generation. */
4918 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4919 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4920 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Disabling: wipe the IRK and clear the RPA expiry marker. */
4922 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4923 memset(hdev->irk, 0, sizeof(hdev->irk));
4924 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4927 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4932 err = new_settings(hdev, sk);
4935 hci_dev_unlock(hdev);
/* Validate one IRK entry from MGMT_OP_LOAD_IRKS: the address must be
 * LE public, or LE random of static type (two most significant bits of
 * the most significant byte set, per the Core Specification).
 * NOTE(review): chunk is elided -- the return statements and closing
 * braces from the original file are not visible here.
 */
4939 static bool irk_is_valid(struct mgmt_irk_info *irk)
4941 switch (irk->addr.type) {
4942 case BDADDR_LE_PUBLIC:
4945 case BDADDR_LE_RANDOM:
4946 /* Two most significant bits shall be set */
4947 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_IRKS: replace the device's stored Identity
 * Resolving Keys with the list supplied by user space. The count is
 * bounded (max_irk_count) and the payload length cross-checked before
 * any entry is validated, guarding against malformed input.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), variable
 * declarations and some statements are not visible here.
 */
4955 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4958 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound that keeps expected_len below U16_MAX. */
4959 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4960 sizeof(struct mgmt_irk_info));
4961 u16 irk_count, expected_len;
4964 BT_DBG("request for %s", hdev->name);
4966 if (!lmp_le_capable(hdev))
4967 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4968 MGMT_STATUS_NOT_SUPPORTED);
4970 irk_count = __le16_to_cpu(cp->irk_count);
4971 if (irk_count > max_irk_count) {
4972 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4973 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4974 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size exactly. */
4977 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4978 if (expected_len != len) {
4979 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4981 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4982 MGMT_STATUS_INVALID_PARAMS);
4985 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before touching the existing IRK store. */
4987 for (i = 0; i < irk_count; i++) {
4988 struct mgmt_irk_info *key = &cp->irks[i];
4990 if (!irk_is_valid(key))
4991 return cmd_status(sk, hdev->id,
4993 MGMT_STATUS_INVALID_PARAMS);
/* All entries valid: replace the stored IRKs wholesale. */
4998 hci_smp_irks_clear(hdev);
5000 for (i = 0; i < irk_count; i++) {
5001 struct mgmt_irk_info *irk = &cp->irks[i];
5004 if (irk->addr.type == BDADDR_LE_PUBLIC)
5005 addr_type = ADDR_LE_DEV_PUBLIC;
5007 addr_type = ADDR_LE_DEV_RANDOM;
5009 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5013 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5015 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5017 hci_dev_unlock(hdev);
/* Validate one LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS: 'master'
 * must be boolean and the address LE public or LE random static (two
 * most significant bits set, per the Core Specification).
 * NOTE(review): chunk is elided -- the return statements and closing
 * braces from the original file are not visible here.
 */
5022 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5024 if (key->master != 0x00 && key->master != 0x01)
5027 switch (key->addr.type) {
5028 case BDADDR_LE_PUBLIC:
5031 case BDADDR_LE_RANDOM:
5032 /* Two most significant bits shall be set */
5033 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the device's stored SMP
 * Long Term Keys with the list supplied by user space. Count and
 * payload length are validated first; each entry's mgmt key type is
 * then mapped onto an SMP LTK type plus an authenticated flag.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), 'break'
 * statements, the default case and other lines are not visible here.
 */
5041 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5042 void *cp_data, u16 len)
5044 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound that keeps expected_len below U16_MAX. */
5045 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5046 sizeof(struct mgmt_ltk_info));
5047 u16 key_count, expected_len;
5050 BT_DBG("request for %s", hdev->name);
5052 if (!lmp_le_capable(hdev))
5053 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5054 MGMT_STATUS_NOT_SUPPORTED);
5056 key_count = __le16_to_cpu(cp->key_count);
5057 if (key_count > max_key_count) {
5058 BT_ERR("load_ltks: too big key_count value %u", key_count);
5059 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5060 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size exactly. */
5063 expected_len = sizeof(*cp) + key_count *
5064 sizeof(struct mgmt_ltk_info);
5065 if (expected_len != len) {
5066 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5068 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5069 MGMT_STATUS_INVALID_PARAMS);
5072 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before touching the existing LTK store. */
5074 for (i = 0; i < key_count; i++) {
5075 struct mgmt_ltk_info *key = &cp->keys[i];
5077 if (!ltk_is_valid(key))
5078 return cmd_status(sk, hdev->id,
5079 MGMT_OP_LOAD_LONG_TERM_KEYS,
5080 MGMT_STATUS_INVALID_PARAMS);
/* All entries valid: replace the stored LTKs wholesale. */
5085 hci_smp_ltks_clear(hdev);
5087 for (i = 0; i < key_count; i++) {
5088 struct mgmt_ltk_info *key = &cp->keys[i];
5089 u8 type, addr_type, authenticated;
5091 if (key->addr.type == BDADDR_LE_PUBLIC)
5092 addr_type = ADDR_LE_DEV_PUBLIC;
5094 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt key type to SMP type + authentication level. */
5096 switch (key->type) {
5097 case MGMT_LTK_UNAUTHENTICATED:
5098 authenticated = 0x00;
5099 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5101 case MGMT_LTK_AUTHENTICATED:
5102 authenticated = 0x01;
5103 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5105 case MGMT_LTK_P256_UNAUTH:
5106 authenticated = 0x00;
5107 type = SMP_LTK_P256;
5109 case MGMT_LTK_P256_AUTH:
5110 authenticated = 0x01;
5111 type = SMP_LTK_P256;
5113 case MGMT_LTK_P256_DEBUG:
5114 authenticated = 0x00;
5115 type = SMP_LTK_P256_DEBUG;
5120 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5121 authenticated, key->val, key->enc_size, key->ediv,
5125 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5128 hci_dev_unlock(hdev);
/* cmd_complete handler for MGMT_OP_GET_CONN_INFO: build the response
 * from the cached values in the hci_conn on success, or with the
 * invalid-marker sentinels on failure, then drop the connection
 * reference taken in get_conn_info().
 * NOTE(review): chunk is elided -- braces, the hci_conn_put() /
 * return lines from the original file are not visible here.
 */
5133 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5135 struct hci_conn *conn = cmd->user_data;
5136 struct mgmt_rp_get_conn_info rp;
/* cmd->param holds the original mgmt_addr_info from the request. */
5139 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5141 if (status == MGMT_STATUS_SUCCESS) {
5142 rp.rssi = conn->rssi;
5143 rp.tx_power = conn->tx_power;
5144 rp.max_tx_power = conn->max_tx_power;
5146 rp.rssi = HCI_RSSI_INVALID;
5147 rp.tx_power = HCI_TX_POWER_INVALID;
5148 rp.max_tx_power = HCI_TX_POWER_INVALID;
5151 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5154 hci_conn_drop(conn);
/* Completion callback for the Read RSSI / Read TX Power request built
 * in get_conn_info(): recover the connection handle from the last sent
 * command and finish the matching pending GET_CONN_INFO command.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), goto
 * labels and some statements from the original file are not visible.
 */
5160 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5163 struct hci_cp_read_rssi *cp;
5164 struct pending_cmd *cmd;
5165 struct hci_conn *conn;
5169 BT_DBG("status 0x%02x", hci_status);
5173 /* Commands sent in request are either Read RSSI or Read Transmit Power
5174 * Level so we check which one was last sent to retrieve connection
5175 * handle. Both commands have handle as first parameter so it's safe to
5176 * cast data on the same command struct.
5178 * First command sent is always Read RSSI and we fail only if it fails.
5179 * In other case we simply override error to indicate success as we
5180 * already remembered if TX power value is actually valid.
5182 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5184 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
/* Read TX Power was last: treat the request as successful overall. */
5185 status = MGMT_STATUS_SUCCESS;
5187 status = mgmt_status(hci_status);
5191 BT_ERR("invalid sent_cmd in conn_info response");
5195 handle = __le16_to_cpu(cp->handle);
5196 conn = hci_conn_hash_lookup_handle(hdev, handle);
5198 BT_ERR("unknown handle (%d) in conn_info response", handle);
/* Match the pending command by its attached connection. */
5202 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5206 cmd->cmd_complete(cmd, status);
5207 mgmt_pending_remove(cmd);
5210 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CONN_INFO: return RSSI / TX power / max TX power
 * for an existing connection. Cached values are reused while fresh
 * (within a randomized conn_info_min_age..conn_info_max_age window);
 * otherwise an HCI request refreshes them and the reply is deferred to
 * conn_info_refresh_complete().
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), goto
 * labels and some statements from the original file are not visible.
 */
5213 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5216 struct mgmt_cp_get_conn_info *cp = data;
5217 struct mgmt_rp_get_conn_info rp;
5218 struct hci_conn *conn;
5219 unsigned long conn_info_age;
5222 BT_DBG("%s", hdev->name);
/* Echo the requested address back in every response. */
5224 memset(&rp, 0, sizeof(rp));
5225 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5226 rp.addr.type = cp->addr.type;
5228 if (!bdaddr_type_is_valid(cp->addr.type))
5229 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5230 MGMT_STATUS_INVALID_PARAMS,
5235 if (!hdev_is_powered(hdev)) {
5236 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5237 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look up the connection on the transport matching the address type. */
5241 if (cp->addr.type == BDADDR_BREDR)
5242 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5245 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5247 if (!conn || conn->state != BT_CONNECTED) {
5248 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5249 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Only one GET_CONN_INFO per connection may be in flight. */
5253 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5254 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5255 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5259 /* To avoid client trying to guess when to poll again for information we
5260 * calculate conn info age as random value between min/max set in hdev.
5262 conn_info_age = hdev->conn_info_min_age +
5263 prandom_u32_max(hdev->conn_info_max_age -
5264 hdev->conn_info_min_age);
5266 /* Query controller to refresh cached values if they are too old or were
5269 if (time_after(jiffies, conn->conn_info_timestamp +
5270 msecs_to_jiffies(conn_info_age)) ||
5271 !conn->conn_info_timestamp) {
5272 struct hci_request req;
5273 struct hci_cp_read_tx_power req_txp_cp;
5274 struct hci_cp_read_rssi req_rssi_cp;
5275 struct pending_cmd *cmd;
/* Read RSSI is always the first command in the request; the
 * completion handler relies on this ordering.
 */
5277 hci_req_init(&req, hdev);
5278 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5279 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5282 /* For LE links TX power does not change thus we don't need to
5283 * query for it once value is known.
5285 if (!bdaddr_type_is_le(cp->addr.type) ||
5286 conn->tx_power == HCI_TX_POWER_INVALID) {
5287 req_txp_cp.handle = cpu_to_le16(conn->handle);
5288 req_txp_cp.type = 0x00;
5289 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5290 sizeof(req_txp_cp), &req_txp_cp);
5293 /* Max TX power needs to be read only once per connection */
5294 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5295 req_txp_cp.handle = cpu_to_le16(conn->handle);
5296 req_txp_cp.type = 0x01;
5297 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5298 sizeof(req_txp_cp), &req_txp_cp);
5301 err = hci_req_run(&req, conn_info_refresh_complete);
5305 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the connection alive until the refresh completes. */
5312 hci_conn_hold(conn);
5313 cmd->user_data = hci_conn_get(conn);
5314 cmd->cmd_complete = conn_info_cmd_complete;
5316 conn->conn_info_timestamp = jiffies;
5318 /* Cache is valid, just reply with values cached in hci_conn */
5319 rp.rssi = conn->rssi;
5320 rp.tx_power = conn->tx_power;
5321 rp.max_tx_power = conn->max_tx_power;
5323 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5324 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5328 hci_dev_unlock(hdev);
/* cmd_complete handler for MGMT_OP_GET_CLOCK_INFO: fill in the local
 * clock (and, if a connection was involved, the piconet clock and
 * accuracy) before sending the response and releasing the references.
 * NOTE(review): chunk is elided -- braces, the success checks and the
 * hci_dev_put()/hci_conn_put() lines are not visible here. Also note
 * the memcpy source is '&cmd->param' while conn_info_cmd_complete()
 * uses 'cmd->param'; whether param is an embedded buffer or a pointer
 * cannot be determined from this view -- verify against the
 * pending_cmd definition.
 */
5332 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5334 struct hci_conn *conn = cmd->user_data;
5335 struct mgmt_rp_get_clock_info rp;
5336 struct hci_dev *hdev;
5339 memset(&rp, 0, sizeof(rp));
5340 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5345 hdev = hci_dev_get(cmd->index);
5347 rp.local_clock = cpu_to_le32(hdev->clock);
/* Piconet clock/accuracy only exist when a connection was queried. */
5352 rp.piconet_clock = cpu_to_le32(conn->clock);
5353 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5357 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5361 hci_conn_drop(conn);
/* Completion callback for the HCI Read Clock request built in
 * get_clock_info(): if a piconet clock was read (which != 0), resolve
 * the connection from the handle, then finish the matching pending
 * GET_CLOCK_INFO command.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), the
 * 'conn = NULL' path and goto labels are not visible here.
 */
5368 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5370 struct hci_cp_read_clock *hci_cp;
5371 struct pending_cmd *cmd;
5372 struct hci_conn *conn;
5374 BT_DBG("%s status %u", hdev->name, status);
5378 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a connection was requested. */
5382 if (hci_cp->which) {
5383 u16 handle = __le16_to_cpu(hci_cp->handle);
5384 conn = hci_conn_hash_lookup_handle(hdev, handle);
5389 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5393 cmd->cmd_complete(cmd, mgmt_status(status));
5394 mgmt_pending_remove(cmd);
5397 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CLOCK_INFO: read the local Bluetooth clock and,
 * for a non-ANY BR/EDR address, the piconet clock of the matching ACL
 * connection. The reply is deferred to get_clock_info_complete().
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), goto
 * labels and some statements from the original file are not visible.
 */
5400 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5403 struct mgmt_cp_get_clock_info *cp = data;
5404 struct mgmt_rp_get_clock_info rp;
5405 struct hci_cp_read_clock hci_cp;
5406 struct pending_cmd *cmd;
5407 struct hci_request req;
5408 struct hci_conn *conn;
5411 BT_DBG("%s", hdev->name);
/* Echo the requested address back in every response. */
5413 memset(&rp, 0, sizeof(rp));
5414 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5415 rp.addr.type = cp->addr.type;
/* Piconet clocks only exist for BR/EDR links. */
5417 if (cp->addr.type != BDADDR_BREDR)
5418 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5419 MGMT_STATUS_INVALID_PARAMS,
5424 if (!hdev_is_powered(hdev)) {
5425 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5426 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-ANY address: a connected ACL link to it is required. */
5430 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5431 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5433 if (!conn || conn->state != BT_CONNECTED) {
5434 err = cmd_complete(sk, hdev->id,
5435 MGMT_OP_GET_CLOCK_INFO,
5436 MGMT_STATUS_NOT_CONNECTED,
5444 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5450 cmd->cmd_complete = clock_info_cmd_complete;
5452 hci_req_init(&req, hdev);
/* which = 0x00 (zeroed struct): read the local clock first. */
5454 memset(&hci_cp, 0, sizeof(hci_cp));
5455 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* With a connection, also read its piconet clock; hold a reference
 * until the request completes.
 */
5458 hci_conn_hold(conn);
5459 cmd->user_data = hci_conn_get(conn);
5461 hci_cp.handle = cpu_to_le16(conn->handle);
5462 hci_cp.which = 0x01; /* Piconet clock */
5463 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5466 err = hci_req_run(&req, get_clock_info_complete);
5468 mgmt_pending_remove(cmd);
5471 hci_dev_unlock(hdev);
/* Return whether an LE connection to addr (with matching destination
 * address type) exists and is in BT_CONNECTED state.
 * NOTE(review): chunk is elided -- the return statements and closing
 * braces from the original file are not visible here.
 */
5475 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5477 struct hci_conn *conn;
5479 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5483 if (conn->dst_type != type)
5486 if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for an LE
 * device's connection parameters, moving the entry onto the matching
 * action list (pend_le_reports / pend_le_conns) and refreshing the
 * background scan where the policy change requires it.
 * NOTE(review): chunk is elided -- braces, 'break' statements, error
 * returns and the final return line are not visible here.
 */
5493 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5494 u8 addr_type, u8 auto_connect)
5496 struct hci_dev *hdev = req->hdev;
5497 struct hci_conn_params *params;
5499 params = hci_conn_params_add(hdev, addr, addr_type);
/* Policy unchanged: nothing to do. */
5503 if (params->auto_connect == auto_connect)
/* Detach from any previous action list before re-filing. */
5506 list_del_init(&params->action);
5508 switch (auto_connect) {
5509 case HCI_AUTO_CONN_DISABLED:
5510 case HCI_AUTO_CONN_LINK_LOSS:
5511 __hci_update_background_scan(req);
5513 case HCI_AUTO_CONN_REPORT:
5514 list_add(&params->action, &hdev->pend_le_reports);
5515 __hci_update_background_scan(req);
5517 case HCI_AUTO_CONN_DIRECT:
5518 case HCI_AUTO_CONN_ALWAYS:
/* Only queue for connection if not already connected. */
5519 if (!is_connected(hdev, addr, addr_type)) {
5520 list_add(&params->action, &hdev->pend_le_conns);
5521 __hci_update_background_scan(req);
5526 params->auto_connect = auto_connect;
5528 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event for the given address, skipping
 * the socket that issued the originating command.
 * NOTE(review): chunk is elided -- the 'ev.action' assignment and
 * braces from the original file are not visible here.
 */
5534 static void device_added(struct sock *sk, struct hci_dev *hdev,
5535 bdaddr_t *bdaddr, u8 type, u8 action)
5537 struct mgmt_ev_device_added ev;
5539 bacpy(&ev.addr.bdaddr, bdaddr);
5540 ev.addr.type = type;
5543 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Completion callback for the HCI request queued by add_device():
 * finish the pending ADD_DEVICE command with the mapped status.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock() and the
 * 'if (!cmd)' guard from the original file are not visible here.
 */
5546 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5548 struct pending_cmd *cmd;
5550 BT_DBG("status 0x%02x", status);
5554 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5558 cmd->cmd_complete(cmd, mgmt_status(status));
5559 mgmt_pending_remove(cmd);
5562 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_DEVICE: whitelist a BR/EDR address (action 0x01
 * only) or configure LE auto-connect policy for an address (0x00 =
 * background scan/report, 0x01 = direct connect, 0x02 = always
 * connect). Emits MGMT_EV_DEVICE_ADDED and runs the resulting HCI
 * request asynchronously.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), goto
 * labels and some statements from the original file are not visible.
 */
5565 static int add_device(struct sock *sk, struct hci_dev *hdev,
5566 void *data, u16 len)
5568 struct mgmt_cp_add_device *cp = data;
5569 struct pending_cmd *cmd;
5570 struct hci_request req;
5571 u8 auto_conn, addr_type;
5574 BT_DBG("%s", hdev->name);
/* Address must be a valid type and not the ANY wildcard. */
5576 if (!bdaddr_type_is_valid(cp->addr.type) ||
5577 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5578 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5579 MGMT_STATUS_INVALID_PARAMS,
5580 &cp->addr, sizeof(cp->addr));
5582 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5583 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5584 MGMT_STATUS_INVALID_PARAMS,
5585 &cp->addr, sizeof(cp->addr));
5587 hci_req_init(&req, hdev);
5591 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5597 cmd->cmd_complete = addr_cmd_complete;
5599 if (cp->addr.type == BDADDR_BREDR) {
5600 /* Only incoming connections action is supported for now */
5601 if (cp->action != 0x01) {
5602 err = cmd->cmd_complete(cmd,
5603 MGMT_STATUS_INVALID_PARAMS);
5604 mgmt_pending_remove(cmd);
/* BR/EDR: add to the whitelist and refresh page scanning. */
5608 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5613 __hci_update_page_scan(&req);
5618 if (cp->addr.type == BDADDR_LE_PUBLIC)
5619 addr_type = ADDR_LE_DEV_PUBLIC;
5621 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt action onto the LE auto-connect policy. */
5623 if (cp->action == 0x02)
5624 auto_conn = HCI_AUTO_CONN_ALWAYS;
5625 else if (cp->action == 0x01)
5626 auto_conn = HCI_AUTO_CONN_DIRECT;
5628 auto_conn = HCI_AUTO_CONN_REPORT;
5630 /* If the connection parameters don't exist for this device,
5631 * they will be created and configured with defaults.
5633 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5635 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5636 mgmt_pending_remove(cmd);
5641 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5643 err = hci_req_run(&req, add_device_complete);
5645 /* ENODATA means no HCI commands were needed (e.g. if
5646 * the adapter is powered off).
5648 if (err == -ENODATA)
5649 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5650 mgmt_pending_remove(cmd);
5654 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event for the given address,
 * skipping the socket that issued the originating command.
 * NOTE(review): chunk is elided -- braces from the original file are
 * not visible here.
 */
5658 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5659 bdaddr_t *bdaddr, u8 type)
5661 struct mgmt_ev_device_removed ev;
5663 bacpy(&ev.addr.bdaddr, bdaddr);
5664 ev.addr.type = type;
5666 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Completion callback for the HCI request queued by remove_device():
 * finish the pending REMOVE_DEVICE command with the mapped status.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock() and the
 * 'if (!cmd)' guard from the original file are not visible here.
 */
5669 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5671 struct pending_cmd *cmd;
5673 BT_DBG("status 0x%02x", status);
5677 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5681 cmd->cmd_complete(cmd, mgmt_status(status));
5682 mgmt_pending_remove(cmd);
5685 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_DEVICE: undo add_device() for one address
 * (BR/EDR whitelist entry or LE connection parameters), or — when the
 * address is BDADDR_ANY with type 0 — clear the whole whitelist and
 * all non-disabled LE connection parameter entries. Emits
 * MGMT_EV_DEVICE_REMOVED per removed entry and runs the resulting HCI
 * request asynchronously.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), goto
 * labels and some statements from the original file are not visible.
 */
5688 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5689 void *data, u16 len)
5691 struct mgmt_cp_remove_device *cp = data;
5692 struct pending_cmd *cmd;
5693 struct hci_request req;
5696 BT_DBG("%s", hdev->name);
5698 hci_req_init(&req, hdev);
5702 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5708 cmd->cmd_complete = addr_cmd_complete;
/* Specific address given: remove exactly that entry. */
5710 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5711 struct hci_conn_params *params;
5714 if (!bdaddr_type_is_valid(cp->addr.type)) {
5715 err = cmd->cmd_complete(cmd,
5716 MGMT_STATUS_INVALID_PARAMS);
5717 mgmt_pending_remove(cmd);
5721 if (cp->addr.type == BDADDR_BREDR) {
5722 err = hci_bdaddr_list_del(&hdev->whitelist,
/* Not on the whitelist: nothing to remove. */
5726 err = cmd->cmd_complete(cmd,
5727 MGMT_STATUS_INVALID_PARAMS);
5728 mgmt_pending_remove(cmd);
5732 __hci_update_page_scan(&req);
5734 device_removed(sk, hdev, &cp->addr.bdaddr,
5739 if (cp->addr.type == BDADDR_LE_PUBLIC)
5740 addr_type = ADDR_LE_DEV_PUBLIC;
5742 addr_type = ADDR_LE_DEV_RANDOM;
5744 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5747 err = cmd->cmd_complete(cmd,
5748 MGMT_STATUS_INVALID_PARAMS);
5749 mgmt_pending_remove(cmd);
/* Disabled entries were not created via Add Device; refuse. */
5753 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5754 err = cmd->cmd_complete(cmd,
5755 MGMT_STATUS_INVALID_PARAMS);
5756 mgmt_pending_remove(cmd);
5760 list_del(&params->action);
5761 list_del(&params->list);
5763 __hci_update_background_scan(&req);
5765 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe all whitelist and LE param entries. */
5767 struct hci_conn_params *p, *tmp;
5768 struct bdaddr_list *b, *btmp;
/* Wildcard removal only accepts address type 0. */
5770 if (cp->addr.type) {
5771 err = cmd->cmd_complete(cmd,
5772 MGMT_STATUS_INVALID_PARAMS);
5773 mgmt_pending_remove(cmd);
5777 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5778 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5783 __hci_update_page_scan(&req);
5785 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
/* Keep disabled entries: they were not user-added. */
5786 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5788 device_removed(sk, hdev, &p->addr, p->addr_type);
5789 list_del(&p->action);
5794 BT_DBG("All LE connection parameters were removed");
5796 __hci_update_background_scan(&req);
5800 err = hci_req_run(&req, remove_device_complete);
5802 /* ENODATA means no HCI commands were needed (e.g. if
5803 * the adapter is powered off).
5805 if (err == -ENODATA)
5806 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5807 mgmt_pending_remove(cmd);
5811 hci_dev_unlock(hdev);
/* Handle MGMT_OP_LOAD_CONN_PARAM: replace stored LE connection
 * parameters with the list supplied by user space. Count and payload
 * length are validated up front; individual entries with an invalid
 * address type or out-of-range parameters are skipped (logged, not
 * fatal) rather than failing the whole command.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock(), 'continue'
 * statements and some lines from the original file are not visible.
 */
5815 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5818 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound that keeps expected_len below U16_MAX. */
5819 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5820 sizeof(struct mgmt_conn_param));
5821 u16 param_count, expected_len;
5824 if (!lmp_le_capable(hdev))
5825 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5826 MGMT_STATUS_NOT_SUPPORTED);
5828 param_count = __le16_to_cpu(cp->param_count);
5829 if (param_count > max_param_count) {
5830 BT_ERR("load_conn_param: too big param_count value %u",
5832 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5833 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size exactly. */
5836 expected_len = sizeof(*cp) + param_count *
5837 sizeof(struct mgmt_conn_param);
5838 if (expected_len != len) {
5839 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5841 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5842 MGMT_STATUS_INVALID_PARAMS);
5845 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop stale disabled entries before loading the new set. */
5849 hci_conn_params_clear_disabled(hdev);
5851 for (i = 0; i < param_count; i++) {
5852 struct mgmt_conn_param *param = &cp->params[i];
5853 struct hci_conn_params *hci_param;
5854 u16 min, max, latency, timeout;
5857 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5860 if (param->addr.type == BDADDR_LE_PUBLIC) {
5861 addr_type = ADDR_LE_DEV_PUBLIC;
5862 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5863 addr_type = ADDR_LE_DEV_RANDOM;
5865 BT_ERR("Ignoring invalid connection parameters");
5869 min = le16_to_cpu(param->min_interval);
5870 max = le16_to_cpu(param->max_interval);
5871 latency = le16_to_cpu(param->latency);
5872 timeout = le16_to_cpu(param->timeout);
5874 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5875 min, max, latency, timeout);
/* Range-check against the Core Specification limits. */
5877 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5878 BT_ERR("Ignoring invalid connection parameters");
5882 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5885 BT_ERR("Failed to add connection parameters");
5889 hci_param->conn_min_interval = min;
5890 hci_param->conn_max_interval = max;
5891 hci_param->conn_latency = latency;
5892 hci_param->supervision_timeout = timeout;
5895 hci_dev_unlock(hdev);
5897 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: mark whether configuration is
 * provided externally (HCI_EXT_CONFIGURED). Only valid while powered
 * off and on controllers with the EXTERNAL_CONFIG quirk. If the
 * change flips the configured/unconfigured state, the controller
 * index is re-announced accordingly.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock() and some
 * statements from the original file are not visible here.
 */
5900 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5901 void *data, u16 len)
5903 struct mgmt_cp_set_external_config *cp = data;
5907 BT_DBG("%s", hdev->name);
5909 if (hdev_is_powered(hdev))
5910 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5911 MGMT_STATUS_REJECTED);
5913 if (cp->config != 0x00 && cp->config != 0x01)
5914 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5915 MGMT_STATUS_INVALID_PARAMS);
5917 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5918 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5919 MGMT_STATUS_NOT_SUPPORTED);
5924 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5927 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5930 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5937 err = new_options(hdev, sk);
/* If configured-ness flipped, move the index between the configured
 * and unconfigured lists and re-run the setup/power-on sequence.
 */
5939 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5940 mgmt_index_removed(hdev);
5942 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5943 set_bit(HCI_CONFIG, &hdev->dev_flags);
5944 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5946 queue_work(hdev->req_workqueue, &hdev->power_on);
5948 set_bit(HCI_RAW, &hdev->flags);
5949 mgmt_index_added(hdev);
5954 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: record the public address to be
 * programmed into the controller via the driver's set_bdaddr hook.
 * Only valid while powered off; if setting the address completes the
 * device's configuration, the index is re-announced as configured and
 * power-on setup is scheduled.
 * NOTE(review): chunk is elided -- braces, hci_dev_lock() and some
 * statements from the original file are not visible here.
 */
5958 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5959 void *data, u16 len)
5961 struct mgmt_cp_set_public_address *cp = data;
5965 BT_DBG("%s", hdev->name);
5967 if (hdev_is_powered(hdev))
5968 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5969 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a valid public address. */
5971 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5972 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5973 MGMT_STATUS_INVALID_PARAMS);
/* The driver must provide a way to program the address. */
5975 if (!hdev->set_bdaddr)
5976 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5977 MGMT_STATUS_NOT_SUPPORTED);
5981 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5982 bacpy(&hdev->public_addr, &cp->bdaddr);
5984 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5991 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5992 err = new_options(hdev, sk);
/* Address completed configuration: re-announce as configured and
 * schedule the power-on setup sequence.
 */
5994 if (is_configured(hdev)) {
5995 mgmt_index_removed(hdev);
5997 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5999 set_bit(HCI_CONFIG, &hdev->dev_flags);
6000 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
6002 queue_work(hdev->req_workqueue, &hdev->power_on);
6006 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode. Each entry holds
 * the handler function, whether the command payload is variable-length
 * (var_len: true means data_len is a minimum, false an exact size), and
 * the expected payload size.
 * NOTE(review): struct member declarations between the func pointer and
 * the initializer list, and the closing "};", are missing from this
 * extraction.
 */
6010 static const struct mgmt_handler {
6011 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
6015 } mgmt_handlers[] = {
6016 { NULL }, /* 0x0000 (no command) */
6017 { read_version, false, MGMT_READ_VERSION_SIZE },
6018 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
6019 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
6020 { read_controller_info, false, MGMT_READ_INFO_SIZE },
6021 { set_powered, false, MGMT_SETTING_SIZE },
6022 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
6023 { set_connectable, false, MGMT_SETTING_SIZE },
6024 { set_fast_connectable, false, MGMT_SETTING_SIZE },
6025 { set_bondable, false, MGMT_SETTING_SIZE },
6026 { set_link_security, false, MGMT_SETTING_SIZE },
6027 { set_ssp, false, MGMT_SETTING_SIZE },
6028 { set_hs, false, MGMT_SETTING_SIZE },
6029 { set_le, false, MGMT_SETTING_SIZE },
6030 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
6031 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
6032 { add_uuid, false, MGMT_ADD_UUID_SIZE },
6033 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* Key-loading commands carry a variable-length list of keys. */
6034 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
6035 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
6036 { disconnect, false, MGMT_DISCONNECT_SIZE },
6037 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
6038 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
6039 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6040 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
6041 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
6042 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6043 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
6044 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
6045 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6046 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
6047 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6048 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6049 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
6050 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6051 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
6052 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
6053 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
6054 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
6055 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
6056 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
6057 { set_advertising, false, MGMT_SETTING_SIZE },
6058 { set_bredr, false, MGMT_SETTING_SIZE },
6059 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
6060 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
6061 { set_secure_conn, false, MGMT_SETTING_SIZE },
6062 { set_debug_keys, false, MGMT_SETTING_SIZE },
6063 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
6064 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
6065 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
6066 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
6067 { add_device, false, MGMT_ADD_DEVICE_SIZE },
6068 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
6069 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
6070 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
6071 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
6072 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
6073 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
6074 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/* Entry point for mgmt commands arriving on an HCI control socket.
 * Copies the message from user space, validates the header, resolves the
 * controller index, checks command applicability, and dispatches to the
 * handler table.
 * NOTE(review): several lines (buf/cp/err declarations, error-path gotos,
 * kfree/hci_dev_put cleanup, closing braces) are missing from this
 * extraction — comments cover only the visible flow.
 */
6077 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6081 struct mgmt_hdr *hdr;
6082 u16 opcode, index, len;
6083 struct hci_dev *hdev = NULL;
6084 const struct mgmt_handler *handler;
6087 BT_DBG("got %zu bytes", msglen);
/* Message must at least contain the fixed mgmt header. */
6089 if (msglen < sizeof(*hdr))
6092 buf = kmalloc(msglen, GFP_KERNEL);
6096 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields are little-endian on the wire. */
6102 opcode = __le16_to_cpu(hdr->opcode);
6103 index = __le16_to_cpu(hdr->index);
6104 len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received. */
6106 if (len != msglen - sizeof(*hdr)) {
6111 if (index != MGMT_INDEX_NONE) {
6112 hdev = hci_dev_get(index);
6114 err = cmd_status(sk, index, opcode,
6115 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup/config, or claimed by a user channel,
 * are not addressable through mgmt.
 */
6119 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
6120 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
6121 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
6122 err = cmd_status(sk, index, opcode,
6123 MGMT_STATUS_INVALID_INDEX);
/* An unconfigured controller only accepts the three configuration
 * related opcodes listed here.
 */
6127 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
6128 opcode != MGMT_OP_READ_CONFIG_INFO &&
6129 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
6130 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
6131 err = cmd_status(sk, index, opcode,
6132 MGMT_STATUS_INVALID_INDEX);
/* Opcode must be within the handler table and implemented. */
6137 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
6138 mgmt_handlers[opcode].func == NULL) {
6139 BT_DBG("Unknown op %u", opcode);
6140 err = cmd_status(sk, index, opcode,
6141 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global (index-less) commands must not carry a controller index... */
6145 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6146 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6147 err = cmd_status(sk, index, opcode,
6148 MGMT_STATUS_INVALID_INDEX);
/* ...and per-controller commands must carry one. */
6152 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
6153 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6154 err = cmd_status(sk, index, opcode,
6155 MGMT_STATUS_INVALID_INDEX);
6159 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed-length
 * commands need exactly data_len bytes.
 */
6161 if ((handler->var_len && len < handler->data_len) ||
6162 (!handler->var_len && len != handler->data_len)) {
6163 err = cmd_status(sk, index, opcode,
6164 MGMT_STATUS_INVALID_PARAMS);
6169 mgmt_init_hdev(sk, hdev);
/* Parameters start immediately after the mgmt header. */
6171 cp = buf + sizeof(*hdr);
6173 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller to mgmt listeners. Non-BR/EDR
 * and raw-quirk devices are skipped; unconfigured controllers get the
 * UNCONF variant of the index-added event.
 */
6187 void mgmt_index_added(struct hci_dev *hdev)
6189 if (hdev->dev_type != HCI_BREDR)
6192 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6195 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6196 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6198 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce controller removal. All pending mgmt commands for this device
 * are completed with INVALID_INDEX before the removal event is sent.
 */
6201 void mgmt_index_removed(struct hci_dev *hdev)
6203 u8 status = MGMT_STATUS_INVALID_INDEX;
6205 if (hdev->dev_type != HCI_BREDR)
6208 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 matches every pending command. */
6211 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6213 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6214 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6216 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6219 /* This function requires the caller holds hdev->lock */
/* Rebuild the pending LE connect/report action lists from the stored
 * per-device auto_connect policies, then refresh background scanning.
 * NOTE(review): break statements and closing braces inside the switch
 * appear to be missing from this extraction.
 */
6220 static void restart_le_actions(struct hci_request *req)
6222 struct hci_dev *hdev = req->hdev;
6223 struct hci_conn_params *p;
6225 list_for_each_entry(p, &hdev->le_conn_params, list) {
6226 /* Needed for AUTO_OFF case where might not "really"
6227 * have been powered off.
/* Detach from whichever action list the entry was on before re-adding. */
6229 list_del_init(&p->action);
6231 switch (p->auto_connect) {
6232 case HCI_AUTO_CONN_DIRECT:
6233 case HCI_AUTO_CONN_ALWAYS:
6234 list_add(&p->action, &hdev->pend_le_conns);
6236 case HCI_AUTO_CONN_REPORT:
6237 list_add(&p->action, &hdev->pend_le_reports);
6244 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence: answers all
 * pending SET_POWERED commands and emits a settings-changed event.
 * NOTE(review): the hci_dev_lock() call and SMP registration code hinted
 * at by the comment below are missing from this extraction.
 */
6247 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6249 struct cmd_lookup match = { NULL, hdev };
6251 BT_DBG("status 0x%02x", status);
6254 /* Register the available SMP channels (BR/EDR and LE) only
6255 * when successfully powering on the controller. This late
6256 * registration is required so that LE SMP can clearly
6257 * decide if the public address or static address is used.
6264 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6266 new_settings(hdev, match.sk);
6268 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state with
 * the mgmt-level settings after power-on: SSP/SC host support, LE host
 * support, advertising data, LE actions, link security and page scan.
 * Returns the result of hci_req_run() (0 if commands were queued).
 * NOTE(review): local declarations (u8 link_sec, mode/support values,
 * memset/cp setup for LE host support) and some braces are missing from
 * this extraction.
 */
6274 static int powered_update_hci(struct hci_dev *hdev)
6276 struct hci_request req;
6279 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt has it enabled but the
 * controller's host feature bit does not reflect that yet.
 */
6281 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6282 !lmp_host_ssp_capable(hdev)) {
6285 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
/* Same idea for BR/EDR Secure Connections support. */
6287 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6290 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6291 sizeof(support), &support);
6295 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6296 lmp_bredr_capable(hdev)) {
6297 struct hci_cp_write_le_host_supported cp;
6302 /* Check first if we already have the right
6303 * host state (host features set)
6305 if (cp.le != lmp_host_le_capable(hdev) ||
6306 cp.simul != lmp_host_le_br_capable(hdev))
6307 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6311 if (lmp_le_capable(hdev)) {
6312 /* Make sure the controller has a good default for
6313 * advertising data. This also applies to the case
6314 * where BR/EDR was toggled during the AUTO_OFF phase.
6316 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6317 update_adv_data(&req);
6318 update_scan_rsp_data(&req);
6321 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6322 enable_advertising(&req);
6324 restart_le_actions(&req);
/* Sync the controller's authentication-enable setting with the mgmt
 * link-security flag.
 */
6327 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6328 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6329 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6330 sizeof(link_sec), &link_sec);
6332 if (lmp_bredr_capable(hdev)) {
6333 write_fast_connectable(&req, false);
6334 __hci_update_page_scan(&req);
6340 return hci_req_run(&req, powered_complete);
/* Called from the HCI core on power state changes. On power-on it kicks
 * off powered_update_hci(); on power-off it fails all pending commands
 * and clears the advertised class of device.
 * NOTE(review): the powered-on/powered-off branch structure (if/goto
 * labels, err declaration, final return) is partially missing from this
 * extraction.
 */
6343 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6345 struct cmd_lookup match = { NULL, hdev };
6346 u8 status, zero_cod[] = { 0, 0, 0 };
/* Nothing to do until a mgmt client has attached to this controller. */
6349 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6353 if (powered_update_hci(hdev) == 0)
6356 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6361 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6363 /* If the power off is because of hdev unregistration let
6364 * use the appropriate INVALID_INDEX status. Otherwise use
6365 * NOT_POWERED. We cover both scenarios here since later in
6366 * mgmt_index_removed() any hci_conn callbacks will have already
6367 * been triggered, potentially causing misleading DISCONNECTED
6370 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6371 status = MGMT_STATUS_INVALID_INDEX;
6373 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 matches every pending command. */
6375 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Tell listeners the class of device is now all-zero, if it wasn't. */
6377 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6378 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6379 zero_cod, sizeof(zero_cod), NULL);
6382 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: RFKILLED when blocked by rfkill,
 * generic FAILED otherwise.
 */
6390 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6392 struct pending_cmd *cmd;
6395 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6399 if (err == -ERFKILL)
6400 status = MGMT_STATUS_RFKILLED;
6402 status = MGMT_STATUS_FAILED;
6404 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6406 mgmt_pending_remove(cmd);
/* Discoverable-timeout handler: clears both discoverable flags, turns
 * off inquiry scan on BR/EDR (leaving page scan on), refreshes LE
 * advertising data and announces the new settings.
 * NOTE(review): the hci_dev_lock() call and some braces are missing from
 * this extraction.
 */
6409 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6411 struct hci_request req;
6415 /* When discoverable timeout triggers, then just make sure
6416 * the limited discoverable flag is cleared. Even in the case
6417 * of a timeout triggered from general discoverable, it is
6418 * safe to unconditionally clear the flag.
6420 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6421 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6423 hci_req_init(&req, hdev);
6424 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE alone: stay connectable but no longer discoverable. */
6425 u8 scan = SCAN_PAGE;
6426 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6427 sizeof(scan), &scan);
6430 update_adv_data(&req);
6431 hci_req_run(&req, NULL);
6433 hdev->discov_timeout = 0;
6435 new_settings(hdev, NULL);
6437 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key,
 * passing the persistence decision through as the store hint.
 */
6440 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6443 struct mgmt_ev_new_link_key ev;
6445 memset(&ev, 0, sizeof(ev));
6447 ev.store_hint = persistent;
6448 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6449 ev.key.addr.type = BDADDR_BREDR;
6450 ev.key.type = key->type;
6451 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6452 ev.key.pin_len = key->pin_len;
6454 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag to the
 * corresponding MGMT_LTK_* constant exposed over the mgmt API.
 * NOTE(review): the case labels for the first two groups (legacy vs P256
 * keys) are missing from this extraction; the final return is the
 * fallback for unknown types.
 */
6457 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6459 switch (ltk->type) {
6462 if (ltk->authenticated)
6463 return MGMT_LTK_AUTHENTICATED;
6464 return MGMT_LTK_UNAUTHENTICATED;
6466 if (ltk->authenticated)
6467 return MGMT_LTK_P256_AUTH;
6468 return MGMT_LTK_P256_UNAUTH;
6469 case SMP_LTK_P256_DEBUG:
6470 return MGMT_LTK_P256_DEBUG;
6473 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term key. The store
 * hint is forced to 0 for non-identity random addresses since such keys
 * would become useless when the peer's address changes.
 */
6476 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6478 struct mgmt_ev_new_long_term_key ev;
6480 memset(&ev, 0, sizeof(ev));
6482 /* Devices using resolvable or non-resolvable random addresses
6483 * without providing an indentity resolving key don't require
6484 * to store long term keys. Their addresses will change the
6487 * Only when a remote device provides an identity address
6488 * make sure the long term key is stored. If the remote
6489 * identity is known, the long term keys are internally
6490 * mapped to the identity address. So allow static random
6491 * and public addresses here.
/* Top two address bits 0b11 mark a static random (identity) address;
 * anything else random is resolvable/non-resolvable.
 */
6493 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6494 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6495 ev.store_hint = 0x00;
6497 ev.store_hint = persistent;
6499 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6500 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6501 ev.key.type = mgmt_ltk_type(key);
6502 ev.key.enc_size = key->enc_size;
6503 ev.key.ediv = key->ediv;
6504 ev.key.rand = key->rand;
/* NOTE(review): the master-flag assignment for SMP_LTK keys appears to
 * be missing between these lines in this extraction.
 */
6506 if (key->type == SMP_LTK)
6509 memcpy(ev.key.val, key->val, sizeof(key->val));
6511 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key. Storage is only
 * suggested when the peer actually uses a resolvable private address
 * (irk->rpa is non-zero).
 */
6514 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6516 struct mgmt_ev_new_irk ev;
6518 memset(&ev, 0, sizeof(ev));
6520 /* For identity resolving keys from devices that are already
6521 * using a public address or static random address, do not
6522 * ask for storing this key. The identity resolving key really
6523 * is only mandatory for devices using resovlable random
6526 * Storing all identity resolving keys has the downside that
6527 * they will be also loaded on next boot of they system. More
6528 * identity resolving keys, means more time during scanning is
6529 * needed to actually resolve these addresses.
6531 if (bacmp(&irk->rpa, BDADDR_ANY))
6532 ev.store_hint = 0x01;
6534 ev.store_hint = 0x00;
6536 bacpy(&ev.rpa, &irk->rpa);
6537 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6538 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6539 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6541 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key. Same identity
 * address rule as for LTKs: no store hint for non-identity random
 * addresses.
 */
6544 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6547 struct mgmt_ev_new_csrk ev;
6549 memset(&ev, 0, sizeof(ev));
6551 /* Devices using resolvable or non-resolvable random addresses
6552 * without providing an indentity resolving key don't require
6553 * to store signature resolving keys. Their addresses will change
6554 * the next time around.
6556 * Only when a remote device provides an identity address
6557 * make sure the signature resolving key is stored. So allow
6558 * static random and public addresses here.
6560 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6561 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6562 ev.store_hint = 0x00;
6564 ev.store_hint = persistent;
6566 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6567 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6568 ev.key.master = csrk->master;
6569 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6571 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM so user space can persist connection
 * parameters a peer requested. Silently ignored for non-identity
 * addresses since the parameters could not be re-associated later.
 */
6574 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6575 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6576 u16 max_interval, u16 latency, u16 timeout)
6578 struct mgmt_ev_new_conn_param ev;
6580 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6583 memset(&ev, 0, sizeof(ev));
6584 bacpy(&ev.addr.bdaddr, bdaddr);
6585 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6586 ev.store_hint = store_hint;
/* Event fields are little-endian on the wire. */
6587 ev.min_interval = cpu_to_le16(min_interval);
6588 ev.max_interval = cpu_to_le16(max_interval);
6589 ev.latency = cpu_to_le16(latency);
6590 ev.timeout = cpu_to_le16(timeout);
6592 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) at offset
 * eir_len in the buffer and return the new total length. Caller must
 * ensure the buffer is large enough.
 */
6595 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* Length byte counts the type byte plus the payload. */
6598 eir[eir_len++] = sizeof(type) + data_len;
6599 eir[eir_len++] = type;
6600 memcpy(&eir[eir_len], data, data_len);
6601 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED with an EIR blob: either the stored LE
 * advertising data verbatim, or (for BR/EDR) the remote name and class
 * of device.
 * NOTE(review): the buf declaration and eir_len initialisation, plus
 * some braces, are missing from this extraction.
 */
6606 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6607 u32 flags, u8 *name, u8 name_len)
6610 struct mgmt_ev_device_connected *ev = (void *) buf;
6613 bacpy(&ev->addr.bdaddr, &conn->dst);
6614 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6616 ev->flags = __cpu_to_le32(flags);
6618 /* We must ensure that the EIR Data fields are ordered and
6619 * unique. Keep it simple for now and avoid the problem by not
6620 * adding any BR/EDR data to the LE adv.
6622 if (conn->le_adv_data_len > 0) {
6623 memcpy(&ev->eir[eir_len],
6624 conn->le_adv_data, conn->le_adv_data_len);
6625 eir_len = conn->le_adv_data_len;
6628 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include the class of device when it is non-zero. */
6631 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6632 eir_len = eir_append_data(ev->eir, eir_len,
6634 conn->dev_class, 3);
6637 ev->eir_len = cpu_to_le16(eir_len);
6639 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6640 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command
 * with success and capture its socket in *data for the caller.
 * NOTE(review): the sock_hold/*sk assignment lines appear to be missing
 * from this extraction.
 */
6643 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6645 struct sock **sk = data;
6647 cmd->cmd_complete(cmd, 0);
6652 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: complete a pending UNPAIR_DEVICE
 * command and send the device-unpaired event to everyone but the
 * command's own socket.
 */
6655 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6657 struct hci_dev *hdev = data;
6658 struct mgmt_cp_unpair_device *cp = cmd->param;
6660 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6662 cmd->cmd_complete(cmd, 0);
6663 mgmt_pending_remove(cmd);
/* Return whether a SET_POWERED(off) command is currently pending, i.e.
 * the controller is in the middle of powering down.
 * NOTE(review): the body that inspects cp->val and the return statements
 * are missing from this extraction — only the lookup is visible.
 */
6666 bool mgmt_powering_down(struct hci_dev *hdev)
6668 struct pending_cmd *cmd;
6669 struct mgmt_mode *cp;
6671 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED for a closed link, completing any
 * pending DISCONNECT commands first. Also fast-tracks the power-off work
 * when this was the last connection during a power-down.
 * NOTE(review): ev.reason assignment and unlock/label lines are missing
 * from this extraction.
 */
6682 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6683 u8 link_type, u8 addr_type, u8 reason,
6684 bool mgmt_connected)
6686 struct mgmt_ev_device_disconnected ev;
6687 struct sock *sk = NULL;
6689 /* The connection is still in hci_conn_hash so test for 1
6690 * instead of 0 to know if this is the last one.
6692 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6693 cancel_delayed_work(&hdev->power_off);
6694 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only connections that were reported as connected to mgmt get a
 * disconnected event.
 */
6697 if (!mgmt_connected)
6700 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Answer pending DISCONNECT commands and remember the issuing socket
 * so it is skipped when broadcasting the event below.
 */
6703 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6705 bacpy(&ev.addr.bdaddr, bdaddr);
6706 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6709 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6714 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending DISCONNECT command with the translated HCI failure
 * status, but only if it targets the address/type that failed. Pending
 * UNPAIR_DEVICE commands are flushed unconditionally first.
 * NOTE(review): the cp = cmd->param assignment and the early-return when
 * no command is found are missing from this extraction.
 */
6718 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6719 u8 link_type, u8 addr_type, u8 status)
6721 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6722 struct mgmt_cp_disconnect *cp;
6723 struct pending_cmd *cmd;
6725 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6728 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Ignore if the pending command is for a different peer. */
6734 if (bacmp(bdaddr, &cp->addr.bdaddr))
6737 if (cp->addr.type != bdaddr_type)
6740 cmd->cmd_complete(cmd, mgmt_status(status));
6741 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the translated HCI status, and
 * fast-track power-off if this was the last connection during a
 * power-down.
 */
6744 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6745 u8 addr_type, u8 status)
6747 struct mgmt_ev_connect_failed ev;
6749 /* The connection is still in hci_conn_hash so test for 1
6750 * instead of 0 to know if this is the last one.
6752 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6753 cancel_delayed_work(&hdev->power_off);
6754 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6757 bacpy(&ev.addr.bdaddr, bdaddr);
6758 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6759 ev.status = mgmt_status(status);
6761 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST asking user space for a PIN code.
 * NOTE(review): the ev.secure assignment is missing from this
 * extraction.
 */
6764 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6766 struct mgmt_ev_pin_code_request ev;
6768 bacpy(&ev.addr.bdaddr, bdaddr);
6769 ev.addr.type = BDADDR_BREDR;
6772 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY with the translated HCI status. */
6775 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6778 struct pending_cmd *cmd;
6780 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6784 cmd->cmd_complete(cmd, mgmt_status(status));
6785 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY with the translated HCI
 * status.
 */
6788 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6791 struct pending_cmd *cmd;
6793 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6797 cmd->cmd_complete(cmd, mgmt_status(status));
6798 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking user space to confirm the
 * displayed numeric value during pairing.
 */
6801 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6802 u8 link_type, u8 addr_type, u32 value,
6805 struct mgmt_ev_user_confirm_request ev;
6807 BT_DBG("%s", hdev->name);
6809 bacpy(&ev.addr.bdaddr, bdaddr);
6810 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6811 ev.confirm_hint = confirm_hint;
6812 ev.value = cpu_to_le32(value);
6814 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking user space to enter a
 * passkey during pairing.
 */
6818 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6819 u8 link_type, u8 addr_type)
6821 struct mgmt_ev_user_passkey_request ev;
6823 BT_DBG("%s", hdev->name);
6825 bacpy(&ev.addr.bdaddr, bdaddr);
6826 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6828 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * commands: find the pending command for the opcode and complete it with
 * the translated HCI status.
 * NOTE(review): the not-found early return and the final return value
 * are missing from this extraction.
 */
6832 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6833 u8 link_type, u8 addr_type, u8 status,
6836 struct pending_cmd *cmd;
6838 cmd = mgmt_pending_find(opcode, hdev);
6842 cmd->cmd_complete(cmd, mgmt_status(status));
6843 mgmt_pending_remove(cmd);
/* Thin wrappers binding user_pairing_resp_complete() to each of the
 * four confirm/passkey reply opcodes.
 */
6848 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6849 u8 link_type, u8 addr_type, u8 status)
6851 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6852 status, MGMT_OP_USER_CONFIRM_REPLY);
6855 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6856 u8 link_type, u8 addr_type, u8 status)
6858 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6860 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6863 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6864 u8 link_type, u8 addr_type, u8 status)
6866 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6867 status, MGMT_OP_USER_PASSKEY_REPLY);
6870 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6871 u8 link_type, u8 addr_type, u8 status)
6873 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6875 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so user space can display the passkey and
 * how many digits have been entered so far.
 */
6878 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6879 u8 link_type, u8 addr_type, u32 passkey,
6882 struct mgmt_ev_passkey_notify ev;
6884 BT_DBG("%s", hdev->name);
6886 bacpy(&ev.addr.bdaddr, bdaddr);
6887 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6888 ev.passkey = __cpu_to_le32(passkey);
6889 ev.entered = entered;
6891 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_AUTH_FAILED for a failed authentication and, if a pairing
 * command was pending for this connection, complete it with the
 * translated status. The event skips the pairing command's own socket.
 * NOTE(review): ev.status assignment and the if (cmd) guard around the
 * completion are missing from this extraction.
 */
6894 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6896 struct mgmt_ev_auth_failed ev;
6897 struct pending_cmd *cmd;
6898 u8 status = mgmt_status(hci_status);
6900 bacpy(&ev.addr.bdaddr, &conn->dst);
6901 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6904 cmd = find_pairing(conn);
6906 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6907 cmd ? cmd->sk : NULL);
6910 cmd->cmd_complete(cmd, status);
6911 mgmt_pending_remove(cmd);
/* Completion handler for Write Auth Enable: on failure, fail all pending
 * SET_LINK_SECURITY commands; on success, sync the HCI_LINK_SECURITY
 * flag with the controller's HCI_AUTH state, answer pending commands and
 * announce changed settings.
 * NOTE(review): the bool changed declaration, if/else structure and the
 * &hdev->dev_flags continuation arguments are missing from this
 * extraction.
 */
6915 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6917 struct cmd_lookup match = { NULL, hdev };
6921 u8 mgmt_err = mgmt_status(status);
6922 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6923 cmd_status_rsp, &mgmt_err);
6927 if (test_bit(HCI_AUTH, &hdev->flags))
6928 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6931 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6934 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6938 new_settings(hdev, match.sk);
/* Queue a Write Extended Inquiry Response command that zeroes the EIR
 * data, and clear the cached copy in hdev->eir. No-op if the controller
 * lacks extended inquiry support.
 */
6944 static void clear_eir(struct hci_request *req)
6946 struct hci_dev *hdev = req->hdev;
6947 struct hci_cp_write_eir cp;
6949 if (!lmp_ext_inq_capable(hdev))
6952 memset(hdev->eir, 0, sizeof(hdev->eir));
6954 memset(&cp, 0, sizeof(cp));
6956 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for Write SSP Mode: rolls back mgmt flags on
 * failure, syncs HCI_SSP_ENABLED/HCI_HS_ENABLED on success, answers
 * pending SET_SSP commands, and follows up with a debug-key mode write
 * or an EIR update as appropriate.
 * NOTE(review): parts of the if/else structure, the enable/disable
 * branches, the update_eir() call in the else path and several closing
 * braces are missing from this extraction.
 */
6959 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6961 struct cmd_lookup match = { NULL, hdev };
6962 struct hci_request req;
6963 bool changed = false;
6966 u8 mgmt_err = mgmt_status(status);
/* On failure, undo the optimistic flag set done by the command
 * handler and tell listeners the settings reverted.
 */
6968 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6969 &hdev->dev_flags)) {
6970 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6971 new_settings(hdev, NULL);
6974 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6980 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6982 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Disabling SSP also forces High Speed off. */
6984 changed = test_and_clear_bit(HCI_HS_ENABLED,
6987 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6990 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6993 new_settings(hdev, match.sk);
6998 hci_req_init(&req, hdev);
7000 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
7001 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
7002 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7003 sizeof(enable), &enable);
7009 hci_req_run(&req, NULL);
/* Completion handler for Write Secure Connections Support: rolls back
 * HCI_SC_ENABLED/HCI_SC_ONLY on failure, syncs the flags on success,
 * answers pending SET_SECURE_CONN commands and announces changes.
 * NOTE(review): parts of the if/else structure and closing braces are
 * missing from this extraction.
 */
7012 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7014 struct cmd_lookup match = { NULL, hdev };
7015 bool changed = false;
7018 u8 mgmt_err = mgmt_status(status);
7021 if (test_and_clear_bit(HCI_SC_ENABLED,
7023 new_settings(hdev, NULL);
7024 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
7027 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
7028 cmd_status_rsp, &mgmt_err);
7033 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
7035 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* Disabling SC also clears the SC-only policy. */
7036 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
7039 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
7040 settings_rsp, &match);
7043 new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: remember the first pending command's
 * socket (with a reference) in the cmd_lookup match structure.
 */
7049 static void sk_lookup(struct pending_cmd *cmd, void *data)
7051 struct cmd_lookup *match = data;
7053 if (match->sk == NULL) {
7054 match->sk = cmd->sk;
7055 sock_hold(match->sk);
/* Completion handler for class-of-device updates: find the socket of
 * whichever command (set class / add UUID / remove UUID) triggered the
 * change, then broadcast the new class to everyone else.
 * NOTE(review): the status check guarding the event and the sock_put
 * cleanup are missing from this extraction.
 */
7059 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7062 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7064 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7065 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7066 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7069 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion handler for a local name change: update the cached name and
 * broadcast MGMT_EV_LOCAL_NAME_CHANGED, unless the change happened as
 * part of a power-on sequence.
 * NOTE(review): the status early-return and some braces are missing from
 * this extraction.
 */
7076 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7078 struct mgmt_cp_set_local_name ev;
7079 struct pending_cmd *cmd;
7084 memset(&ev, 0, sizeof(ev));
7085 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7086 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7088 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7090 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7092 /* If this is a HCI command related to powering on the
7093 * HCI dev don't send any mgmt signals.
7095 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the socket that issued the name-change command, if any. */
7099 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7100 cmd ? cmd->sk : NULL);
/* Completion handler for Read Local OOB Data: replies to the pending
 * command with either the extended (P-192 + P-256) response when Secure
 * Connections data is available, or the legacy (P-192 only) response.
 * NOTE(review): the not-found early return, the status branch structure
 * and the &rp/sizeof(rp) arguments to cmd_complete are missing from this
 * extraction.
 */
7103 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7104 u8 *rand192, u8 *hash256, u8 *rand256,
7107 struct pending_cmd *cmd;
7109 BT_DBG("%s status %u", hdev->name, status);
7111 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7116 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7117 mgmt_status(status));
7119 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7120 struct mgmt_rp_read_local_oob_ext_data rp;
7122 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7123 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7125 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7126 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7128 cmd_complete(cmd->sk, hdev->id,
7129 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7132 struct mgmt_rp_read_local_oob_data rp;
7134 memcpy(rp.hash, hash192, sizeof(rp.hash));
7135 memcpy(rp.rand, rand192, sizeof(rp.rand));
7137 cmd_complete(cmd->sk, hdev->id,
7138 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7143 mgmt_pending_remove(cmd);
/* Return true if the 128-bit uuid appears in the uuids[uuid_count]
 * filter list.
 */
7146 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7150 for (i = 0; i < uuid_count; i++) {
7151 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR fields and return true if any 16/32/128-bit service UUID
 * field contains a UUID from the filter list. 16- and 32-bit UUIDs are
 * expanded against the Bluetooth base UUID before comparison.
 * NOTE(review): the uuid[16] local, field_len == 0 termination check,
 * return statements and break lines are missing from this extraction.
 */
7158 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7162 while (parsed < eir_len) {
7163 u8 field_len = eir[0];
/* A truncated field means malformed EIR data; stop parsing. */
7170 if (eir_len - parsed < field_len + 1)
7174 case EIR_UUID16_ALL:
7175 case EIR_UUID16_SOME:
/* Little-endian 16-bit UUIDs go into bytes 12-13 of the base UUID. */
7176 for (i = 0; i + 3 <= field_len; i += 2) {
7177 memcpy(uuid, bluetooth_base_uuid, 16);
7178 uuid[13] = eir[i + 3];
7179 uuid[12] = eir[i + 2];
7180 if (has_uuid(uuid, uuid_count, uuids))
7184 case EIR_UUID32_ALL:
7185 case EIR_UUID32_SOME:
/* Little-endian 32-bit UUIDs go into bytes 12-15 of the base UUID. */
7186 for (i = 0; i + 5 <= field_len; i += 4) {
7187 memcpy(uuid, bluetooth_base_uuid, 16);
7188 uuid[15] = eir[i + 5];
7189 uuid[14] = eir[i + 4];
7190 uuid[13] = eir[i + 3];
7191 uuid[12] = eir[i + 2];
7192 if (has_uuid(uuid, uuid_count, uuids))
7196 case EIR_UUID128_ALL:
7197 case EIR_UUID128_SOME:
7198 for (i = 0; i + 17 <= field_len; i += 16) {
7199 memcpy(uuid, eir + i + 2, 16);
7200 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: length byte + field_len bytes. */
7206 parsed += field_len + 1;
7207 eir += field_len + 1;
/* Emit MGMT_EV_DEVICE_FOUND for a discovered device, applying the active
 * discovery filters: RSSI threshold, UUID filter against EIR and scan
 * response data, and suppression of events outside kernel-initiated
 * discovery (except LE passive-scan reports).
 * NOTE(review): the buf declaration, match initialisation, rssi
 * normalisation assignment, ev->rssi assignment and various braces are
 * missing from this extraction.
 */
7213 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7214 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7215 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7218 struct mgmt_ev_device_found *ev = (void *) buf;
7222 /* Don't send events for a non-kernel initiated discovery. With
7223 * LE one exception is if we have pend_le_reports > 0 in which
7224 * case we're doing passive scanning and want these events.
7226 if (!hci_discovery_active(hdev)) {
7227 if (link_type == ACL_LINK)
7229 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7233 /* When using service discovery with a RSSI threshold, then check
7234 * if such a RSSI threshold is specified. If a RSSI threshold has
7235 * been specified, then all results with a RSSI smaller than the
7236 * RSSI threshold will be dropped.
7238 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7239 * the results are also dropped.
7241 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7242 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7245 /* Make sure that the buffer is big enough. The 5 extra bytes
7246 * are for the potential CoD field.
7248 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7251 memset(buf, 0, sizeof(buf));
7253 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7254 * RSSI value was reported as 0 when not available. This behavior
7255 * is kept when using device discovery. This is required for full
7256 * backwards compatibility with the API.
7258 * However when using service discovery, the value 127 will be
7259 * returned when the RSSI is not available.
7261 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7262 link_type == ACL_LINK)
7265 bacpy(&ev->addr.bdaddr, bdaddr);
7266 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7268 ev->flags = cpu_to_le32(flags);
7271 /* When using service discovery and a list of UUID is
7272 * provided, results with no matching UUID should be
7273 * dropped. In case there is a match the result is
7274 * kept and checking possible scan response data
7277 if (hdev->discovery.uuid_count > 0)
7278 match = eir_has_uuids(eir, eir_len,
7279 hdev->discovery.uuid_count,
7280 hdev->discovery.uuids);
/* No EIR match: only continue if scan response data may still match. */
7284 if (!match && !scan_rsp_len)
7287 /* Copy EIR or advertising data into event */
7288 memcpy(ev->eir, eir, eir_len);
7290 /* When using service discovery and a list of UUID is
7291 * provided, results with empty EIR or advertising data
7292 * should be dropped since they do not match any UUID.
7294 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
/* Append class of device unless the EIR already carries one. */
7300 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7301 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7304 if (scan_rsp_len > 0) {
7305 /* When using service discovery and a list of UUID is
7306 * provided, results with no matching UUID should be
7307 * dropped if there is no previous match from the
7310 if (hdev->discovery.uuid_count > 0) {
7311 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7312 hdev->discovery.uuid_count,
7313 hdev->discovery.uuids))
7317 /* Append scan response data to event */
7318 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7320 /* When using service discovery and a list of UUID is
7321 * provided, results with empty scan response and no
7322 * previous matched advertising data should be dropped.
7324 if (hdev->discovery.uuid_count > 0 && !match)
7328 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7329 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7331 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7334 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7335 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7337 struct mgmt_ev_device_found *ev;
7338 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7341 ev = (struct mgmt_ev_device_found *) buf;
7343 memset(buf, 0, sizeof(buf));
7345 bacpy(&ev->addr.bdaddr, bdaddr);
7346 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7349 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7352 ev->eir_len = cpu_to_le16(eir_len);
7354 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7357 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7359 struct mgmt_ev_discovering ev;
7361 BT_DBG("%s discovering %u", hdev->name, discovering);
7363 memset(&ev, 0, sizeof(ev));
7364 ev.type = hdev->discovery.type;
7365 ev.discovering = discovering;
7367 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7370 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7372 BT_DBG("%s status %u", hdev->name, status);
7375 void mgmt_reenable_advertising(struct hci_dev *hdev)
7377 struct hci_request req;
7379 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7382 hci_req_init(&req, hdev);
7383 enable_advertising(&req);
7384 hci_req_run(&req, adv_enable_complete);