2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 8
/* Table of MGMT opcodes this kernel implements; reported verbatim to
 * userspace by the Read Commands command.
 * NOTE(review): this excerpt is missing several entries (gaps in the
 * residual line numbers) and the closing brace of the initializer —
 * consult the full source before editing.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
96 MGMT_OP_START_SERVICE_DISCOVERY,
/* Table of MGMT events this kernel can emit; reported to userspace by
 * the Read Commands command alongside mgmt_commands above.
 * NOTE(review): some entries and the closing brace are missing from
 * this excerpt.
 */
99 static const u16 mgmt_events[] = {
100 MGMT_EV_CONTROLLER_ERROR,
102 MGMT_EV_INDEX_REMOVED,
103 MGMT_EV_NEW_SETTINGS,
104 MGMT_EV_CLASS_OF_DEV_CHANGED,
105 MGMT_EV_LOCAL_NAME_CHANGED,
106 MGMT_EV_NEW_LINK_KEY,
107 MGMT_EV_NEW_LONG_TERM_KEY,
108 MGMT_EV_DEVICE_CONNECTED,
109 MGMT_EV_DEVICE_DISCONNECTED,
110 MGMT_EV_CONNECT_FAILED,
111 MGMT_EV_PIN_CODE_REQUEST,
112 MGMT_EV_USER_CONFIRM_REQUEST,
113 MGMT_EV_USER_PASSKEY_REQUEST,
115 MGMT_EV_DEVICE_FOUND,
117 MGMT_EV_DEVICE_BLOCKED,
118 MGMT_EV_DEVICE_UNBLOCKED,
119 MGMT_EV_DEVICE_UNPAIRED,
120 MGMT_EV_PASSKEY_NOTIFY,
123 MGMT_EV_DEVICE_ADDED,
124 MGMT_EV_DEVICE_REMOVED,
125 MGMT_EV_NEW_CONN_PARAM,
126 MGMT_EV_UNCONF_INDEX_ADDED,
127 MGMT_EV_UNCONF_INDEX_REMOVED,
128 MGMT_EV_NEW_CONFIG_OPTIONS,
131 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
134 struct list_head list;
141 void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
144 /* HCI to MGMT error code conversion table */
/* Indexed by the raw HCI status byte; see mgmt_status() below.
 * NOTE(review): the first entry (index 0x00, Success) and the closing
 * brace fell outside this excerpt.
 */
145 static u8 mgmt_status_table[] = {
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
/* Build an MGMT event packet (header + payload) and broadcast it to all
 * open control sockets except skip_sk.
 * NOTE(review): this excerpt is missing the skb declaration, the
 * alloc_skb() NULL check, the hdev/MGMT_INDEX_NONE conditional and the
 * return statement — do not edit without the full source.
 */
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
230 hdr->index = cpu_to_le16(hdev->id);
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
236 memcpy(skb_put(skb, data_len), data, data_len);
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
/* Queue an MGMT Command Status event (opcode + status only, no return
 * parameters) on the requesting socket.
 * NOTE(review): skb declaration, NULL check, ev->status assignment and
 * the kfree_skb()/return tail are missing from this excerpt.
 */
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
268 ev->opcode = cpu_to_le16(cmd);
270 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT Command Complete event carrying rp_len bytes of return
 * parameters (rp may describe a response struct) on the requesting
 * socket.
 * NOTE(review): skb/err declarations, the NULL checks (both for
 * alloc_skb and for rp before memcpy) and the ev->status assignment
 * are missing from this excerpt.
 */
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* Handler for the Read Management Commands command: returns the
 * supported-opcode and supported-event tables as little-endian u16s.
 * NOTE(review): the rp_size/i/opcode/err declarations, the kmalloc
 * NULL check and the trailing kfree(rp)/return are missing from this
 * excerpt.
 */
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
/* Commands first, then events, in one contiguous opcode array. */
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* Handler for the Read Controller Index List command: reports the IDs
 * of all configured BR/EDR controllers. Two passes under
 * hci_dev_list_lock: first count, then fill; setup/config/user-channel
 * and raw-only devices are filtered out of the reply.
 * NOTE(review): local declarations, count initialisation, the kmalloc
 * NULL check, the filtering "continue" statements and the
 * kfree(rp)/return tail are missing from this excerpt.
 */
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
362 struct mgmt_rp_read_index_list *rp;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
382 read_unlock(&hci_dev_list_lock);
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
/* Recount: entries may have been filtered since the sizing pass. */
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Handler for the Read Unconfigured Controller Index List command:
 * mirror image of read_index_list() above, but selecting controllers
 * that still have HCI_UNCONFIGURED set.
 * NOTE(review): same interior lines are missing as in
 * read_index_list() — declarations, NULL check, "continue"s and the
 * kfree/return tail.
 */
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
442 read_unlock(&hci_dev_list_lock);
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
492 static __le32 get_missing_options(struct hci_dev *hdev)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
/* Handler for the Read Controller Configuration Information command:
 * reports manufacturer plus supported/missing configuration options.
 * NOTE(review): the options declaration and the hci_dev_lock() that
 * pairs with the hci_dev_unlock() below are missing from this excerpt.
 */
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
529 BT_DBG("sock %p %s", sk, hdev->name);
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Public-address configuration is supported only when the driver
 * provides a set_bdaddr callback.
 */
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
/* Build the MGMT_SETTING_* bitmask of capabilities this controller
 * could support, derived from its LMP features and quirks.
 * NOTE(review): the settings declaration/initialisation, several
 * closing braces and the final return are missing from this excerpt.
 */
551 static u32 get_supported_settings(struct hci_dev *hdev)
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_BONDABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
561 if (lmp_bredr_capable(hdev)) {
/* Fast connectable (interlaced page scan) needs HCI >= 1.2. */
562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
/* Secure Connections can also be force-enabled via debugfs. */
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
586 settings |= MGMT_SETTING_CONFIGURATION;
/* Build the MGMT_SETTING_* bitmask of what is currently enabled on the
 * controller, one flag test per setting.
 * NOTE(review): the settings declaration and the final return are
 * missing from this excerpt.
 */
591 static u32 get_current_settings(struct hci_dev *hdev)
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
640 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field with the registered 16-bit service UUIDs to
 * data, capped at len bytes; returns the advanced write pointer. The
 * field type is downgraded from UUID16_ALL to UUID16_SOME if the list
 * is truncated.
 * NOTE(review): the uuid16 declaration, the uuids_start
 * initialisation block and the PnP-skip "continue"/loop-break lines
 * are missing from this excerpt.
 */
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
650 list_for_each_entry(uuid, &hdev->uuids, list) {
653 if (uuid->size != 16)
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
/* The PnP Information service class is never advertised in EIR. */
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
666 uuids_start[1] = EIR_UUID16_ALL;
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field with the registered 32-bit service UUIDs to
 * data, capped at len bytes; same ALL/SOME truncation convention as
 * create_uuid16_list() above.
 * NOTE(review): the uuids_start initialisation block and the
 * loop-break after truncation are missing from this excerpt.
 */
684 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
686 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
699 uuids_start[1] = EIR_UUID32_ALL;
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
711 uuids_start[0] += sizeof(u32);
/* Append an EIR field with the registered 128-bit service UUIDs to
 * data, capped at len bytes; same ALL/SOME truncation convention as
 * the 16- and 32-bit variants above.
 * NOTE(review): the uuids_start initialisation block and the
 * loop-break after truncation are missing from this excerpt.
 */
717 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
719 u8 *ptr = data, *uuids_start = NULL;
720 struct bt_uuid *uuid;
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
732 uuids_start[1] = EIR_UUID128_ALL;
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
742 memcpy(ptr, uuid->uuid, 16);
744 uuids_start[0] += 16;
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 struct pending_cmd *cmd;
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
766 struct pending_cmd *cmd;
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
771 if (cmd->opcode == opcode)
/* Fill ptr with LE scan response payload (the local name, shortened if
 * it exceeds the remaining advertising space) and return the number of
 * bytes written.
 * NOTE(review): the ad_len/name_len declarations, the name_len > 0
 * guard, the truncation assignment and the return are missing from
 * this excerpt.
 */
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
783 name_len = strlen(hdev->dev_name);
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
787 if (name_len > max_len) {
789 ptr[1] = EIR_NAME_SHORT;
791 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte counts the type byte plus the name bytes. */
793 ptr[0] = name_len + 1;
795 memcpy(ptr + 2, hdev->dev_name, name_len);
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
/* Queue an HCI Set Scan Response Data command into req, but only when
 * LE is enabled and the data actually changed since last time.
 * NOTE(review): the len declaration, the early returns after the
 * LE-enabled check and the no-change check, and the cp.length
 * assignment are missing from this excerpt.
 */
804 static void update_scan_rsp_data(struct hci_request *req)
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
813 memset(&cp, 0, sizeof(cp));
815 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip when the cached copy already matches. */
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
831 struct pending_cmd *cmd;
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
838 struct mgmt_mode *cp = cmd->param;
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
/* Fill ptr with LE advertising payload (flags field, optional TX power
 * field) and return the number of bytes written.
 * NOTE(review): the flags-field emission block, the ptr/ad_len
 * advancement after TX power, the field length bytes and the return
 * are missing from this excerpt.
 */
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
855 u8 ad_len = 0, flags = 0;
857 flags |= get_adv_discov_flags(hdev);
/* Advertise BR/EDR-not-supported when BR/EDR is disabled. */
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
863 BT_DBG("adv flags 0x%02x", flags);
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI Set Advertising Data command into req, but only when LE
 * is enabled and the data actually changed since last time.
 * NOTE(review): the len declaration, the early returns and the
 * cp.length assignment are missing from this excerpt (mirror of
 * update_scan_rsp_data above).
 */
885 static void update_adv_data(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
894 memset(&cp, 0, sizeof(cp));
896 len = create_adv_data(hdev, cp.data);
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
910 int mgmt_update_adv_data(struct hci_dev *hdev)
912 struct hci_request req;
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
917 return hci_req_run(&req, NULL);
/* Compose the BR/EDR Extended Inquiry Response payload into data:
 * local name, inquiry TX power, Device ID record and the three service
 * UUID lists.
 * NOTE(review): the ptr/name_len declarations, the name truncation
 * branch, several field-length bytes and ptr advancements are missing
 * from this excerpt.
 */
920 static void create_eir(struct hci_dev *hdev, u8 *data)
925 name_len = strlen(hdev->dev_name);
931 ptr[1] = EIR_NAME_SHORT;
933 ptr[1] = EIR_NAME_COMPLETE;
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
938 memcpy(ptr + 2, hdev->dev_name, name_len);
940 ptr += (name_len + 2);
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID record: source, vendor, product, version (all LE16). */
951 if (hdev->devid_source > 0) {
953 ptr[1] = EIR_DEVICE_ID;
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command into req when the controller is
 * powered, supports extended inquiry, has SSP enabled and the service
 * cache is not active — and only if the EIR data actually changed.
 * NOTE(review): the early "return" statements after each guard are
 * missing from this excerpt.
 */
968 static void update_eir(struct hci_request *req)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
973 if (!hdev_is_powered(hdev))
976 if (!lmp_ext_inq_capable(hdev))
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
985 memset(&cp, 0, sizeof(cp));
987 create_eir(hdev, cp.data);
/* Skip the HCI round-trip when the cached copy already matches. */
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
997 static u8 get_service_classes(struct hci_dev *hdev)
999 struct bt_uuid *uuid;
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command into req, assembling the
 * CoD from minor/major class and the UUID service hints; skipped when
 * powered off, BR/EDR disabled, service cache active or unchanged.
 * NOTE(review): the cod declaration, the early returns, the
 * limited-discoverable bit set and its closing brace are missing from
 * this excerpt.
 */
1008 static void update_class(struct hci_request *req)
1010 struct hci_dev *hdev = req->hdev;
1013 BT_DBG("%s", hdev->name);
1015 if (!hdev_is_powered(hdev))
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
/* Limited-discoverable mode sets bit 13 of the CoD. */
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1037 static bool get_connectable(struct hci_dev *hdev)
1039 struct pending_cmd *cmd;
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 struct mgmt_mode *cp = cmd->param;
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1053 static void disable_advertising(struct hci_request *req)
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI command sequence that (re)starts LE advertising:
 * optionally disable first, refresh the random address, set the
 * advertising parameters, then enable.
 * NOTE(review): the connectable declaration, early returns (LE
 * connection present, random-address failure) and some braces are
 * missing from this excerpt — the statement order here is
 * load-bearing, do not reshuffle.
 */
1060 static void enable_advertising(struct hci_request *req)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req);
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1080 connectable = get_connectable(hdev);
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler: when the service cache timeout fires, push the
 * now-final class/EIR data to the controller.
 * NOTE(review): the hci_dev_lock() pairing the unlock below and the
 * update_class()/update_eir() calls made under it are missing from
 * this excerpt.
 */
1101 static void service_cache_off(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
/* Bail out if the cache flag was already cleared elsewhere. */
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1110 hci_req_init(&req, hdev);
1117 hci_dev_unlock(hdev);
1119 hci_req_run(&req, NULL);
/* Delayed-work handler: mark the Resolvable Private Address expired
 * and, if advertising, restart advertising so a new RPA gets
 * generated and programmed.
 * NOTE(review): the container_of() second argument line and the early
 * return after the advertising check are missing from this excerpt.
 */
1122 static void rpa_expired(struct work_struct *work)
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 struct hci_request req;
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
/* One-time per-controller initialisation when the first mgmt command
 * arrives: set HCI_MGMT, wire up the delayed-work handlers and drop
 * the implicit bondable default.
 * NOTE(review): the early return when HCI_MGMT was already set is
 * implied by test_and_set_bit() but its line is missing from this
 * excerpt.
 */
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* Handler for the Read Controller Information command: snapshot the
 * controller's address, version, settings, class and names under the
 * device lock and return them to userspace.
 * NOTE(review): the hci_dev_lock() pairing the unlock below is
 * missing from this excerpt.
 */
1159 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1160 void *data, u16 data_len)
1162 struct mgmt_rp_read_info rp;
1164 BT_DBG("sock %p %s", sk, hdev->name);
1168 memset(&rp, 0, sizeof(rp));
1170 bacpy(&rp.bdaddr, &hdev->bdaddr);
1172 rp.version = hdev->hci_ver;
1173 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1175 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1176 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1178 memcpy(rp.dev_class, hdev->dev_class, 3);
1180 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1181 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1183 hci_dev_unlock(hdev);
1185 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd, copy the request parameters into it, take a
 * reference on the requesting socket and link it onto
 * hdev->mgmt_pending. Returns the new entry or NULL on allocation
 * failure.
 * NOTE(review): the NULL checks after kzalloc/kmemdup (with kfree on
 * the kmemdup failure path), the sock_hold()/cmd->sk assignment and
 * the return are missing from this excerpt.
 */
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 struct hci_dev *hdev, void *data,
1200 struct pending_cmd *cmd;
1202 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1206 cmd->opcode = opcode;
1207 cmd->index = hdev->id;
1209 cmd->param = kmemdup(data, len, GFP_KERNEL);
1215 cmd->param_len = len;
1220 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke cb(cmd, data) for every pending command on hdev matching
 * opcode; opcode 0 matches all. Safe against cb removing entries
 * (list_for_each_entry_safe).
 * NOTE(review): the "continue" after the opcode mismatch and the
 * cb() invocation line are missing from this excerpt.
 */
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to abort whatever discovery activity
 * is running (inquiry, LE scan or name resolution) plus any passive LE
 * scan. Returns true if any stop command was queued.
 * NOTE(review): the "ret" tracking variable, the BR/EDR-vs-LE else
 * branch, the case/brace structure, the name-resolve state check and
 * the final return are missing from this excerpt.
 */
1264 static bool hci_stop_discovery(struct hci_request *req)
1266 struct hci_dev *hdev = req->hdev;
1267 struct hci_cp_remote_name_req_cancel cp;
1268 struct inquiry_entry *e;
1270 switch (hdev->discovery.state) {
1271 case DISCOVERY_FINDING:
1272 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1273 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1275 cancel_delayed_work(&hdev->le_scan_disable);
1276 hci_req_add_le_scan_disable(req);
1281 case DISCOVERY_RESOLVING:
1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1296 hci_req_add_le_scan_disable(req);
/* Build and run one HCI request that quiesces the controller for power
 * off: disable page/inquiry scan, stop advertising, abort discovery,
 * and disconnect / cancel / reject every connection according to its
 * state.
 * NOTE(review): the scan variable, the switch case labels
 * (BT_CONNECTED/BT_CONNECT/BT_CONNECT2 etc.), several argument
 * continuation lines and the final return are missing from this
 * excerpt.
 */
1306 static int clean_up_hci_state(struct hci_dev *hdev)
1308 struct hci_request req;
1309 struct hci_conn *conn;
1310 bool discov_stopped;
1313 hci_req_init(&req, hdev);
1315 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1316 test_bit(HCI_PSCAN, &hdev->flags)) {
1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1322 disable_advertising(&req);
1324 discov_stopped = hci_stop_discovery(&req);
1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1327 struct hci_cp_disconnect dc;
1328 struct hci_cp_reject_conn_req rej;
1330 switch (conn->state) {
/* Established link: request a disconnect. */
1333 dc.handle = cpu_to_le16(conn->handle);
1334 dc.reason = 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing connection attempt: cancel it. */
1338 if (conn->type == LE_LINK)
1339 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1341 else if (conn->type == ACL_LINK)
1342 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it. */
1346 bacpy(&rej.bdaddr, &conn->dst);
1347 rej.reason = 0x15; /* Terminated due to Power Off */
1348 if (conn->type == ACL_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1351 else if (conn->type == SCO_LINK)
1352 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1358 err = hci_req_run(&req, clean_up_hci_complete);
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING)_
/* Handler for the Set Powered command: validate the mode byte, reject
 * a duplicate in-flight request, short-circuit no-op changes, then
 * either queue power-on work or run the power-off clean-up sequence.
 * NOTE(review): the hci_dev_lock(), the BUSY status argument, the
 * AUTO_OFF branch body, the cmd NULL check after mgmt_pending_add and
 * the goto labels/return are missing from this excerpt.
 */
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1372 BT_DBG("request for %s", hdev->name);
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: take over the power transition directly. */
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1392 err = mgmt_powered(hdev, 1);
/* Requested state already in effect: just echo the settings. */
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1427 hci_dev_unlock(hdev);
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1440 int mgmt_new_settings(struct hci_dev *hdev)
1442 return new_settings(hdev, NULL);
1447 struct hci_dev *hdev;
1451 static void settings_rsp(struct pending_cmd *cmd, void *data)
1453 struct cmd_lookup *match = data;
1455 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1457 list_del(&cmd->list);
1459 if (match->sk == NULL) {
1460 match->sk = cmd->sk;
1461 sock_hold(match->sk);
1464 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with the status
 * code pointed to by @data, then remove it from the pending list.
 */
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: if the command registered a dedicated
 * cmd_complete handler, use it to finish with the given status;
 * otherwise fall back to a plain command-status response.
 */
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 if (cmd->cmd_complete) {
1480 cmd->cmd_complete(cmd, *status);
1481 mgmt_pending_remove(cmd);
1486 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo back the command's own stored
 * parameter buffer as the response payload.
 */
1489 static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1491 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
/* cmd_complete handler for address-based commands: the response payload
 * is just the leading mgmt_addr_info of the stored parameters.
 */
1495 static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1497 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1498 sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED if the
 * controller lacks BR/EDR, REJECTED if BR/EDR is disabled, else SUCCESS.
 */
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 if (!lmp_bredr_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 return MGMT_STATUS_REJECTED;
1508 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status: NOT_SUPPORTED if the controller
 * lacks LE, REJECTED if LE is disabled, else SUCCESS.
 */
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 if (!lmp_le_capable(hdev))
1514 return MGMT_STATUS_NOT_SUPPORTED;
1515 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 return MGMT_STATUS_REJECTED;
1518 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable: on failure,
 * report the error and clear the limited-discoverable flag; on success,
 * update HCI_DISCOVERABLE, (re)arm the discoverable timeout, answer the
 * pending command and broadcast New Settings.
 * NOTE(review): intermediate lines are elided; only visible behavior is
 * documented.
 */
1521 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1523 struct pending_cmd *cmd;
1524 struct mgmt_mode *cp;
1525 struct hci_request req;
1528 BT_DBG("status 0x%02x", status);
1532 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1537 u8 mgmt_err = mgmt_status(status);
1538 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1539 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1545 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* A non-zero timeout means discoverable mode is time-limited. */
1548 if (hdev->discov_timeout > 0) {
1549 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1550 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1554 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1558 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1561 new_settings(hdev, cmd->sk);
1563 /* When the discoverable mode gets changed, make sure
1564 * that class of device has the limited discoverable
1565 * bit correctly set. Also update page scan based on whitelist
1568 hci_req_init(&req, hdev);
1569 hci_update_page_scan(hdev, &req);
1571 hci_req_run(&req, NULL);
1574 mgmt_pending_remove(cmd);
1577 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Discoverable command.  Validates the mode
 * (0x00 off, 0x01 general, 0x02 limited) and its timeout constraints,
 * handles the powered-off and no-op cases without HCI traffic, and
 * otherwise builds an HCI request (IAC LAP + scan enable for BR/EDR, or
 * just advertising data for LE-only) completed by
 * set_discoverable_complete.
 * NOTE(review): intermediate lines are elided; comments cover only what
 * is visible.
 */
1580 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1583 struct mgmt_cp_set_discoverable *cp = data;
1584 struct pending_cmd *cmd;
1585 struct hci_request req;
1590 BT_DBG("request for %s", hdev->name);
/* Neither transport enabled: nothing could become discoverable. */
1592 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1593 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1594 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 MGMT_STATUS_REJECTED);
1597 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1598 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_INVALID_PARAMS);
1601 timeout = __le16_to_cpu(cp->timeout);
1603 /* Disabling discoverable requires that no timeout is set,
1604 * and enabling limited discoverable requires a timeout.
1606 if ((cp->val == 0x00 && timeout > 0) ||
1607 (cp->val == 0x02 && timeout == 0))
1608 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1609 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while the controller is powered off. */
1613 if (!hdev_is_powered(hdev) && timeout > 0) {
1614 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1615 MGMT_STATUS_NOT_POWERED);
/* Serialize against in-flight discoverable/connectable changes. */
1619 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1620 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1621 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first. */
1626 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1627 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1628 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the flag and report, no HCI traffic. */
1632 if (!hdev_is_powered(hdev)) {
1633 bool changed = false;
1635 /* Setting limited discoverable when powered off is
1636 * not a valid operation since it requires a timeout
1637 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1639 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1640 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1644 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1649 err = new_settings(hdev, sk);
1654 /* If the current mode is the same, then just update the timeout
1655 * value with the new value. And if only the timeout gets updated,
1656 * then no need for any HCI transactions.
1658 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1659 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1660 &hdev->dev_flags)) {
1661 cancel_delayed_work(&hdev->discov_off);
1662 hdev->discov_timeout = timeout;
1664 if (cp->val && hdev->discov_timeout > 0) {
1665 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1666 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1670 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1674 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1680 /* Cancel any potential discoverable timeout that might be
1681 * still active and store new timeout value. The arming of
1682 * the timeout happens in the complete handler.
1684 cancel_delayed_work(&hdev->discov_off);
1685 hdev->discov_timeout = timeout;
1687 /* Limited discoverable mode */
1688 if (cp->val == 0x02)
1689 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1691 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1693 hci_req_init(&req, hdev);
1695 /* The procedure for LE-only controllers is much simpler - just
1696 * update the advertising data.
1698 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1704 struct hci_cp_write_current_iac_lap hci_cp;
1706 if (cp->val == 0x02) {
1707 /* Limited discoverable mode */
1708 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC (0x9e8b00) then GIAC (0x9e8b33), little-endian bytes. */
1709 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1710 hci_cp.iac_lap[1] = 0x8b;
1711 hci_cp.iac_lap[2] = 0x9e;
1712 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1713 hci_cp.iac_lap[4] = 0x8b;
1714 hci_cp.iac_lap[5] = 0x9e;
1716 /* General discoverable mode */
1718 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1719 hci_cp.iac_lap[1] = 0x8b;
1720 hci_cp.iac_lap[2] = 0x9e;
1723 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1724 (hci_cp.num_iac * 3) + 1, &hci_cp);
1726 scan |= SCAN_INQUIRY;
1728 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1731 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1734 update_adv_data(&req);
1736 err = hci_req_run(&req, set_discoverable_complete);
1738 mgmt_pending_remove(cmd);
1741 hci_dev_unlock(hdev);
/* Append page-scan parameter commands to @req to switch between fast
 * (interlaced, 160 ms interval) and standard (1.28 s interval) page
 * scanning.  Commands are only queued when the values actually change.
 * Skipped entirely when BR/EDR is disabled or the controller predates
 * Bluetooth 1.2 (no interlaced scan support).
 */
1745 static void write_fast_connectable(struct hci_request *req, bool enable)
1747 struct hci_dev *hdev = req->hdev;
1748 struct hci_cp_write_page_scan_activity acp;
1751 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1754 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1758 type = PAGE_SCAN_TYPE_INTERLACED;
1760 /* 160 msec page scan interval */
1761 acp.interval = cpu_to_le16(0x0100);
1763 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1765 /* default 1.28 sec page scan */
1766 acp.interval = cpu_to_le16(0x0800);
/* 11.25 ms window (0x0012 slots) in both modes. */
1769 acp.window = cpu_to_le16(0x0012);
1771 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1772 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1773 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1776 if (hdev->page_scan_type != type)
1777 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable: on failure report
 * the error; on success update HCI_CONNECTABLE (clearing
 * HCI_DISCOVERABLE too when turning connectable off), answer the pending
 * command, and when anything changed broadcast New Settings and refresh
 * page scan / advertising data / background scan.
 */
1780 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1782 struct pending_cmd *cmd;
1783 struct mgmt_mode *cp;
1784 bool conn_changed, discov_changed;
1786 BT_DBG("status 0x%02x", status);
1790 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1795 u8 mgmt_err = mgmt_status(status);
1796 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1802 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1804 discov_changed = false;
1806 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
/* Disabling connectable implicitly disables discoverable too. */
1808 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1812 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1814 if (conn_changed || discov_changed) {
1815 new_settings(hdev, cmd->sk);
1816 hci_update_page_scan(hdev, NULL);
1818 mgmt_update_adv_data(hdev);
1819 hci_update_background_scan(hdev);
1823 mgmt_pending_remove(cmd);
1826 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (used when no HCI commands are
 * needed): toggle HCI_CONNECTABLE (clearing HCI_DISCOVERABLE when
 * disabling), reply with current settings, and when something changed
 * refresh scanning state and broadcast New Settings.
 */
1829 static int set_connectable_update_settings(struct hci_dev *hdev,
1830 struct sock *sk, u8 val)
1832 bool changed = false;
1835 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1839 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1841 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1842 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1845 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1850 hci_update_page_scan(hdev, NULL);
1851 hci_update_background_scan(hdev);
1852 return new_settings(hdev, sk);
/* Handler for the MGMT Set Connectable command.  Validates the boolean
 * parameter, takes the flag-only path when powered off, rejects while a
 * connectable/discoverable change is pending, and otherwise builds an
 * HCI request (scan enable / fast-connectable / advertising updates)
 * completed by set_connectable_complete.  An -ENODATA result from
 * hci_req_run means no commands were needed and the settings-only path
 * is used instead.
 * NOTE(review): intermediate lines are elided; comments cover only what
 * is visible.
 */
1858 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1861 struct mgmt_mode *cp = data;
1862 struct pending_cmd *cmd;
1863 struct hci_request req;
1867 BT_DBG("request for %s", hdev->name);
1869 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1870 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1871 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1872 MGMT_STATUS_REJECTED);
1874 if (cp->val != 0x00 && cp->val != 0x01)
1875 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1876 MGMT_STATUS_INVALID_PARAMS);
1880 if (!hdev_is_powered(hdev)) {
1881 err = set_connectable_update_settings(hdev, sk, cp->val);
1885 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1886 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1887 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1892 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1898 hci_req_init(&req, hdev);
1900 /* If BR/EDR is not enabled and we disable advertising as a
1901 * by-product of disabling connectable, we need to update the
1902 * advertising flags.
1904 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1906 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1907 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1909 update_adv_data(&req);
1910 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1914 /* If we don't have any whitelist entries just
1915 * disable all scanning. If there are entries
1916 * and we had both page and inquiry scanning
1917 * enabled then fall back to only page scanning.
1918 * Otherwise no changes are needed.
1920 if (list_empty(&hdev->whitelist))
1921 scan = SCAN_DISABLED;
1922 else if (test_bit(HCI_ISCAN, &hdev->flags))
1925 goto no_scan_update;
/* Going non-discoverable: stop any pending discoverable timeout. */
1927 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1928 hdev->discov_timeout > 0)
1929 cancel_delayed_work(&hdev->discov_off);
1932 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1936 /* If we're going from non-connectable to connectable or
1937 * vice-versa when fast connectable is enabled ensure that fast
1938 * connectable gets disabled. write_fast_connectable won't do
1939 * anything if the page scan parameters are already what they
1942 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1943 write_fast_connectable(&req, false);
1945 /* Update the advertising parameters if necessary */
1946 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1947 enable_advertising(&req);
1949 err = hci_req_run(&req, set_connectable_complete);
1951 mgmt_pending_remove(cmd);
1952 if (err == -ENODATA)
1953 err = set_connectable_update_settings(hdev, sk,
1959 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Bondable command: a pure flag change with no
 * HCI traffic.  Validates the boolean value, toggles HCI_BONDABLE,
 * replies with the current settings and broadcasts New Settings when the
 * flag actually changed.
 */
1963 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1966 struct mgmt_mode *cp = data;
1970 BT_DBG("request for %s", hdev->name);
1972 if (cp->val != 0x00 && cp->val != 0x01)
1973 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1974 MGMT_STATUS_INVALID_PARAMS);
1979 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1981 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1983 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1988 err = new_settings(hdev, sk);
1991 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Link Security command.  Requires BR/EDR
 * support; when powered off only toggles HCI_LINK_SECURITY, otherwise
 * sends HCI Write Authentication Enable unless the controller already
 * has the requested auth state.
 */
1995 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1998 struct mgmt_mode *cp = data;
1999 struct pending_cmd *cmd;
2003 BT_DBG("request for %s", hdev->name);
2005 status = mgmt_bredr_support(hdev);
2007 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2010 if (cp->val != 0x00 && cp->val != 0x01)
2011 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2012 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update, no HCI command. */
2016 if (!hdev_is_powered(hdev)) {
2017 bool changed = false;
2019 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2020 &hdev->dev_flags)) {
2021 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2025 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2030 err = new_settings(hdev, sk);
2035 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2036 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches: reply without HCI traffic. */
2043 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2044 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2048 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2054 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2056 mgmt_pending_remove(cmd);
2061 hci_dev_unlock(hdev);
/* Handler for the MGMT Set SSP (Secure Simple Pairing) command.
 * Requires BR/EDR and SSP-capable hardware.  Powered off: flag-only
 * toggle (disabling SSP also clears High Speed).  Powered on: sends HCI
 * Write Simple Pairing Mode, first dropping SSP debug mode when SSP is
 * being disabled while debug keys are in use.
 */
2065 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2067 struct mgmt_mode *cp = data;
2068 struct pending_cmd *cmd;
2072 BT_DBG("request for %s", hdev->name);
2074 status = mgmt_bredr_support(hdev);
2076 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2078 if (!lmp_ssp_capable(hdev))
2079 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2080 MGMT_STATUS_NOT_SUPPORTED);
2082 if (cp->val != 0x00 && cp->val != 0x01)
2083 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2084 MGMT_STATUS_INVALID_PARAMS);
2088 if (!hdev_is_powered(hdev)) {
2092 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2095 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* High Speed depends on SSP, so it is cleared alongside it. */
2098 changed = test_and_clear_bit(HCI_HS_ENABLED,
2101 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2104 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2109 err = new_settings(hdev, sk);
2114 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2115 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2116 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2121 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2122 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2126 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP: also turn off debug mode (cp->val is 0x00 here). */
2132 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2133 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2134 sizeof(cp->val), &cp->val);
2136 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2138 mgmt_pending_remove(cmd);
2143 hci_dev_unlock(hdev);
/* Handler for the MGMT Set High Speed command: a flag-only setting
 * gated on BR/EDR, SSP capability and SSP being enabled.  Disabling HS
 * while powered is rejected in the visible branch.
 * NOTE(review): intermediate lines are elided; exact branch structure is
 * not fully visible.
 */
2147 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2149 struct mgmt_mode *cp = data;
2154 BT_DBG("request for %s", hdev->name);
2156 status = mgmt_bredr_support(hdev);
2158 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2160 if (!lmp_ssp_capable(hdev))
2161 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2162 MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be enabled first. */
2164 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2165 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2166 MGMT_STATUS_REJECTED);
2168 if (cp->val != 0x00 && cp->val != 0x01)
2169 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2170 MGMT_STATUS_INVALID_PARAMS);
2175 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2177 if (hdev_is_powered(hdev)) {
2178 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2179 MGMT_STATUS_REJECTED);
2183 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2186 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2191 err = new_settings(hdev, sk);
2194 hci_dev_unlock(hdev);
/* HCI request completion handler for Set LE: on failure, fail all
 * pending SET_LE commands with the mapped status; on success, answer
 * them with the new settings, broadcast New Settings, and when LE ended
 * up enabled refresh advertising/scan-response data and restart
 * background scanning.
 */
2198 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2200 struct cmd_lookup match = { NULL, hdev };
2205 u8 mgmt_err = mgmt_status(status);
2207 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2212 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2214 new_settings(hdev, match.sk);
2219 /* Make sure the controller has a good default for
2220 * advertising data. Restrict the update to when LE
2221 * has actually been enabled. During power on, the
2222 * update in powered_update_hci will take care of it.
2224 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2225 struct hci_request req;
2227 hci_req_init(&req, hdev);
2228 update_adv_data(&req);
2229 update_scan_rsp_data(&req);
2230 hci_req_run(&req, NULL);
2232 hci_update_background_scan(hdev);
2236 hci_dev_unlock(hdev);
/* Handler for the MGMT Set LE command.  Requires LE-capable hardware
 * and a dual-mode controller (LE-only devices cannot toggle LE off).
 * When powered off or the host-LE state already matches, only the flags
 * are updated (disabling LE also clears HCI_ADVERTISING); otherwise an
 * HCI Write LE Host Supported request is issued, preceded by disabling
 * advertising if it is currently active.
 * NOTE(review): intermediate lines are elided; comments cover only what
 * is visible.
 */
2239 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2241 struct mgmt_mode *cp = data;
2242 struct hci_cp_write_le_host_supported hci_cp;
2243 struct pending_cmd *cmd;
2244 struct hci_request req;
2248 BT_DBG("request for %s", hdev->name);
2250 if (!lmp_le_capable(hdev))
2251 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2252 MGMT_STATUS_NOT_SUPPORTED);
2254 if (cp->val != 0x00 && cp->val != 0x01)
2255 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2256 MGMT_STATUS_INVALID_PARAMS);
2258 /* LE-only devices do not allow toggling LE on/off */
2259 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2260 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2261 MGMT_STATUS_REJECTED);
2266 enabled = lmp_host_le_capable(hdev);
2268 if (!hdev_is_powered(hdev) || val == enabled) {
2269 bool changed = false;
2271 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2272 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* LE advertising cannot stay on when LE itself goes off. */
2276 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2277 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2281 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2286 err = new_settings(hdev, sk);
2291 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2292 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2293 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2298 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2304 hci_req_init(&req, hdev);
2306 memset(&hci_cp, 0, sizeof(hci_cp));
2310 hci_cp.simul = 0x00;
2312 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2313 disable_advertising(&req);
2316 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2319 err = hci_req_run(&req, le_enable_complete);
2321 mgmt_pending_remove(cmd);
2324 hci_dev_unlock(hdev);
2328 /* This is a helper function to test for pending mgmt commands that can
2329 * cause CoD or EIR HCI commands. We can only allow one such pending
2330 * mgmt command at a time since otherwise we cannot easily track what
2331 * the current values are, will be, and based on that calculate if a new
2332 * HCI command needs to be sent and if yes with what value.
/* Scans hdev->mgmt_pending for any of the UUID/class/powered opcodes;
 * the return statements fall outside this visible fragment.
 */
2334 static bool pending_eir_or_class(struct hci_dev *hdev)
2336 struct pending_cmd *cmd;
2338 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2339 switch (cmd->opcode) {
2340 case MGMT_OP_ADD_UUID:
2341 case MGMT_OP_REMOVE_UUID:
2342 case MGMT_OP_SET_DEV_CLASS:
2343 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short-form UUIDs.
 */
2351 static const u8 bluetooth_base_uuid[] = {
2352 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2353 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: if its low 12 bytes do not
 * match the Bluetooth Base UUID it is a full 128-bit UUID; otherwise the
 * 32-bit value at offset 12 decides between the short 16/32-bit forms.
 * (Return statements are outside this visible fragment.)
 */
2356 static u8 get_uuid_size(const u8 *uuid)
2360 if (memcmp(uuid, bluetooth_base_uuid, 12))
2363 val = get_unaligned_le32(&uuid[12]);
/* Common completion for class-of-device affecting commands (Add/Remove
 * UUID, Set Device Class): answer the pending command for @mgmt_op with
 * the current 3-byte device class and remove it.
 */
2370 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2372 struct pending_cmd *cmd;
2376 cmd = mgmt_pending_find(mgmt_op, hdev);
2380 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2381 hdev->dev_class, 3);
2383 mgmt_pending_remove(cmd);
2386 hci_dev_unlock(hdev);
/* HCI request completion for Add UUID: delegate to the shared
 * class-complete helper.
 */
2389 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2391 BT_DBG("status 0x%02x", status);
2393 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for the MGMT Add UUID command: rejects while another EIR/CoD
 * affecting command is pending, allocates and appends the UUID to
 * hdev->uuids, then runs an HCI request to refresh class/EIR.  An
 * -ENODATA result means nothing needed updating and the command is
 * completed immediately with the current device class.
 */
2396 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2398 struct mgmt_cp_add_uuid *cp = data;
2399 struct pending_cmd *cmd;
2400 struct hci_request req;
2401 struct bt_uuid *uuid;
2404 BT_DBG("request for %s", hdev->name);
2408 if (pending_eir_or_class(hdev)) {
2409 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2414 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2420 memcpy(uuid->uuid, cp->uuid, 16);
2421 uuid->svc_hint = cp->svc_hint;
2422 uuid->size = get_uuid_size(cp->uuid);
2424 list_add_tail(&uuid->list, &hdev->uuids);
2426 hci_req_init(&req, hdev);
2431 err = hci_req_run(&req, add_uuid_complete);
2433 if (err != -ENODATA)
/* Nothing queued: respond right away with the current class. */
2436 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2437 hdev->dev_class, 3);
2441 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2450 hci_dev_unlock(hdev);
/* Arm the service-cache: when the device is powered and the cache flag
 * was not already set, schedule the delayed service_cache work.
 * (Return values fall outside this visible fragment.)
 */
2454 static bool enable_service_cache(struct hci_dev *hdev)
2456 if (!hdev_is_powered(hdev))
2459 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2460 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for Remove UUID: delegate to the shared
 * class-complete helper.
 */
2468 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2470 BT_DBG("status 0x%02x", status);
2472 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for the MGMT Remove UUID command: an all-zero UUID clears the
 * whole list (possibly just re-arming the service cache); otherwise all
 * matching entries are unlinked, with INVALID_PARAMS if none matched.
 * Finishes by running an HCI request to refresh class/EIR, completing
 * immediately on -ENODATA.
 */
2475 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2478 struct mgmt_cp_remove_uuid *cp = data;
2479 struct pending_cmd *cmd;
2480 struct bt_uuid *match, *tmp;
/* All-zero UUID is the wildcard meaning "remove everything". */
2481 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2482 struct hci_request req;
2485 BT_DBG("request for %s", hdev->name);
2489 if (pending_eir_or_class(hdev)) {
2490 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2495 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2496 hci_uuids_clear(hdev);
2498 if (enable_service_cache(hdev)) {
2499 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2500 0, hdev->dev_class, 3);
2509 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2510 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2513 list_del(&match->list);
2519 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2520 MGMT_STATUS_INVALID_PARAMS);
2525 hci_req_init(&req, hdev);
2530 err = hci_req_run(&req, remove_uuid_complete);
2532 if (err != -ENODATA)
2535 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2536 hdev->dev_class, 3);
2540 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2549 hci_dev_unlock(hdev);
/* HCI request completion for Set Device Class: delegate to the shared
 * class-complete helper.
 */
2553 static void set_class_complete(struct hci_dev *hdev, u8 status)
2555 BT_DBG("status 0x%02x", status);
2557 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for the MGMT Set Device Class command.  Requires BR/EDR,
 * validates the reserved bits of minor/major, stores the new class, and
 * when powered runs an HCI request (flushing a pending service-cache
 * first) to push the class/EIR to the controller.
 */
2560 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2563 struct mgmt_cp_set_dev_class *cp = data;
2564 struct pending_cmd *cmd;
2565 struct hci_request req;
2568 BT_DBG("request for %s", hdev->name);
2570 if (!lmp_bredr_capable(hdev))
2571 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2572 MGMT_STATUS_NOT_SUPPORTED);
2576 if (pending_eir_or_class(hdev)) {
2577 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2582 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2583 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2584 MGMT_STATUS_INVALID_PARAMS);
2588 hdev->major_class = cp->major;
2589 hdev->minor_class = cp->minor;
2591 if (!hdev_is_powered(hdev)) {
2592 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2593 hdev->dev_class, 3);
2597 hci_req_init(&req, hdev);
/* Drop the lock around the synchronous cancel to avoid deadlocking
 * with the service_cache work taking the same lock.
 */
2599 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2600 hci_dev_unlock(hdev);
2601 cancel_delayed_work_sync(&hdev->service_cache);
2608 err = hci_req_run(&req, set_class_complete);
2610 if (err != -ENODATA)
2613 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2614 hdev->dev_class, 3);
2618 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2627 hci_dev_unlock(hdev);
/* Handler for the MGMT Load Link Keys command.  Validates the key count
 * against the maximum that fits in a u16 length, checks the exact
 * expected payload size, validates each entry (BR/EDR address, key type
 * <= 0x08), then replaces the stored key list, updates the
 * keep-debug-keys flag, and adds every non-debug key.
 */
2631 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2634 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so that sizeof(*cp) + count * entry fits in a u16. */
2635 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2636 sizeof(struct mgmt_link_key_info));
2637 u16 key_count, expected_len;
2641 BT_DBG("request for %s", hdev->name);
2643 if (!lmp_bredr_capable(hdev))
2644 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2645 MGMT_STATUS_NOT_SUPPORTED);
2647 key_count = __le16_to_cpu(cp->key_count);
2648 if (key_count > max_key_count) {
2649 BT_ERR("load_link_keys: too big key_count value %u",
2651 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2652 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly. */
2655 expected_len = sizeof(*cp) + key_count *
2656 sizeof(struct mgmt_link_key_info);
2657 if (expected_len != len) {
2658 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2660 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2661 MGMT_STATUS_INVALID_PARAMS);
2664 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2665 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2666 MGMT_STATUS_INVALID_PARAMS);
2668 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the stored key list. */
2671 for (i = 0; i < key_count; i++) {
2672 struct mgmt_link_key_info *key = &cp->keys[i];
2674 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2675 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2676 MGMT_STATUS_INVALID_PARAMS);
2681 hci_link_keys_clear(hdev);
2684 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2687 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2691 new_settings(hdev, NULL);
2693 for (i = 0; i < key_count; i++) {
2694 struct mgmt_link_key_info *key = &cp->keys[i];
2696 /* Always ignore debug keys and require a new pairing if
2697 * the user wants to use them.
2699 if (key->type == HCI_LK_DEBUG_COMBINATION)
2702 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2703 key->type, key->pin_len, NULL);
2706 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2708 hci_dev_unlock(hdev);
/* Emit a Device Unpaired event for the given address to all management
 * sockets except @skip_sk.
 */
2713 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2714 u8 addr_type, struct sock *skip_sk)
2716 struct mgmt_ev_device_unpaired ev;
2718 bacpy(&ev.addr.bdaddr, bdaddr);
2719 ev.addr.type = addr_type;
2721 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for the MGMT Unpair Device command: removes stored link keys
 * (BR/EDR) or IRK/LTKs (LE) for the address, optionally terminating an
 * existing connection when cp->disconnect is set.  Replies NOT_PAIRED
 * when no keys were found; the disconnect path defers completion to an
 * addr_cmd_complete pending command.
 * NOTE(review): intermediate lines are elided; comments cover only what
 * is visible.
 */
2725 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2728 struct mgmt_cp_unpair_device *cp = data;
2729 struct mgmt_rp_unpair_device rp;
2730 struct hci_cp_disconnect dc;
2731 struct pending_cmd *cmd;
2732 struct hci_conn *conn;
2735 memset(&rp, 0, sizeof(rp));
2736 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2737 rp.addr.type = cp->addr.type;
2739 if (!bdaddr_type_is_valid(cp->addr.type))
2740 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2741 MGMT_STATUS_INVALID_PARAMS,
2744 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2745 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2746 MGMT_STATUS_INVALID_PARAMS,
2751 if (!hdev_is_powered(hdev)) {
2752 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2753 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2757 if (cp->addr.type == BDADDR_BREDR) {
2758 /* If disconnection is requested, then look up the
2759 * connection. If the remote device is connected, it
2760 * will be later used to terminate the link.
2762 * Setting it to NULL explicitly will cause no
2763 * termination of the link.
2766 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2771 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2775 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2778 /* Defer clearing up the connection parameters
2779 * until closing to give a chance of keeping
2780 * them if a repairing happens.
2782 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2784 /* If disconnection is not requested, then
2785 * clear the connection variable so that the
2786 * link is not terminated.
2788 if (!cp->disconnect)
2792 if (cp->addr.type == BDADDR_LE_PUBLIC)
2793 addr_type = ADDR_LE_DEV_PUBLIC;
2795 addr_type = ADDR_LE_DEV_RANDOM;
2797 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2799 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No keys removed: the device was never paired. */
2803 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2804 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2808 /* If the connection variable is set, then termination of the
2809 * link is requested.
2812 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2814 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2818 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2825 cmd->cmd_complete = addr_cmd_complete;
2827 dc.handle = cpu_to_le16(conn->handle);
2828 dc.reason = 0x13; /* Remote User Terminated Connection */
2829 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2831 mgmt_pending_remove(cmd);
2834 hci_dev_unlock(hdev);
/* Handler for the MGMT Disconnect command: validates the address type,
 * requires the interface to be up, allows only one Disconnect in flight,
 * looks up the ACL or LE connection, and issues hci_disconnect with
 * Remote User Terminated Connection; completion uses the generic
 * param-echo handler.
 */
2838 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2841 struct mgmt_cp_disconnect *cp = data;
2842 struct mgmt_rp_disconnect rp;
2843 struct pending_cmd *cmd;
2844 struct hci_conn *conn;
2849 memset(&rp, 0, sizeof(rp));
2850 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2851 rp.addr.type = cp->addr.type;
2853 if (!bdaddr_type_is_valid(cp->addr.type))
2854 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2855 MGMT_STATUS_INVALID_PARAMS,
2860 if (!test_bit(HCI_UP, &hdev->flags)) {
2861 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2862 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2866 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2867 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2868 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2872 if (cp->addr.type == BDADDR_BREDR)
2873 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2876 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* Connections still opening or already closed cannot be dropped. */
2878 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2879 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2880 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2884 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2890 cmd->cmd_complete = generic_cmd_complete;
2892 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2894 mgmt_pending_remove(cmd);
2897 hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type pair into the
 * corresponding MGMT BDADDR_* address type, defaulting to LE Random for
 * unknown LE address types and to BR/EDR for non-LE links.
 */
2901 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2903 switch (link_type) {
2905 switch (addr_type) {
2906 case ADDR_LE_DEV_PUBLIC:
2907 return BDADDR_LE_PUBLIC;
2910 /* Fallback to LE Random address type */
2911 return BDADDR_LE_RANDOM;
2915 /* Fallback to BR/EDR type */
2916 return BDADDR_BREDR;
/* Handler for the MGMT Get Connections command: counts the
 * MGMT-visible connections, allocates a response sized for that count,
 * fills in addresses (skipping SCO/eSCO links), and recomputes the final
 * length since entries may have been filtered.
 */
2920 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2923 struct mgmt_rp_get_connections *rp;
2933 if (!hdev_is_powered(hdev)) {
2934 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2935 MGMT_STATUS_NOT_POWERED);
2940 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2941 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2945 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2946 rp = kmalloc(rp_len, GFP_KERNEL);
2953 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2954 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2956 bacpy(&rp->addr[i].bdaddr, &c->dst);
2957 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO links are not reported over the management interface. */
2958 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2963 rp->conn_count = cpu_to_le16(i);
2965 /* Recalculate length in case of filtered SCO connections, etc */
2966 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2968 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2974 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Neg Reply command and send the HCI negative
 * reply for the given address; the pending entry is removed again if the
 * HCI send fails.
 */
2978 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2979 struct mgmt_cp_pin_code_neg_reply *cp)
2981 struct pending_cmd *cmd;
2984 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2989 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2990 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2992 mgmt_pending_remove(cmd);
/* Handler for the MGMT PIN Code Reply command: requires power and an
 * existing ACL connection.  If the link demands high security but the
 * supplied PIN is not 16 bytes, a negative reply is sent instead;
 * otherwise the PIN is forwarded via HCI PIN Code Reply with an
 * addr_cmd_complete pending command.
 */
2997 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3000 struct hci_conn *conn;
3001 struct mgmt_cp_pin_code_reply *cp = data;
3002 struct hci_cp_pin_code_reply reply;
3003 struct pending_cmd *cmd;
3010 if (!hdev_is_powered(hdev)) {
3011 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3012 MGMT_STATUS_NOT_POWERED);
3016 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3018 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3019 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; reject shorter ones. */
3023 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3024 struct mgmt_cp_pin_code_neg_reply ncp;
3026 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3028 BT_ERR("PIN code is not 16 bytes long");
3030 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3032 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3033 MGMT_STATUS_INVALID_PARAMS);
3038 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3044 cmd->cmd_complete = addr_cmd_complete;
3046 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3047 reply.pin_len = cp->pin_len;
3048 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3050 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3052 mgmt_pending_remove(cmd);
3055 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the requested IO capability on
 * the adapter after range-checking it (must not exceed
 * SMP_IO_KEYBOARD_DISPLAY). Completes immediately; no HCI traffic needed.
 */
3059 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3062 struct mgmt_cp_set_io_capability *cp = data;
3066 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3067 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3068 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3072 hdev->io_capability = cp->io_capability;
3074 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3075 hdev->io_capability);
3077 hci_dev_unlock(hdev);
3079 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is the given
 * connection, or NULL-equivalent if none is pending (return path dropped by
 * extraction).
 */
3083 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3085 struct hci_dev *hdev = conn->hdev;
3086 struct pending_cmd *cmd;
3088 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3089 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3092 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE request: send the mgmt response with the peer
 * address, detach all pairing callbacks from the connection, drop our
 * connection reference and keep the stored connection parameters.
 */
3101 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3103 struct mgmt_rp_pair_device rp;
3104 struct hci_conn *conn = cmd->user_data;
3106 bacpy(&rp.addr.bdaddr, &conn->dst);
3107 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3109 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3112 /* So we don't get further callbacks for this connection */
3113 conn->connect_cfm_cb = NULL;
3114 conn->security_cfm_cb = NULL;
3115 conn->disconn_cfm_cb = NULL;
/* Release the reference taken with hci_conn_get() in pair_device() */
3117 hci_conn_drop(conn);
3119 /* The device is paired so there is no need to remove
3120 * its connection parameters anymore.
3122 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing finishes. Map the boolean outcome to
 * a mgmt status and complete/remove any pending PAIR_DEVICE command for
 * this connection.
 */
3127 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3129 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3130 struct pending_cmd *cmd;
3132 cmd = find_pairing(conn);
3134 cmd->cmd_complete(cmd, status);
3135 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback used while pairing:
 * translate the HCI status and complete the pending PAIR_DEVICE command,
 * if one is still tracked for this connection.
 */
3139 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3141 struct pending_cmd *cmd;
3143 BT_DBG("status %u", status);
3145 cmd = find_pairing(conn);
3147 BT_DBG("Unable to find a pending command");
3151 cmd->cmd_complete(cmd, mgmt_status(status));
3152 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb(). NOTE(review): upstream this bails
 * out early on status == 0 (connecting alone does not prove LE pairing
 * finished); that guard appears to be among the lines dropped by
 * extraction — verify before editing.
 */
3155 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3157 struct pending_cmd *cmd;
3159 BT_DBG("status %u", status);
3164 cmd = find_pairing(conn);
3166 BT_DBG("Unable to find a pending command");
3170 cmd->cmd_complete(cmd, mgmt_status(status));
3171 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PAIR_DEVICE: validate the request, establish (or reuse)
 * a BR/EDR ACL or LE connection to the peer, register a pending command
 * with pairing callbacks attached to the connection, and kick off
 * authentication. The mgmt response is delivered asynchronously by
 * pairing_complete() unless the link is already secure.
 */
3174 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3177 struct mgmt_cp_pair_device *cp = data;
3178 struct mgmt_rp_pair_device rp;
3179 struct pending_cmd *cmd;
3180 u8 sec_level, auth_type;
3181 struct hci_conn *conn;
/* Pre-build the reply with the peer address so every error path can
 * include it.
 */
3186 memset(&rp, 0, sizeof(rp));
3187 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3188 rp.addr.type = cp->addr.type;
3190 if (!bdaddr_type_is_valid(cp->addr.type))
3191 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3192 MGMT_STATUS_INVALID_PARAMS,
3195 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3196 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3197 MGMT_STATUS_INVALID_PARAMS,
3202 if (!hdev_is_powered(hdev)) {
3203 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3204 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3208 sec_level = BT_SECURITY_MEDIUM;
3209 auth_type = HCI_AT_DEDICATED_BONDING;
/* BR/EDR peers get an ACL connection; everything else is LE */
3211 if (cp->addr.type == BDADDR_BREDR) {
3212 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3217 /* Convert from L2CAP channel address type to HCI address type
3219 if (cp->addr.type == BDADDR_LE_PUBLIC)
3220 addr_type = ADDR_LE_DEV_PUBLIC;
3222 addr_type = ADDR_LE_DEV_RANDOM;
3224 /* When pairing a new device, it is expected to remember
3225 * this device for future connections. Adding the connection
3226 * parameter information ahead of time allows tracking
3227 * of the slave preferred values and will speed up any
3228 * further connection establishment.
3230 * If connection parameters already exist, then they
3231 * will be kept and this function does nothing.
3233 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3235 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3236 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: map the error to a mgmt status */
3243 if (PTR_ERR(conn) == -EBUSY)
3244 status = MGMT_STATUS_BUSY;
3246 status = MGMT_STATUS_CONNECT_FAILED;
3248 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already installed means another pairing is in
 * progress on this connection.
 */
3254 if (conn->connect_cfm_cb) {
3255 hci_conn_drop(conn);
3256 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3257 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3261 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3264 hci_conn_drop(conn);
3268 cmd->cmd_complete = pairing_complete;
3270 /* For LE, just connecting isn't a proof that the pairing finished */
3271 if (cp->addr.type == BDADDR_BREDR) {
3272 conn->connect_cfm_cb = pairing_complete_cb;
3273 conn->security_cfm_cb = pairing_complete_cb;
3274 conn->disconn_cfm_cb = pairing_complete_cb;
3276 conn->connect_cfm_cb = le_pairing_complete_cb;
3277 conn->security_cfm_cb = le_pairing_complete_cb;
3278 conn->disconn_cfm_cb = le_pairing_complete_cb;
3281 conn->io_capability = cp->io_cap;
/* Hold a reference for the pending command; released in pairing_complete */
3282 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure enough, complete immediately */
3284 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3285 hci_conn_security(conn, sec_level, auth_type, true)) {
3286 cmd->cmd_complete(cmd, 0);
3287 mgmt_pending_remove(cmd);
3293 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: locate the pending PAIR_DEVICE command
 * for the given address and complete it with MGMT_STATUS_CANCELLED, then
 * acknowledge the cancel request itself.
 */
3297 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3300 struct mgmt_addr_info *addr = data;
3301 struct pending_cmd *cmd;
3302 struct hci_conn *conn;
3309 if (!hdev_is_powered(hdev)) {
3310 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3311 MGMT_STATUS_NOT_POWERED);
3315 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3317 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3318 MGMT_STATUS_INVALID_PARAMS);
3322 conn = cmd->user_data;
/* The cancel must name the same peer the pending pairing targets */
3324 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3325 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3326 MGMT_STATUS_INVALID_PARAMS);
3330 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3331 mgmt_pending_remove(cmd);
3333 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3334 addr, sizeof(*addr));
3336 hci_dev_unlock(hdev);
/* Common backend for all user confirmation/passkey (neg-)reply mgmt
 * commands. For LE peers the response goes through the SMP layer; for
 * BR/EDR it is forwarded to the controller as the given HCI opcode
 * (with the passkey appended for HCI_OP_USER_PASSKEY_REPLY).
 */
3340 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3341 struct mgmt_addr_info *addr, u16 mgmt_op,
3342 u16 hci_op, __le32 passkey)
3344 struct pending_cmd *cmd;
3345 struct hci_conn *conn;
3350 if (!hdev_is_powered(hdev)) {
3351 err = cmd_complete(sk, hdev->id, mgmt_op,
3352 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the live connection on the transport matching the address type */
3357 if (addr->type == BDADDR_BREDR)
3358 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3360 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3363 err = cmd_complete(sk, hdev->id, mgmt_op,
3364 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP, not raw HCI */
3369 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3370 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3372 err = cmd_complete(sk, hdev->id, mgmt_op,
3373 MGMT_STATUS_SUCCESS, addr,
3376 err = cmd_complete(sk, hdev->id, mgmt_op,
3377 MGMT_STATUS_FAILED, addr,
3383 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3389 cmd->cmd_complete = addr_cmd_complete;
3391 /* Continue with pairing via HCI */
3392 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3393 struct hci_cp_user_passkey_reply cp;
3395 bacpy(&cp.bdaddr, &addr->bdaddr);
3396 cp.passkey = passkey;
3397 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3399 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3403 mgmt_pending_remove(cmd);
3406 hci_dev_unlock(hdev);
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY by delegating to the shared
 * user_pairing_resp() helper (no passkey involved).
 */
3410 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3411 void *data, u16 len)
3413 struct mgmt_cp_pin_code_neg_reply *cp = data;
3417 return user_pairing_resp(sk, hdev, &cp->addr,
3418 MGMT_OP_PIN_CODE_NEG_REPLY,
3419 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_REPLY: strict length check (this is the only
 * reply handler with a variable-risk payload), then delegate to
 * user_pairing_resp().
 */
3422 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3425 struct mgmt_cp_user_confirm_reply *cp = data;
3429 if (len != sizeof(*cp))
3430 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3431 MGMT_STATUS_INVALID_PARAMS);
3433 return user_pairing_resp(sk, hdev, &cp->addr,
3434 MGMT_OP_USER_CONFIRM_REPLY,
3435 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via user_pairing_resp(). */
3438 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3439 void *data, u16 len)
3441 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3445 return user_pairing_resp(sk, hdev, &cp->addr,
3446 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3447 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_PASSKEY_REPLY: forward the user-entered passkey via
 * user_pairing_resp(), which builds the HCI passkey reply.
 */
3450 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3453 struct mgmt_cp_user_passkey_reply *cp = data;
3457 return user_pairing_resp(sk, hdev, &cp->addr,
3458 MGMT_OP_USER_PASSKEY_REPLY,
3459 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via user_pairing_resp(). */
3462 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3463 void *data, u16 len)
3465 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3469 return user_pairing_resp(sk, hdev, &cp->addr,
3470 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3471 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue HCI_OP_WRITE_LOCAL_NAME on the request, copying the adapter's
 * current dev_name into the command payload.
 */
3474 static void update_name(struct hci_request *req)
3476 struct hci_dev *hdev = req->hdev;
3477 struct hci_cp_write_local_name cp;
3479 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3481 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Completion callback for the set_local_name HCI request: report either a
 * status failure or a command-complete to the originating socket, then
 * drop the pending command.
 */
3484 static void set_name_complete(struct hci_dev *hdev, u8 status)
3486 struct mgmt_cp_set_local_name *cp;
3487 struct pending_cmd *cmd;
3489 BT_DBG("status 0x%02x", status);
3493 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3500 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3501 mgmt_status(status));
3503 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3506 mgmt_pending_remove(cmd);
3509 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: short-circuit when nothing changed,
 * update the cached names when powered off (emitting LOCAL_NAME_CHANGED),
 * otherwise queue the HCI name/scan-response updates and complete
 * asynchronously in set_name_complete().
 */
3512 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3515 struct mgmt_cp_set_local_name *cp = data;
3516 struct pending_cmd *cmd;
3517 struct hci_request req;
3524 /* If the old values are the same as the new ones just return a
3525 * direct command complete event.
3527 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3528 !memcmp(hdev->short_name, cp->short_name,
3529 sizeof(hdev->short_name))) {
3530 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name never needs HCI traffic; store it unconditionally */
3535 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3537 if (!hdev_is_powered(hdev)) {
3538 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3540 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3545 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3551 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3557 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3559 hci_req_init(&req, hdev);
3561 if (lmp_bredr_capable(hdev)) {
3566 /* The name is stored in the scan response data and so
3567 * no need to udpate the advertising data here.
3569 if (lmp_le_capable(hdev))
3570 update_scan_rsp_data(&req);
3572 err = hci_req_run(&req, set_name_complete);
3574 mgmt_pending_remove(cmd);
3577 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: require a powered, SSP-capable
 * adapter and no duplicate request in flight, then issue either the
 * extended (Secure Connections) or classic local-OOB HCI read.
 */
3581 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3582 void *data, u16 data_len)
3584 struct pending_cmd *cmd;
3587 BT_DBG("%s", hdev->name);
3591 if (!hdev_is_powered(hdev)) {
3592 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3593 MGMT_STATUS_NOT_POWERED);
3597 if (!lmp_ssp_capable(hdev)) {
3598 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3599 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be pending at a time */
3603 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3604 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3609 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
/* SC-capable controllers return both P-192 and P-256 values */
3615 if (bredr_sc_enabled(hdev))
3616 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3619 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3622 mgmt_pending_remove(cmd);
3625 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA. The payload length selects the
 * variant: the legacy size carries only 192-bit hash/randomizer, the
 * extended size additionally carries the 256-bit (Secure Connections)
 * values. Only BDADDR_BREDR peers are accepted; any other length is
 * rejected as invalid.
 */
3629 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3630 void *data, u16 len)
3634 BT_DBG("%s ", hdev->name);
3638 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3639 struct mgmt_cp_add_remote_oob_data *cp = data;
3642 if (cp->addr.type != BDADDR_BREDR) {
3643 err = cmd_complete(sk, hdev->id,
3644 MGMT_OP_ADD_REMOTE_OOB_DATA,
3645 MGMT_STATUS_INVALID_PARAMS,
3646 &cp->addr, sizeof(cp->addr));
3650 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3651 cp->addr.type, cp->hash,
3652 cp->rand, NULL, NULL);
3654 status = MGMT_STATUS_FAILED;
3656 status = MGMT_STATUS_SUCCESS;
3658 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3659 status, &cp->addr, sizeof(cp->addr));
3660 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3661 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3662 u8 *rand192, *hash192;
3665 if (cp->addr.type != BDADDR_BREDR) {
3666 err = cmd_complete(sk, hdev->id,
3667 MGMT_OP_ADD_REMOTE_OOB_DATA,
3668 MGMT_STATUS_INVALID_PARAMS,
3669 &cp->addr, sizeof(cp->addr));
/* NOTE(review): the LE branch body (which upstream nulls out the 192-bit
 * values) appears truncated here — verify against the original file.
 */
3673 if (bdaddr_type_is_le(cp->addr.type)) {
3677 rand192 = cp->rand192;
3678 hash192 = cp->hash192;
3681 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3682 cp->addr.type, hash192, rand192,
3683 cp->hash256, cp->rand256);
3685 status = MGMT_STATUS_FAILED;
3687 status = MGMT_STATUS_SUCCESS;
3689 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3690 status, &cp->addr, sizeof(cp->addr));
3692 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3693 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3694 MGMT_STATUS_INVALID_PARAMS);
3698 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: BDADDR_ANY clears all stored
 * remote OOB entries; otherwise remove the entry for the given BR/EDR
 * address, mapping a lookup failure to INVALID_PARAMS.
 */
3702 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3703 void *data, u16 len)
3705 struct mgmt_cp_remove_remote_oob_data *cp = data;
3709 BT_DBG("%s", hdev->name);
3711 if (cp->addr.type != BDADDR_BREDR)
3712 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3713 MGMT_STATUS_INVALID_PARAMS,
3714 &cp->addr, sizeof(cp->addr));
/* Wildcard address: wipe the whole remote OOB store */
3718 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3719 hci_remote_oob_data_clear(hdev);
3720 status = MGMT_STATUS_SUCCESS;
3724 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3726 status = MGMT_STATUS_INVALID_PARAMS;
3728 status = MGMT_STATUS_SUCCESS;
3731 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3732 status, &cp->addr, sizeof(cp->addr));
3734 hci_dev_unlock(hdev);
/* Queue the HCI commands that start device discovery on @req according to
 * hdev->discovery.type. BR/EDR discovery issues a general inquiry (GIAC);
 * LE and interleaved discovery stop any running advertising/background
 * scan and start an active LE scan with a (non-)resolvable private own
 * address. Returns false with *status set on any precondition failure.
 *
 * FIX(review): "memset(¶m_cp, ...)" was mojibake — "&para" in
 * "&param_cp" had been collapsed to the pilcrow character by an HTML
 * entity mangling pass; restored to "&param_cp".
 */
3738 static bool trigger_discovery(struct hci_request *req, u8 *status)
3740 struct hci_dev *hdev = req->hdev;
3741 struct hci_cp_le_set_scan_param param_cp;
3742 struct hci_cp_le_set_scan_enable enable_cp;
3743 struct hci_cp_inquiry inq_cp;
3744 /* General inquiry access code (GIAC) */
3745 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3749 switch (hdev->discovery.type) {
3750 case DISCOV_TYPE_BREDR:
3751 *status = mgmt_bredr_support(hdev);
/* Refuse to stack a second inquiry on top of a running one */
3755 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3756 *status = MGMT_STATUS_BUSY;
3760 hci_inquiry_cache_flush(hdev);
3762 memset(&inq_cp, 0, sizeof(inq_cp));
3763 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3764 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3765 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3768 case DISCOV_TYPE_LE:
3769 case DISCOV_TYPE_INTERLEAVED:
3770 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs the BR/EDR side enabled too */
3774 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3775 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3776 *status = MGMT_STATUS_NOT_SUPPORTED;
3780 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3781 /* Don't let discovery abort an outgoing
3782 * connection attempt that's using directed
3785 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3787 *status = MGMT_STATUS_REJECTED;
3791 disable_advertising(req);
3794 /* If controller is scanning, it means the background scanning
3795 * is running. Thus, we should temporarily stop it in order to
3796 * set the discovery scanning parameters.
3798 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3799 hci_req_add_le_scan_disable(req);
3801 memset(&param_cp, 0, sizeof(param_cp));
3803 /* All active scans will be done with either a resolvable
3804 * private address (when privacy feature has been enabled)
3805 * or non-resolvable private address.
3807 err = hci_update_random_address(req, true, &own_addr_type);
3809 *status = MGMT_STATUS_FAILED;
3813 param_cp.type = LE_SCAN_ACTIVE;
3814 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3815 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3816 param_cp.own_address_type = own_addr_type;
3817 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3820 memset(&enable_cp, 0, sizeof(enable_cp));
3821 enable_cp.enable = LE_SCAN_ENABLE;
3822 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3823 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type */
3828 *status = MGMT_STATUS_INVALID_PARAMS;
/* Completion callback shared by START_DISCOVERY and
 * START_SERVICE_DISCOVERY requests: report the status to the pending
 * command, set the discovery state, and for LE-based discovery schedule
 * the delayed le_scan_disable work with a type-specific timeout.
 */
3835 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3837 struct pending_cmd *cmd;
3838 unsigned long timeout;
3840 BT_DBG("status %d", status);
/* Either plain or service discovery may have initiated this request */
3844 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3846 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3849 cmd->cmd_complete(cmd, mgmt_status(status));
3850 mgmt_pending_remove(cmd);
3854 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3858 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3860 switch (hdev->discovery.type) {
3861 case DISCOV_TYPE_LE:
3862 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3864 case DISCOV_TYPE_INTERLEAVED:
3865 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3867 case DISCOV_TYPE_BREDR:
3871 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3877 queue_delayed_work(hdev->workqueue,
3878 &hdev->le_scan_disable, timeout);
3881 hci_dev_unlock(hdev);
/* Handle MGMT_OP_START_DISCOVERY: check power and that no discovery (or
 * periodic inquiry) is active, reset the discovery filter, build the HCI
 * request via trigger_discovery() and run it; completion is reported from
 * start_discovery_complete().
 */
3884 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3885 void *data, u16 len)
3887 struct mgmt_cp_start_discovery *cp = data;
3888 struct pending_cmd *cmd;
3889 struct hci_request req;
3893 BT_DBG("%s", hdev->name);
3897 if (!hdev_is_powered(hdev)) {
3898 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3899 MGMT_STATUS_NOT_POWERED,
3900 &cp->type, sizeof(cp->type));
3904 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3905 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3906 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3907 MGMT_STATUS_BUSY, &cp->type,
3912 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
3918 cmd->cmd_complete = generic_cmd_complete;
3920 /* Clear the discovery filter first to free any previously
3921 * allocated memory for the UUID list.
3923 hci_discovery_filter_clear(hdev);
3925 hdev->discovery.type = cp->type;
3926 hdev->discovery.report_invalid_rssi = false;
3928 hci_req_init(&req, hdev);
3930 if (!trigger_discovery(&req, &status)) {
3931 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3932 status, &cp->type, sizeof(cp->type));
3933 mgmt_pending_remove(cmd);
3937 err = hci_req_run(&req, start_discovery_complete);
3939 mgmt_pending_remove(cmd);
3943 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3946 hci_dev_unlock(hdev);
/* cmd_complete hook for START_SERVICE_DISCOVERY: echo back the first byte
 * of the stored parameters (the discovery type) with the given status.
 */
3950 static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3952 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery() but with
 * an RSSI threshold and a variable-length UUID filter list. Validates
 * uuid_count against both an overflow bound and the actual payload
 * length, copies the UUIDs into hdev->discovery, then triggers the scan.
 */
3955 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3956 void *data, u16 len)
3958 struct mgmt_cp_start_service_discovery *cp = data;
3959 struct pending_cmd *cmd;
3960 struct hci_request req;
/* Upper bound keeps sizeof(*cp) + uuid_count * 16 from overflowing u16 */
3961 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3962 u16 uuid_count, expected_len;
3966 BT_DBG("%s", hdev->name);
3970 if (!hdev_is_powered(hdev)) {
3971 err = cmd_complete(sk, hdev->id,
3972 MGMT_OP_START_SERVICE_DISCOVERY,
3973 MGMT_STATUS_NOT_POWERED,
3974 &cp->type, sizeof(cp->type));
3978 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3979 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3980 err = cmd_complete(sk, hdev->id,
3981 MGMT_OP_START_SERVICE_DISCOVERY,
3982 MGMT_STATUS_BUSY, &cp->type,
3987 uuid_count = __le16_to_cpu(cp->uuid_count);
3988 if (uuid_count > max_uuid_count) {
3989 BT_ERR("service_discovery: too big uuid_count value %u",
3991 err = cmd_complete(sk, hdev->id,
3992 MGMT_OP_START_SERVICE_DISCOVERY,
3993 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The declared UUID count must exactly match the payload size */
3998 expected_len = sizeof(*cp) + uuid_count * 16;
3999 if (expected_len != len) {
4000 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4002 err = cmd_complete(sk, hdev->id,
4003 MGMT_OP_START_SERVICE_DISCOVERY,
4004 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4009 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4016 cmd->cmd_complete = service_discovery_cmd_complete;
4018 /* Clear the discovery filter first to free any previously
4019 * allocated memory for the UUID list.
4021 hci_discovery_filter_clear(hdev);
4023 hdev->discovery.type = cp->type;
4024 hdev->discovery.rssi = cp->rssi;
4025 hdev->discovery.uuid_count = uuid_count;
4027 if (uuid_count > 0) {
4028 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4030 if (!hdev->discovery.uuids) {
4031 err = cmd_complete(sk, hdev->id,
4032 MGMT_OP_START_SERVICE_DISCOVERY,
4034 &cp->type, sizeof(cp->type));
4035 mgmt_pending_remove(cmd);
4040 hci_req_init(&req, hdev);
4042 if (!trigger_discovery(&req, &status)) {
4043 err = cmd_complete(sk, hdev->id,
4044 MGMT_OP_START_SERVICE_DISCOVERY,
4045 status, &cp->type, sizeof(cp->type));
4046 mgmt_pending_remove(cmd);
4050 err = hci_req_run(&req, start_discovery_complete);
4052 mgmt_pending_remove(cmd);
4056 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4059 hci_dev_unlock(hdev);
/* Completion callback for stop_discovery(): complete the pending
 * STOP_DISCOVERY command (if still pending) and mark discovery stopped.
 */
4063 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
4065 struct pending_cmd *cmd;
4067 BT_DBG("status %d", status);
4071 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4073 cmd->cmd_complete(cmd, mgmt_status(status));
4074 mgmt_pending_remove(cmd);
4078 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4080 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: verify a discovery of the requested type
 * is actually active, queue the HCI stop sequence and run it. If no HCI
 * commands were needed (-ENODATA) the stop is acknowledged synchronously.
 */
4083 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4086 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4087 struct pending_cmd *cmd;
4088 struct hci_request req;
4091 BT_DBG("%s", hdev->name);
4095 if (!hci_discovery_active(hdev)) {
4096 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4097 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4098 sizeof(mgmt_cp->type));
/* The requested type must match the discovery currently running */
4102 if (hdev->discovery.type != mgmt_cp->type) {
4103 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4104 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4105 sizeof(mgmt_cp->type));
4109 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4115 cmd->cmd_complete = generic_cmd_complete;
4117 hci_req_init(&req, hdev);
4119 hci_stop_discovery(&req);
4121 err = hci_req_run(&req, stop_discovery_complete);
4123 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4127 mgmt_pending_remove(cmd);
4129 /* If no HCI commands were sent we're done */
4130 if (err == -ENODATA) {
4131 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4132 &mgmt_cp->type, sizeof(mgmt_cp->type));
4133 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4137 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: during active discovery, mark an inquiry
 * cache entry's name as known or needed (the latter re-queues it for name
 * resolution), then acknowledge with the address.
 */
4141 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4144 struct mgmt_cp_confirm_name *cp = data;
4145 struct inquiry_entry *e;
4148 BT_DBG("%s", hdev->name);
4152 if (!hci_discovery_active(hdev)) {
4153 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4154 MGMT_STATUS_FAILED, &cp->addr,
4159 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4161 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4162 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4167 if (cp->name_known) {
4168 e->name_state = NAME_KNOWN;
4171 e->name_state = NAME_NEEDED;
4172 hci_inquiry_cache_update_resolve(hdev, e);
4175 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4179 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the address to the adapter blacklist
 * and, on success, broadcast MGMT_EV_DEVICE_BLOCKED to other mgmt sockets.
 */
4183 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4186 struct mgmt_cp_block_device *cp = data;
4190 BT_DBG("%s", hdev->name);
4192 if (!bdaddr_type_is_valid(cp->addr.type))
4193 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4194 MGMT_STATUS_INVALID_PARAMS,
4195 &cp->addr, sizeof(cp->addr));
4199 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4202 status = MGMT_STATUS_FAILED;
4206 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4208 status = MGMT_STATUS_SUCCESS;
4211 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4212 &cp->addr, sizeof(cp->addr));
4214 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: mirror of block_device() — remove the
 * address from the blacklist (failure means it was not listed) and emit
 * MGMT_EV_DEVICE_UNBLOCKED on success.
 */
4219 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4222 struct mgmt_cp_unblock_device *cp = data;
4226 BT_DBG("%s", hdev->name);
4228 if (!bdaddr_type_is_valid(cp->addr.type))
4229 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4230 MGMT_STATUS_INVALID_PARAMS,
4231 &cp->addr, sizeof(cp->addr));
4235 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4238 status = MGMT_STATUS_INVALID_PARAMS;
4242 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4244 status = MGMT_STATUS_SUCCESS;
4247 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4248 &cp->addr, sizeof(cp->addr));
4250 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: validate the DI source (0x0000-0x0002 per
 * the DI profile), store source/vendor/product/version on the adapter and
 * queue an HCI request (EIR/class update lines truncated here) to publish
 * the new device ID.
 */
4255 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4258 struct mgmt_cp_set_device_id *cp = data;
4259 struct hci_request req;
4263 BT_DBG("%s", hdev->name);
4265 source = __le16_to_cpu(cp->source);
4267 if (source > 0x0002)
4268 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4269 MGMT_STATUS_INVALID_PARAMS);
4273 hdev->devid_source = source;
4274 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4275 hdev->devid_product = __le16_to_cpu(cp->product);
4276 hdev->devid_version = __le16_to_cpu(cp->version);
4278 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4280 hci_req_init(&req, hdev);
4282 hci_req_run(&req, NULL);
4284 hci_dev_unlock(hdev);
/* Completion callback for set_advertising(): on failure send cmd_status to
 * every pending SET_ADVERTISING command; on success sync the
 * HCI_ADVERTISING flag with the controller state (HCI_LE_ADV), answer all
 * pending commands and emit a New Settings event.
 */
4289 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4291 struct cmd_lookup match = { NULL, hdev };
4296 u8 mgmt_err = mgmt_status(status);
4298 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4299 cmd_status_rsp, &mgmt_err);
4303 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4304 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4306 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4308 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4311 new_settings(hdev, match.sk);
4317 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising. When HCI traffic
 * is impossible or pointless (powered off, no change, LE links up, or an
 * active LE scan in progress) only the flag is toggled and settings are
 * reported directly; otherwise an enable/disable request is queued and
 * completed in set_advertising_complete().
 */
4320 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4323 struct mgmt_mode *cp = data;
4324 struct pending_cmd *cmd;
4325 struct hci_request req;
4326 u8 val, enabled, status;
4329 BT_DBG("request for %s", hdev->name);
4331 status = mgmt_le_support(hdev);
4333 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4336 if (cp->val != 0x00 && cp->val != 0x01)
4337 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4338 MGMT_STATUS_INVALID_PARAMS);
4343 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4345 /* The following conditions are ones which mean that we should
4346 * not do any HCI communication but directly send a mgmt
4347 * response to user space (after toggling the flag if
4350 if (!hdev_is_powered(hdev) || val == enabled ||
4351 hci_conn_num(hdev, LE_LINK) > 0 ||
4352 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4353 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4354 bool changed = false;
4356 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4357 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4361 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4366 err = new_settings(hdev, sk);
/* Serialize against other in-flight advertising/LE state changes */
4371 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4372 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4373 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4378 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4384 hci_req_init(&req, hdev);
4387 enable_advertising(&req);
4389 disable_advertising(&req);
4391 err = hci_req_run(&req, set_advertising_complete);
4393 mgmt_pending_remove(cmd);
4396 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: only allowed on LE-capable, powered-
 * off adapters. A non-ANY address must not be BDADDR_NONE and must have
 * the two most significant bits set, as required for an LE static random
 * address by the Core spec.
 */
4400 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4401 void *data, u16 len)
4403 struct mgmt_cp_set_static_address *cp = data;
4406 BT_DBG("%s", hdev->name);
4408 if (!lmp_le_capable(hdev))
4409 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4410 MGMT_STATUS_NOT_SUPPORTED);
4412 if (hdev_is_powered(hdev))
4413 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4414 MGMT_STATUS_REJECTED);
4416 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4417 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4418 return cmd_status(sk, hdev->id,
4419 MGMT_OP_SET_STATIC_ADDRESS,
4420 MGMT_STATUS_INVALID_PARAMS);
4422 /* Two most significant bits shall be set */
4423 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4424 return cmd_status(sk, hdev->id,
4425 MGMT_OP_SET_STATIC_ADDRESS,
4426 MGMT_STATUS_INVALID_PARAMS);
4431 bacpy(&hdev->static_addr, &cp->bdaddr);
4433 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4435 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: validate LE scan interval and window
 * (each 0x0004-0x4000, window <= interval, matching HCI limits), store
 * them, and restart background scanning so the new values take effect
 * when no discovery is running.
 */
4440 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4441 void *data, u16 len)
4443 struct mgmt_cp_set_scan_params *cp = data;
4444 __u16 interval, window;
4447 BT_DBG("%s", hdev->name);
4449 if (!lmp_le_capable(hdev))
4450 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4451 MGMT_STATUS_NOT_SUPPORTED);
4453 interval = __le16_to_cpu(cp->interval);
4455 if (interval < 0x0004 || interval > 0x4000)
4456 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4457 MGMT_STATUS_INVALID_PARAMS);
4459 window = __le16_to_cpu(cp->window);
4461 if (window < 0x0004 || window > 0x4000)
4462 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4463 MGMT_STATUS_INVALID_PARAMS);
4465 if (window > interval)
4466 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4467 MGMT_STATUS_INVALID_PARAMS);
4471 hdev->le_scan_interval = interval;
4472 hdev->le_scan_window = window;
4474 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4476 /* If background scan is running, restart it so new parameters are
4479 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4480 hdev->discovery.state == DISCOVERY_STOPPED) {
4481 struct hci_request req;
4483 hci_req_init(&req, hdev);
4485 hci_req_add_le_scan_disable(&req);
4486 hci_req_add_le_passive_scan(&req);
4488 hci_req_run(&req, NULL);
4491 hci_dev_unlock(hdev);
/* Completion callback for set_fast_connectable(): on failure report the
 * status; on success commit HCI_FAST_CONNECTABLE according to the stored
 * request value, answer the pending command and emit New Settings.
 */
4496 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4498 struct pending_cmd *cmd;
4500 BT_DBG("status 0x%02x", status);
4504 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4509 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4510 mgmt_status(status));
4512 struct mgmt_mode *cp = cmd->param;
4515 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4517 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4519 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4520 new_settings(hdev, cmd->sk);
4523 mgmt_pending_remove(cmd);
4526 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR enabled on a
 * >= 1.2 controller that is powered and connectable. A no-op toggle is
 * answered directly; otherwise the page-scan change is queued and the
 * flag committed in fast_connectable_complete().
 */
4529 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4530 void *data, u16 len)
4532 struct mgmt_mode *cp = data;
4533 struct pending_cmd *cmd;
4534 struct hci_request req;
4537 BT_DBG("%s", hdev->name);
4539 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4540 hdev->hci_ver < BLUETOOTH_VER_1_2)
4541 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4542 MGMT_STATUS_NOT_SUPPORTED);
4544 if (cp->val != 0x00 && cp->val != 0x01)
4545 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4546 MGMT_STATUS_INVALID_PARAMS);
4548 if (!hdev_is_powered(hdev))
4549 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4550 MGMT_STATUS_NOT_POWERED);
4552 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4553 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4554 MGMT_STATUS_REJECTED);
4558 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4559 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested value already in effect: answer without HCI traffic */
4564 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4565 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4570 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4577 hci_req_init(&req, hdev);
4579 write_fast_connectable(&req, cp->val);
4581 err = hci_req_run(&req, fast_connectable_complete);
4583 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4584 MGMT_STATUS_FAILED);
4585 mgmt_pending_remove(cmd);
4589 hci_dev_unlock(hdev);
/* Completion callback for the HCI request issued by set_bredr().
 * On failure it rolls back the optimistically-set HCI_BREDR_ENABLED flag
 * and reports the error; on success it sends the settings response and a
 * New Settings event.
 */
4594 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4596 struct pending_cmd *cmd;
4598 BT_DBG("status 0x%02x", status);
4602 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4607 u8 mgmt_err = mgmt_status(status);
4609 /* We need to restore the flag if related HCI commands
4612 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4614 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4616 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4617 new_settings(hdev, cmd->sk);
4620 mgmt_pending_remove(cmd);
4623 hci_dev_unlock(hdev);
/* Handler for the mgmt Set BR/EDR command (toggles BR/EDR support on a
 * dual-mode controller). Requires both BR/EDR and LE capability and LE
 * enabled; value must be 0x00/0x01. Disabling while powered is rejected.
 * When powered off the flag is flipped directly; when powered on the
 * change is driven through an HCI request completed in
 * set_bredr_complete().
 * NOTE(review): several lines are elided in this excerpt (e.g. the
 * cp->val checks guarding the clear_bit run) — verify against full source.
 */
4626 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4628 struct mgmt_mode *cp = data;
4629 struct pending_cmd *cmd;
4630 struct hci_request req;
4633 BT_DBG("request for %s", hdev->name);
4635 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4636 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4637 MGMT_STATUS_NOT_SUPPORTED);
4639 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4640 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4641 MGMT_STATUS_REJECTED);
4643 if (cp->val != 0x00 && cp->val != 0x01)
4644 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4645 MGMT_STATUS_INVALID_PARAMS);
4649 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4650 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: clear BR/EDR-dependent settings and toggle the flag
 * directly — no HCI traffic is needed or possible.
 */
4654 if (!hdev_is_powered(hdev)) {
4656 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4657 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4658 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4659 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4660 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4663 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4665 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4669 err = new_settings(hdev, sk);
4673 /* Reject disabling when powered on */
4675 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4676 MGMT_STATUS_REJECTED);
4680 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4681 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4686 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4692 /* We need to flip the bit already here so that update_adv_data
4693 * generates the correct flags.
4695 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4697 hci_req_init(&req, hdev);
4699 write_fast_connectable(&req, false);
4700 hci_update_page_scan(hdev, &req);
4702 /* Since only the advertising data flags will change, there
4703 * is no need to update the scan response data.
4705 update_adv_data(&req);
4707 err = hci_req_run(&req, set_bredr_complete);
4709 mgmt_pending_remove(cmd);
4712 hci_dev_unlock(hdev);
/* Handler for the mgmt Set Secure Connections command.
 * Accepted values: 0x00 (off), 0x01 (on), 0x02 (SC-only mode).
 * Requires LE enabled, controller SC capability, or the FORCE_SC debug
 * flag. When the controller is unpowered / SC-incapable / BR/EDR-disabled
 * only the dev_flags are updated; otherwise the change is sent to the
 * controller with HCI Write Secure Connections Support and the flags are
 * committed on success.
 */
4716 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4717 void *data, u16 len)
4719 struct mgmt_mode *cp = data;
4720 struct pending_cmd *cmd;
4724 BT_DBG("request for %s", hdev->name);
4726 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4727 !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4728 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4729 MGMT_STATUS_NOT_SUPPORTED);
4731 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4732 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4733 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only update path: no HCI command can be sent in these states. */
4737 if (!hdev_is_powered(hdev) ||
4738 (!lmp_sc_capable(hdev) &&
4739 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
4740 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4744 changed = !test_and_set_bit(HCI_SC_ENABLED,
4746 if (cp->val == 0x02)
4747 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4749 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4751 changed = test_and_clear_bit(HCI_SC_ENABLED,
4753 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4756 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4761 err = new_settings(hdev, sk);
4766 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4767 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op if both the enabled and SC-only states already match. */
4774 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4775 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4776 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4780 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4786 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4788 mgmt_pending_remove(cmd);
4792 if (cp->val == 0x02)
4793 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4795 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4798 hci_dev_unlock(hdev);
/* Handler for the mgmt Set Debug Keys command.
 * Values: 0x00 (discard debug keys), 0x01 (keep them), 0x02 (keep and
 * actively use debug keys). The keep/use states are tracked in
 * HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS; if the "use" state changed
 * while powered with SSP enabled, the controller's SSP debug mode is
 * updated via HCI Write SSP Debug Mode.
 */
4802 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4803 void *data, u16 len)
4805 struct mgmt_mode *cp = data;
4806 bool changed, use_changed;
4809 BT_DBG("request for %s", hdev->name);
4811 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4812 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4813 MGMT_STATUS_INVALID_PARAMS);
4818 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4821 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4824 if (cp->val == 0x02)
4825 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4828 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
/* Push the new debug-mode to the controller only when it matters. */
4831 if (hdev_is_powered(hdev) && use_changed &&
4832 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4833 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4834 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4835 sizeof(mode), &mode);
4838 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4843 err = new_settings(hdev, sk);
4846 hci_dev_unlock(hdev);
/* Handler for the mgmt Set Privacy command.
 * Requires LE capability and a powered-off adapter; privacy value must be
 * 0x00 or 0x01. Enabling stores the supplied IRK and marks the RPA as
 * expired so a fresh resolvable private address is generated; disabling
 * clears both. Supporting this command implies userspace handles IRKs,
 * so HCI_RPA_RESOLVING is set unconditionally.
 */
4850 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4853 struct mgmt_cp_set_privacy *cp = cp_data;
4857 BT_DBG("request for %s", hdev->name);
4859 if (!lmp_le_capable(hdev))
4860 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4861 MGMT_STATUS_NOT_SUPPORTED);
4863 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4864 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4865 MGMT_STATUS_INVALID_PARAMS);
4867 if (hdev_is_powered(hdev))
4868 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4869 MGMT_STATUS_REJECTED);
4873 /* If user space supports this command it is also expected to
4874 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4876 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4879 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4880 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4881 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4883 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4884 memset(hdev->irk, 0, sizeof(hdev->irk));
4885 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4888 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4893 err = new_settings(hdev, sk);
4896 hci_dev_unlock(hdev);
/* Validate the address of an IRK entry supplied by userspace:
 * LE public addresses are always acceptable; LE random addresses must be
 * static random (two most significant bits set, per the Core spec).
 */
4900 static bool irk_is_valid(struct mgmt_irk_info *irk)
4902 switch (irk->addr.type) {
4903 case BDADDR_LE_PUBLIC:
4906 case BDADDR_LE_RANDOM:
4907 /* Two most significant bits shall be set */
4908 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for the mgmt Load IRKs command.
 * Validates the advertised count against the maximum that fits in a u16
 * payload and against the actual message length, rejects any entry with
 * an invalid address, then atomically replaces the kernel's IRK store:
 * clears existing IRKs and adds each new one with the mapped internal
 * address type. Finally enables RPA resolving.
 */
4916 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4919 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound keeps irk_count * entry size from overflowing u16 len. */
4920 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4921 sizeof(struct mgmt_irk_info));
4922 u16 irk_count, expected_len;
4925 BT_DBG("request for %s", hdev->name);
4927 if (!lmp_le_capable(hdev))
4928 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4929 MGMT_STATUS_NOT_SUPPORTED);
4931 irk_count = __le16_to_cpu(cp->irk_count);
4932 if (irk_count > max_irk_count) {
4933 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4934 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4935 MGMT_STATUS_INVALID_PARAMS);
4938 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4939 if (expected_len != len) {
4940 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4942 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4943 MGMT_STATUS_INVALID_PARAMS);
4946 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before touching the existing IRK store. */
4948 for (i = 0; i < irk_count; i++) {
4949 struct mgmt_irk_info *key = &cp->irks[i];
4951 if (!irk_is_valid(key))
4952 return cmd_status(sk, hdev->id,
4954 MGMT_STATUS_INVALID_PARAMS);
4959 hci_smp_irks_clear(hdev);
4961 for (i = 0; i < irk_count; i++) {
4962 struct mgmt_irk_info *irk = &cp->irks[i];
4965 if (irk->addr.type == BDADDR_LE_PUBLIC)
4966 addr_type = ADDR_LE_DEV_PUBLIC;
4968 addr_type = ADDR_LE_DEV_RANDOM;
4970 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4974 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4976 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4978 hci_dev_unlock(hdev);
/* Validate one long-term key entry from userspace: the master field must
 * be 0x00/0x01 and the address must be LE public or static random (two
 * most significant bits set).
 */
4983 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4985 if (key->master != 0x00 && key->master != 0x01)
4988 switch (key->addr.type) {
4989 case BDADDR_LE_PUBLIC:
4992 case BDADDR_LE_RANDOM:
4993 /* Two most significant bits shall be set */
4994 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for the mgmt Load Long Term Keys command.
 * After count/length/entry validation (mirroring load_irks()), the SMP
 * LTK store is cleared and repopulated. Each mgmt key type is mapped to
 * the internal SMP key type and authentication level:
 *   UNAUTHENTICATED/AUTHENTICATED -> legacy SMP_LTK or SMP_LTK_SLAVE
 *     depending on key->master,
 *   P256_UNAUTH/P256_AUTH         -> SMP_LTK_P256,
 *   P256_DEBUG                    -> SMP_LTK_P256_DEBUG.
 * NOTE(review): the switch's default branch is elided in this excerpt —
 * presumably unknown types are skipped; confirm against the full source.
 */
5002 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5003 void *cp_data, u16 len)
5005 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps key_count * entry size within the u16 length. */
5006 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5007 sizeof(struct mgmt_ltk_info));
5008 u16 key_count, expected_len;
5011 BT_DBG("request for %s", hdev->name);
5013 if (!lmp_le_capable(hdev))
5014 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5015 MGMT_STATUS_NOT_SUPPORTED);
5017 key_count = __le16_to_cpu(cp->key_count);
5018 if (key_count > max_key_count) {
5019 BT_ERR("load_ltks: too big key_count value %u", key_count);
5020 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5021 MGMT_STATUS_INVALID_PARAMS);
5024 expected_len = sizeof(*cp) + key_count *
5025 sizeof(struct mgmt_ltk_info);
5026 if (expected_len != len) {
5027 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5029 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5030 MGMT_STATUS_INVALID_PARAMS);
5033 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all entries before clearing the existing key store. */
5035 for (i = 0; i < key_count; i++) {
5036 struct mgmt_ltk_info *key = &cp->keys[i];
5038 if (!ltk_is_valid(key))
5039 return cmd_status(sk, hdev->id,
5040 MGMT_OP_LOAD_LONG_TERM_KEYS,
5041 MGMT_STATUS_INVALID_PARAMS);
5046 hci_smp_ltks_clear(hdev);
5048 for (i = 0; i < key_count; i++) {
5049 struct mgmt_ltk_info *key = &cp->keys[i];
5050 u8 type, addr_type, authenticated;
5052 if (key->addr.type == BDADDR_LE_PUBLIC)
5053 addr_type = ADDR_LE_DEV_PUBLIC;
5055 addr_type = ADDR_LE_DEV_RANDOM;
5057 switch (key->type) {
5058 case MGMT_LTK_UNAUTHENTICATED:
5059 authenticated = 0x00;
5060 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5062 case MGMT_LTK_AUTHENTICATED:
5063 authenticated = 0x01;
5064 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5066 case MGMT_LTK_P256_UNAUTH:
5067 authenticated = 0x00;
5068 type = SMP_LTK_P256;
5070 case MGMT_LTK_P256_AUTH:
5071 authenticated = 0x01;
5072 type = SMP_LTK_P256;
5074 case MGMT_LTK_P256_DEBUG:
5075 authenticated = 0x00;
5076 type = SMP_LTK_P256_DEBUG;
5081 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5082 authenticated, key->val, key->enc_size, key->ediv,
5086 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5089 hci_dev_unlock(hdev);
/* cmd_complete hook for a pending Get Connection Info command.
 * Copies the address from the saved command parameter, fills in the
 * cached RSSI/TX-power values on success (or the INVALID sentinels on
 * error), sends the mgmt reply, and drops the connection reference taken
 * by get_conn_info().
 */
5094 static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5096 struct hci_conn *conn = cmd->user_data;
5097 struct mgmt_rp_get_conn_info rp;
5099 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5101 if (status == MGMT_STATUS_SUCCESS) {
5102 rp.rssi = conn->rssi;
5103 rp.tx_power = conn->tx_power;
5104 rp.max_tx_power = conn->max_tx_power;
5106 rp.rssi = HCI_RSSI_INVALID;
5107 rp.tx_power = HCI_TX_POWER_INVALID;
5108 rp.max_tx_power = HCI_TX_POWER_INVALID;
5111 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5114 hci_conn_drop(conn);
/* HCI request completion for the RSSI / TX-power refresh issued by
 * get_conn_info(). Recovers the connection handle from whichever command
 * was sent last (both start with the handle field), looks up the
 * connection and the matching pending mgmt command, and finishes it via
 * its cmd_complete callback.
 */
5118 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
5120 struct hci_cp_read_rssi *cp;
5121 struct pending_cmd *cmd;
5122 struct hci_conn *conn;
5126 BT_DBG("status 0x%02x", hci_status);
5130 /* Commands sent in request are either Read RSSI or Read Transmit Power
5131 * Level so we check which one was last sent to retrieve connection
5132 * handle. Both commands have handle as first parameter so it's safe to
5133 * cast data on the same command struct.
5135 * First command sent is always Read RSSI and we fail only if it fails.
5136 * In other case we simply override error to indicate success as we
5137 * already remembered if TX power value is actually valid.
5139 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5141 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5142 status = MGMT_STATUS_SUCCESS;
5144 status = mgmt_status(hci_status);
/* Neither command found: the request state is inconsistent. */
5148 BT_ERR("invalid sent_cmd in conn_info response");
5152 handle = __le16_to_cpu(cp->handle);
5153 conn = hci_conn_hash_lookup_handle(hdev, handle);
5155 BT_ERR("unknown handle (%d) in conn_info response", handle);
5159 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5163 cmd->cmd_complete(cmd, status);
5164 mgmt_pending_remove(cmd);
5167 hci_dev_unlock(hdev);
/* Handler for the mgmt Get Connection Info command.
 * Replies with RSSI / TX power / max TX power for a connected peer.
 * If the cached values are older than a randomized age window (between
 * hdev->conn_info_min_age and conn_info_max_age) or were never read, an
 * HCI request is queued to refresh them and the reply is deferred to
 * conn_info_refresh_complete(); otherwise the cached values are returned
 * immediately.
 */
5170 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5173 struct mgmt_cp_get_conn_info *cp = data;
5174 struct mgmt_rp_get_conn_info rp;
5175 struct hci_conn *conn;
5176 unsigned long conn_info_age;
5179 BT_DBG("%s", hdev->name);
5181 memset(&rp, 0, sizeof(rp));
5182 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5183 rp.addr.type = cp->addr.type;
5185 if (!bdaddr_type_is_valid(cp->addr.type))
5186 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5187 MGMT_STATUS_INVALID_PARAMS,
5192 if (!hdev_is_powered(hdev)) {
5193 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5194 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Resolve the connection on the right transport for the address type. */
5198 if (cp->addr.type == BDADDR_BREDR)
5199 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5202 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5204 if (!conn || conn->state != BT_CONNECTED) {
5205 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5206 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
5210 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5211 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5212 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5216 /* To avoid client trying to guess when to poll again for information we
5217 * calculate conn info age as random value between min/max set in hdev.
5219 conn_info_age = hdev->conn_info_min_age +
5220 prandom_u32_max(hdev->conn_info_max_age -
5221 hdev->conn_info_min_age);
5223 /* Query controller to refresh cached values if they are too old or were
5226 if (time_after(jiffies, conn->conn_info_timestamp +
5227 msecs_to_jiffies(conn_info_age)) ||
5228 !conn->conn_info_timestamp) {
5229 struct hci_request req;
5230 struct hci_cp_read_tx_power req_txp_cp;
5231 struct hci_cp_read_rssi req_rssi_cp;
5232 struct pending_cmd *cmd;
5234 hci_req_init(&req, hdev);
5235 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5236 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5239 /* For LE links TX power does not change thus we don't need to
5240 * query for it once value is known.
5242 if (!bdaddr_type_is_le(cp->addr.type) ||
5243 conn->tx_power == HCI_TX_POWER_INVALID) {
5244 req_txp_cp.handle = cpu_to_le16(conn->handle);
5245 req_txp_cp.type = 0x00;
5246 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5247 sizeof(req_txp_cp), &req_txp_cp);
5250 /* Max TX power needs to be read only once per connection */
5251 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5252 req_txp_cp.handle = cpu_to_le16(conn->handle);
5253 req_txp_cp.type = 0x01;
5254 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5255 sizeof(req_txp_cp), &req_txp_cp);
5258 err = hci_req_run(&req, conn_info_refresh_complete);
5262 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a connection ref until the completion callback drops it. */
5269 hci_conn_hold(conn);
5270 cmd->user_data = hci_conn_get(conn);
5271 cmd->cmd_complete = conn_info_cmd_complete;
5273 conn->conn_info_timestamp = jiffies;
5275 /* Cache is valid, just reply with values cached in hci_conn */
5276 rp.rssi = conn->rssi;
5277 rp.tx_power = conn->tx_power;
5278 rp.max_tx_power = conn->max_tx_power;
5280 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5281 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5285 hci_dev_unlock(hdev);
/* cmd_complete hook for a pending Get Clock Info command.
 * Builds the reply from the local clock (looked up via cmd->index) and,
 * when a connection was involved, the piconet clock and accuracy, then
 * sends it and releases the connection reference.
 */
5289 static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5291 struct hci_conn *conn = cmd->user_data;
5292 struct mgmt_rp_get_clock_info rp;
5293 struct hci_dev *hdev;
5295 memset(&rp, 0, sizeof(rp));
5296 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5301 hdev = hci_dev_get(cmd->index);
5303 rp.local_clock = cpu_to_le32(hdev->clock);
5308 rp.piconet_clock = cpu_to_le32(conn->clock);
5309 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5313 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
5316 hci_conn_drop(conn);
/* HCI request completion for Read Clock issued by get_clock_info().
 * If the piconet clock was requested ("which" non-zero), the connection
 * is recovered from the handle in the sent command; the matching pending
 * mgmt command is then finished through its cmd_complete callback.
 */
5321 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5323 struct hci_cp_read_clock *hci_cp;
5324 struct pending_cmd *cmd;
5325 struct hci_conn *conn;
5327 BT_DBG("%s status %u", hdev->name, status);
5331 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5335 if (hci_cp->which) {
5336 u16 handle = __le16_to_cpu(hci_cp->handle);
5337 conn = hci_conn_hash_lookup_handle(hdev, handle);
5342 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5346 cmd->cmd_complete(cmd, mgmt_status(status));
5347 mgmt_pending_remove(cmd);
5350 hci_dev_unlock(hdev);
/* Handler for the mgmt Get Clock Info command (BR/EDR only).
 * Always reads the local clock; if a non-ANY peer address is given and a
 * BR/EDR connection exists, also queues a piconet-clock read for it.
 * The reply is sent asynchronously from clock_info_cmd_complete() once
 * get_clock_info_complete() fires.
 */
5353 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5356 struct mgmt_cp_get_clock_info *cp = data;
5357 struct mgmt_rp_get_clock_info rp;
5358 struct hci_cp_read_clock hci_cp;
5359 struct pending_cmd *cmd;
5360 struct hci_request req;
5361 struct hci_conn *conn;
5364 BT_DBG("%s", hdev->name);
5366 memset(&rp, 0, sizeof(rp));
5367 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5368 rp.addr.type = cp->addr.type;
5370 if (cp->addr.type != BDADDR_BREDR)
5371 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5372 MGMT_STATUS_INVALID_PARAMS,
5377 if (!hdev_is_powered(hdev)) {
5378 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5379 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* A non-ANY address requires an established ACL connection. */
5383 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5384 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5386 if (!conn || conn->state != BT_CONNECTED) {
5387 err = cmd_complete(sk, hdev->id,
5388 MGMT_OP_GET_CLOCK_INFO,
5389 MGMT_STATUS_NOT_CONNECTED,
5397 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5403 cmd->cmd_complete = clock_info_cmd_complete;
5405 hci_req_init(&req, hdev);
/* First Read Clock: zeroed params = local clock ("which" = 0x00). */
5407 memset(&hci_cp, 0, sizeof(hci_cp));
5408 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5411 hci_conn_hold(conn);
5412 cmd->user_data = hci_conn_get(conn);
5414 hci_cp.handle = cpu_to_le16(conn->handle);
5415 hci_cp.which = 0x01; /* Piconet clock */
5416 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5419 err = hci_req_run(&req, get_clock_info_complete);
5421 mgmt_pending_remove(cmd);
5424 hci_dev_unlock(hdev);
/* Broadcast a Device Added mgmt event (address, type, auto-connect
 * action) to all mgmt sockets except the originating one.
 */
5428 static void device_added(struct sock *sk, struct hci_dev *hdev,
5429 bdaddr_t *bdaddr, u8 type, u8 action)
5431 struct mgmt_ev_device_added ev;
5433 bacpy(&ev.addr.bdaddr, bdaddr);
5434 ev.addr.type = type;
5437 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for the mgmt Add Device command.
 * action: 0x00 background-scan report, 0x01 allow incoming / direct
 * connect, 0x02 auto-connect always. BR/EDR addresses only support
 * action 0x01 and are put on the whitelist (triggering a page-scan
 * update); LE addresses get connection parameters created/updated with
 * the mapped auto-connect policy. Emits Device Added on success.
 */
5440 static int add_device(struct sock *sk, struct hci_dev *hdev,
5441 void *data, u16 len)
5443 struct mgmt_cp_add_device *cp = data;
5444 u8 auto_conn, addr_type;
5447 BT_DBG("%s", hdev->name);
5449 if (!bdaddr_type_is_valid(cp->addr.type) ||
5450 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5451 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5452 MGMT_STATUS_INVALID_PARAMS,
5453 &cp->addr, sizeof(cp->addr));
5455 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5456 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5457 MGMT_STATUS_INVALID_PARAMS,
5458 &cp->addr, sizeof(cp->addr));
5462 if (cp->addr.type == BDADDR_BREDR) {
5463 /* Only incoming connections action is supported for now */
5464 if (cp->action != 0x01) {
5465 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5466 MGMT_STATUS_INVALID_PARAMS,
5467 &cp->addr, sizeof(cp->addr));
5471 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5476 hci_update_page_scan(hdev, NULL);
/* LE path: map the mgmt address type and action to internal values. */
5481 if (cp->addr.type == BDADDR_LE_PUBLIC)
5482 addr_type = ADDR_LE_DEV_PUBLIC;
5484 addr_type = ADDR_LE_DEV_RANDOM;
5486 if (cp->action == 0x02)
5487 auto_conn = HCI_AUTO_CONN_ALWAYS;
5488 else if (cp->action == 0x01)
5489 auto_conn = HCI_AUTO_CONN_DIRECT;
5491 auto_conn = HCI_AUTO_CONN_REPORT;
5493 /* If the connection parameters don't exist for this device,
5494 * they will be created and configured with defaults.
5496 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5498 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5500 &cp->addr, sizeof(cp->addr));
5505 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5507 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5508 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5511 hci_dev_unlock(hdev);
/* Broadcast a Device Removed mgmt event (address and type) to all mgmt
 * sockets except the originating one.
 */
5515 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5516 bdaddr_t *bdaddr, u8 type)
5518 struct mgmt_ev_device_removed ev;
5520 bacpy(&ev.addr.bdaddr, bdaddr);
5521 ev.addr.type = type;
5523 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for the mgmt Remove Device command.
 * A specific address removes that device: BR/EDR entries come off the
 * whitelist (page scan is updated); LE entries have their connection
 * parameters deleted unless the policy is HCI_AUTO_CONN_DISABLED, and
 * the background scan is refreshed. BDADDR_ANY (with type 0) clears the
 * whole whitelist and every non-disabled LE connection-parameter entry,
 * emitting Device Removed for each.
 */
5526 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5527 void *data, u16 len)
5529 struct mgmt_cp_remove_device *cp = data;
5532 BT_DBG("%s", hdev->name);
5536 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5537 struct hci_conn_params *params;
5540 if (!bdaddr_type_is_valid(cp->addr.type)) {
5541 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5542 MGMT_STATUS_INVALID_PARAMS,
5543 &cp->addr, sizeof(cp->addr));
5547 if (cp->addr.type == BDADDR_BREDR) {
5548 err = hci_bdaddr_list_del(&hdev->whitelist,
5552 err = cmd_complete(sk, hdev->id,
5553 MGMT_OP_REMOVE_DEVICE,
5554 MGMT_STATUS_INVALID_PARAMS,
5555 &cp->addr, sizeof(cp->addr));
5559 hci_update_page_scan(hdev, NULL);
5561 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path: translate mgmt address type to the internal one. */
5566 if (cp->addr.type == BDADDR_LE_PUBLIC)
5567 addr_type = ADDR_LE_DEV_PUBLIC;
5569 addr_type = ADDR_LE_DEV_RANDOM;
5571 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5574 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5575 MGMT_STATUS_INVALID_PARAMS,
5576 &cp->addr, sizeof(cp->addr));
/* Disabled entries are owned by the kernel, not removable here. */
5580 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5581 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5582 MGMT_STATUS_INVALID_PARAMS,
5583 &cp->addr, sizeof(cp->addr));
5587 list_del(&params->action);
5588 list_del(&params->list);
5590 hci_update_background_scan(hdev);
5592 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe the whitelist and all non-disabled LE params. */
5594 struct hci_conn_params *p, *tmp;
5595 struct bdaddr_list *b, *btmp;
5597 if (cp->addr.type) {
5598 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5599 MGMT_STATUS_INVALID_PARAMS,
5600 &cp->addr, sizeof(cp->addr));
5604 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5605 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5610 hci_update_page_scan(hdev, NULL);
5612 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5613 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5615 device_removed(sk, hdev, &p->addr, p->addr_type);
5616 list_del(&p->action);
5621 BT_DBG("All LE connection parameters were removed");
5623 hci_update_background_scan(hdev);
5627 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5628 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5631 hci_dev_unlock(hdev);
/* Handler for the mgmt Load Connection Parameters command.
 * After count/length validation, clears all disabled connection-parameter
 * entries and loads the supplied ones. Individual entries with invalid
 * address types or out-of-range intervals/latency/timeout are skipped
 * with an error log rather than failing the whole command.
 */
5635 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5638 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps param_count * entry size within the u16 length. */
5639 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5640 sizeof(struct mgmt_conn_param));
5641 u16 param_count, expected_len;
5644 if (!lmp_le_capable(hdev))
5645 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5646 MGMT_STATUS_NOT_SUPPORTED);
5648 param_count = __le16_to_cpu(cp->param_count);
5649 if (param_count > max_param_count) {
5650 BT_ERR("load_conn_param: too big param_count value %u",
5652 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5653 MGMT_STATUS_INVALID_PARAMS);
5656 expected_len = sizeof(*cp) + param_count *
5657 sizeof(struct mgmt_conn_param);
5658 if (expected_len != len) {
5659 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5661 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5662 MGMT_STATUS_INVALID_PARAMS);
5665 BT_DBG("%s param_count %u", hdev->name, param_count);
5669 hci_conn_params_clear_disabled(hdev);
5671 for (i = 0; i < param_count; i++) {
5672 struct mgmt_conn_param *param = &cp->params[i];
5673 struct hci_conn_params *hci_param;
5674 u16 min, max, latency, timeout;
5677 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5680 if (param->addr.type == BDADDR_LE_PUBLIC) {
5681 addr_type = ADDR_LE_DEV_PUBLIC;
5682 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5683 addr_type = ADDR_LE_DEV_RANDOM;
5685 BT_ERR("Ignoring invalid connection parameters");
5689 min = le16_to_cpu(param->min_interval);
5690 max = le16_to_cpu(param->max_interval);
5691 latency = le16_to_cpu(param->latency);
5692 timeout = le16_to_cpu(param->timeout);
5694 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5695 min, max, latency, timeout);
5697 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5698 BT_ERR("Ignoring invalid connection parameters");
5702 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5705 BT_ERR("Failed to add connection parameters");
5709 hci_param->conn_min_interval = min;
5710 hci_param->conn_max_interval = max;
5711 hci_param->conn_latency = latency;
5712 hci_param->supervision_timeout = timeout;
5715 hci_dev_unlock(hdev);
5717 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Handler for the mgmt Set External Configuration command.
 * Only valid for powered-off controllers with the EXTERNAL_CONFIG quirk;
 * config must be 0x00/0x01. Toggles HCI_EXT_CONFIGURED and, if that flips
 * the overall configured state, moves the controller between the
 * configured and unconfigured index lists (re-running power-on setup
 * when it becomes configured).
 */
5720 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5721 void *data, u16 len)
5723 struct mgmt_cp_set_external_config *cp = data;
5727 BT_DBG("%s", hdev->name);
5729 if (hdev_is_powered(hdev))
5730 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5731 MGMT_STATUS_REJECTED);
5733 if (cp->config != 0x00 && cp->config != 0x01)
5734 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5735 MGMT_STATUS_INVALID_PARAMS);
5737 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5738 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5739 MGMT_STATUS_NOT_SUPPORTED);
5744 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5747 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5750 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5757 err = new_options(hdev, sk);
/* Configured state flipped: swap index lists and redo setup. */
5759 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5760 mgmt_index_removed(hdev);
5762 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5763 set_bit(HCI_CONFIG, &hdev->dev_flags);
5764 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5766 queue_work(hdev->req_workqueue, &hdev->power_on);
5768 set_bit(HCI_RAW, &hdev->flags);
5769 mgmt_index_added(hdev);
5774 hci_dev_unlock(hdev);
/* Handler for the mgmt Set Public Address command.
 * Only valid while powered off, with a non-ANY address, on controllers
 * providing a set_bdaddr driver hook. Stores the address; if that makes
 * an unconfigured controller fully configured, it is moved to the
 * configured index list and power-on setup is scheduled.
 */
5778 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5779 void *data, u16 len)
5781 struct mgmt_cp_set_public_address *cp = data;
5785 BT_DBG("%s", hdev->name);
5787 if (hdev_is_powered(hdev))
5788 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5789 MGMT_STATUS_REJECTED);
5791 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5792 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5793 MGMT_STATUS_INVALID_PARAMS);
5795 if (!hdev->set_bdaddr)
5796 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5797 MGMT_STATUS_NOT_SUPPORTED);
5801 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5802 bacpy(&hdev->public_addr, &cp->bdaddr);
5804 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5811 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5812 err = new_options(hdev, sk);
5814 if (is_configured(hdev)) {
5815 mgmt_index_removed(hdev);
5817 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5819 set_bit(HCI_CONFIG, &hdev->dev_flags);
5820 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5822 queue_work(hdev->req_workqueue, &hdev->power_on);
5826 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode (0x0000 unused).
 * Each entry pairs a handler with its variable-length flag and expected
 * (minimum, when variable) parameter size; mgmt_control() validates the
 * incoming length against these before dispatching.
 * NOTE(review): the struct's member declarations (var_len/data_len) are
 * elided in this excerpt.
 */
5830 static const struct mgmt_handler {
5831 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5835 } mgmt_handlers[] = {
5836 { NULL }, /* 0x0000 (no command) */
5837 { read_version, false, MGMT_READ_VERSION_SIZE },
5838 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5839 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5840 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5841 { set_powered, false, MGMT_SETTING_SIZE },
5842 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5843 { set_connectable, false, MGMT_SETTING_SIZE },
5844 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5845 { set_bondable, false, MGMT_SETTING_SIZE },
5846 { set_link_security, false, MGMT_SETTING_SIZE },
5847 { set_ssp, false, MGMT_SETTING_SIZE },
5848 { set_hs, false, MGMT_SETTING_SIZE },
5849 { set_le, false, MGMT_SETTING_SIZE },
5850 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5851 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5852 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5853 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5854 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5855 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5856 { disconnect, false, MGMT_DISCONNECT_SIZE },
5857 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5858 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5859 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5860 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5861 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5862 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5863 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5864 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5865 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5866 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5867 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5868 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5869 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5870 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5871 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5872 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5873 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5874 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5875 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5876 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5877 { set_advertising, false, MGMT_SETTING_SIZE },
5878 { set_bredr, false, MGMT_SETTING_SIZE },
5879 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5880 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5881 { set_secure_conn, false, MGMT_SETTING_SIZE },
5882 { set_debug_keys, false, MGMT_SETTING_SIZE },
5883 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5884 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5885 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5886 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5887 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5888 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5889 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5890 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5891 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5892 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5893 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
5894 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
/*
 * mgmt_control() - dispatch one management command received on a
 * control socket.  Copies the message into a kernel buffer, decodes
 * the mgmt_hdr (opcode, controller index, payload length), resolves
 * the target hci_dev, rejects commands sent in invalid device states,
 * and finally calls the matching entry from mgmt_handlers[].
 *
 * NOTE(review): this chunk has extraction gaps (missing lines between
 * the numbered statements); code tokens are left untouched.
 */
5897 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5901 struct mgmt_hdr *hdr;
5902 u16 opcode, index, len;
5903 struct hci_dev *hdev = NULL;
5904 const struct mgmt_handler *handler;
5907 BT_DBG("got %zu bytes", msglen);
/* A message smaller than the header cannot be valid. */
5909 if (msglen < sizeof(*hdr))
5912 buf = kmalloc(msglen, GFP_KERNEL);
5916 if (memcpy_from_msg(buf, msg, msglen)) {
/* Header fields arrive little-endian from userspace. */
5922 opcode = __le16_to_cpu(hdr->opcode);
5923 index = __le16_to_cpu(hdr->index);
5924 len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received. */
5926 if (len != msglen - sizeof(*hdr)) {
5931 if (index != MGMT_INDEX_NONE) {
5932 hdev = hci_dev_get(index);
5934 err = cmd_status(sk, index, opcode,
5935 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup/config, or claimed by a user channel,
 * are not addressable through the management interface.
 */
5939 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5940 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5941 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5942 err = cmd_status(sk, index, opcode,
5943 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers only accept the configuration opcodes. */
5947 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5948 opcode != MGMT_OP_READ_CONFIG_INFO &&
5949 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5950 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5951 err = cmd_status(sk, index, opcode,
5952 MGMT_STATUS_INVALID_INDEX);
/* Opcode must be within the handler table and implemented. */
5957 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5958 mgmt_handlers[opcode].func == NULL) {
5959 BT_DBG("Unknown op %u", opcode);
5960 err = cmd_status(sk, index, opcode,
5961 MGMT_STATUS_UNKNOWN_COMMAND);
/* Index-list style opcodes must be sent without a controller index... */
5965 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5966 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5967 err = cmd_status(sk, index, opcode,
5968 MGMT_STATUS_INVALID_INDEX);
/* ...and every other opcode requires one. */
5972 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5973 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5974 err = cmd_status(sk, index, opcode,
5975 MGMT_STATUS_INVALID_INDEX);
5979 handler = &mgmt_handlers[opcode];
/* Fixed-size commands must match exactly; variable-length ones only
 * impose a minimum (data_len) and pass the rest to the handler.
 */
5981 if ((handler->var_len && len < handler->data_len) ||
5982 (!handler->var_len && len != handler->data_len)) {
5983 err = cmd_status(sk, index, opcode,
5984 MGMT_STATUS_INVALID_PARAMS);
5989 mgmt_init_hdev(sk, hdev);
/* Command parameters follow immediately after the header. */
5991 cp = buf + sizeof(*hdr);
5993 err = handler->func(sk, hdev, cp, len);
/*
 * mgmt_index_added() - announce a newly registered controller to mgmt
 * listeners.  Only BR/EDR-type, non-raw devices are reported; an
 * unconfigured controller gets the UNCONF variant of the event.
 */
6007 void mgmt_index_added(struct hci_dev *hdev)
6009 if (hdev->dev_type != HCI_BREDR)
6012 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6015 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6016 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6018 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/*
 * mgmt_index_removed() - announce controller unregistration.  All
 * still-pending mgmt commands for the device are completed with
 * INVALID_INDEX before the removal event is emitted.
 */
6021 void mgmt_index_removed(struct hci_dev *hdev)
6023 u8 status = MGMT_STATUS_INVALID_INDEX;
6025 if (hdev->dev_type != HCI_BREDR)
6028 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Opcode 0 == wildcard: flush every pending command. */
6031 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6033 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6034 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6036 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6039 /* This function requires the caller holds hdev->lock */
/*
 * restart_le_actions() - re-queue every stored LE connection parameter
 * entry onto the appropriate pending list (auto-connect vs. report)
 * after a power-on, then kick the background scan.
 */
6040 static void restart_le_actions(struct hci_dev *hdev)
6042 struct hci_conn_params *p;
6044 list_for_each_entry(p, &hdev->le_conn_params, list) {
6045 /* Needed for AUTO_OFF case where might not "really"
6046 * have been powered off.
6048 list_del_init(&p->action);
6050 switch (p->auto_connect) {
6051 case HCI_AUTO_CONN_DIRECT:
6052 case HCI_AUTO_CONN_ALWAYS:
6053 list_add(&p->action, &hdev->pend_le_conns);
6055 case HCI_AUTO_CONN_REPORT:
6056 list_add(&p->action, &hdev->pend_le_reports);
6063 hci_update_background_scan(hdev);
/*
 * powered_complete() - HCI request callback run when the power-on
 * request sequence finishes: restarts LE actions, answers pending
 * SET_POWERED commands and broadcasts the new settings.
 */
6066 static void powered_complete(struct hci_dev *hdev, u8 status)
6068 struct cmd_lookup match = { NULL, hdev };
6070 BT_DBG("status 0x%02x", status);
6074 restart_le_actions(hdev);
6076 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6078 new_settings(hdev, match.sk);
6080 hci_dev_unlock(hdev);
/*
 * powered_update_hci() - build and fire the HCI request that brings the
 * controller's host configuration (SSP, Secure Connections, LE host
 * support, advertising data, auth, page scan) in line with the mgmt
 * settings after power-on.  Returns the result of hci_req_run(), whose
 * completion handler is powered_complete().
 */
6086 static int powered_update_hci(struct hci_dev *hdev)
6088 struct hci_request req;
6091 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host
 * feature bit is not yet set.
 */
6093 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6094 !lmp_host_ssp_capable(hdev)) {
6097 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
/* Same for BR/EDR Secure Connections support. */
6100 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6102 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
6105 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
6106 lmp_bredr_capable(hdev)) {
6107 struct hci_cp_write_le_host_supported cp;
6112 /* Check first if we already have the right
6113 * host state (host features set)
6115 if (cp.le != lmp_host_le_capable(hdev) ||
6116 cp.simul != lmp_host_le_br_capable(hdev))
6117 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6121 if (lmp_le_capable(hdev)) {
6122 /* Make sure the controller has a good default for
6123 * advertising data. This also applies to the case
6124 * where BR/EDR was toggled during the AUTO_OFF phase.
6126 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
6127 update_adv_data(&req);
6128 update_scan_rsp_data(&req);
6131 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6132 enable_advertising(&req);
/* Sync the controller's authentication setting with the mgmt
 * link-security flag only when they disagree.
 */
6135 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
6136 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6137 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6138 sizeof(link_sec), &link_sec);
6140 if (lmp_bredr_capable(hdev)) {
6141 write_fast_connectable(&req, false);
6142 hci_update_page_scan(hdev, &req);
6148 return hci_req_run(&req, powered_complete);
/*
 * mgmt_powered() - notify the management layer of a controller power
 * state change.  On power-on it triggers powered_update_hci(); on
 * power-off it flushes all pending commands and, if the class of
 * device is non-zero, announces its reset.  Returns the result of
 * new_settings().
 */
6151 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6153 struct cmd_lookup match = { NULL, hdev };
6154 u8 status, zero_cod[] = { 0, 0, 0 };
/* Nothing to do until a mgmt socket has claimed this device. */
6157 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
6161 if (powered_update_hci(hdev) == 0)
6164 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6169 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6171 /* If the power off is because of hdev unregistration let
6172 * use the appropriate INVALID_INDEX status. Otherwise use
6173 * NOT_POWERED. We cover both scenarios here since later in
6174 * mgmt_index_removed() any hci_conn callbacks will have already
6175 * been triggered, potentially causing misleading DISCONNECTED
6178 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
6179 status = MGMT_STATUS_INVALID_INDEX;
6181 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 == wildcard: complete every pending command. */
6183 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6185 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6186 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6187 zero_cod, sizeof(zero_cod), NULL);
6190 err = new_settings(hdev, match.sk);
/*
 * mgmt_set_powered_failed() - fail a pending SET_POWERED command,
 * mapping -ERFKILL to the dedicated RFKILLED status.
 */
6198 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6200 struct pending_cmd *cmd;
6203 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6207 if (err == -ERFKILL)
6208 status = MGMT_STATUS_RFKILLED;
6210 status = MGMT_STATUS_FAILED;
6212 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6214 mgmt_pending_remove(cmd);
/*
 * mgmt_discoverable_timeout() - handle expiry of the discoverable
 * timer: clear both discoverable flags, turn inquiry scan back off
 * (page scan only) for BR/EDR, refresh the advertising data and
 * broadcast the updated settings.
 */
6217 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6219 struct hci_request req;
6223 /* When discoverable timeout triggers, then just make sure
6224 * the limited discoverable flag is cleared. Even in the case
6225 * of a timeout triggered from general discoverable, it is
6226 * safe to unconditionally clear the flag.
6228 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6229 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6231 hci_req_init(&req, hdev);
6232 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE alone = connectable but no longer discoverable. */
6233 u8 scan = SCAN_PAGE;
6234 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6235 sizeof(scan), &scan);
6238 update_adv_data(&req);
6239 hci_req_run(&req, NULL);
6241 hdev->discov_timeout = 0;
6243 new_settings(hdev, NULL);
6245 hci_dev_unlock(hdev);
/*
 * mgmt_new_link_key() - emit MGMT_EV_NEW_LINK_KEY for a freshly
 * created BR/EDR link key, with store_hint taken from @persistent.
 */
6248 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6251 struct mgmt_ev_new_link_key ev;
6253 memset(&ev, 0, sizeof(ev));
6255 ev.store_hint = persistent;
6256 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6257 ev.key.addr.type = BDADDR_BREDR;
6258 ev.key.type = key->type;
6259 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6260 ev.key.pin_len = key->pin_len;
6262 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_ltk_type() - map an SMP long-term key's type/authentication
 * combination to the corresponding MGMT_LTK_* wire value.
 */
6265 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6267 switch (ltk->type) {
6270 if (ltk->authenticated)
6271 return MGMT_LTK_AUTHENTICATED;
6272 return MGMT_LTK_UNAUTHENTICATED;
6274 if (ltk->authenticated)
6275 return MGMT_LTK_P256_AUTH;
6276 return MGMT_LTK_P256_UNAUTH;
6277 case SMP_LTK_P256_DEBUG:
6278 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown key types. */
6281 return MGMT_LTK_UNAUTHENTICATED;
/*
 * mgmt_new_ltk() - emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long
 * term key.  store_hint is forced to 0 for non-identity (resolvable /
 * non-resolvable) random addresses, since such keys cannot be reused.
 */
6284 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6286 struct mgmt_ev_new_long_term_key ev;
6288 memset(&ev, 0, sizeof(ev));
6290 /* Devices using resolvable or non-resolvable random addresses
6291 * without providing an identity resolving key don't require
6292 * to store long term keys. Their addresses will change the
6295 * Only when a remote device provides an identity address
6296 * make sure the long term key is stored. If the remote
6297 * identity is known, the long term keys are internally
6298 * mapped to the identity address. So allow static random
6299 * and public addresses here.
/* Top two address bits 11 == static random; anything else random
 * is resolvable/non-resolvable and not worth storing.
 */
6301 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6302 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6303 ev.store_hint = 0x00;
6305 ev.store_hint = persistent;
6307 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6308 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6309 ev.key.type = mgmt_ltk_type(key);
6310 ev.key.enc_size = key->enc_size;
6311 ev.key.ediv = key->ediv;
6312 ev.key.rand = key->rand;
6314 if (key->type == SMP_LTK)
6317 memcpy(ev.key.val, key->val, sizeof(key->val));
6319 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_new_irk() - emit MGMT_EV_NEW_IRK for a new identity resolving
 * key.  store_hint is only set when the key came with an actual RPA,
 * i.e. when the IRK is genuinely needed for address resolution.
 */
6322 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6324 struct mgmt_ev_new_irk ev;
6326 memset(&ev, 0, sizeof(ev));
6328 /* For identity resolving keys from devices that are already
6329 * using a public address or static random address, do not
6330 * ask for storing this key. The identity resolving key really
6331 * is only mandatory for devices using resolvable random
6334 * Storing all identity resolving keys has the downside that
6335 * they will be also loaded on next boot of the system. More
6336 * identity resolving keys, means more time during scanning is
6337 * needed to actually resolve these addresses.
6339 if (bacmp(&irk->rpa, BDADDR_ANY))
6340 ev.store_hint = 0x01;
6342 ev.store_hint = 0x00;
6344 bacpy(&ev.rpa, &irk->rpa);
6345 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6346 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6347 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6349 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_new_csrk() - emit MGMT_EV_NEW_CSRK for a new signature
 * resolving key.  Same identity-address rule as mgmt_new_ltk():
 * non-identity random addresses get store_hint 0.
 */
6352 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6355 struct mgmt_ev_new_csrk ev;
6357 memset(&ev, 0, sizeof(ev));
6359 /* Devices using resolvable or non-resolvable random addresses
6360 * without providing an identity resolving key don't require
6361 * to store signature resolving keys. Their addresses will change
6362 * the next time around.
6364 * Only when a remote device provides an identity address
6365 * make sure the signature resolving key is stored. So allow
6366 * static random and public addresses here.
6368 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6369 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6370 ev.store_hint = 0x00;
6372 ev.store_hint = persistent;
6374 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6375 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6376 ev.key.master = csrk->master;
6377 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6379 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_new_conn_param() - emit MGMT_EV_NEW_CONN_PARAM with updated LE
 * connection parameters for a device.  Non-identity addresses are
 * silently ignored since the parameters could not be re-applied later.
 * All 16-bit fields are converted to little-endian wire order.
 */
6382 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6383 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6384 u16 max_interval, u16 latency, u16 timeout)
6386 struct mgmt_ev_new_conn_param ev;
6388 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6391 memset(&ev, 0, sizeof(ev));
6392 bacpy(&ev.addr.bdaddr, bdaddr);
6393 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6394 ev.store_hint = store_hint;
6395 ev.min_interval = cpu_to_le16(min_interval);
6396 ev.max_interval = cpu_to_le16(max_interval);
6397 ev.latency = cpu_to_le16(latency);
6398 ev.timeout = cpu_to_le16(timeout);
6400 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/*
 * eir_append_data() - append one EIR/AD structure (length, type, data)
 * at offset @eir_len in @eir and return the new length.  The caller
 * must guarantee the buffer has room for data_len + 2 bytes.
 */
6403 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* The length octet covers the type byte plus the payload. */
6406 eir[eir_len++] = sizeof(type) + data_len;
6407 eir[eir_len++] = type;
6408 memcpy(&eir[eir_len], data, data_len);
6409 eir_len += data_len;
/*
 * mgmt_device_connected() - emit MGMT_EV_DEVICE_CONNECTED.  The EIR
 * payload is either the LE advertising data captured on the
 * connection, or (for BR/EDR) the remote name plus class of device.
 */
6414 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6415 u32 flags, u8 *name, u8 name_len)
6418 struct mgmt_ev_device_connected *ev = (void *) buf;
6421 bacpy(&ev->addr.bdaddr, &conn->dst);
6422 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6424 ev->flags = __cpu_to_le32(flags);
6426 /* We must ensure that the EIR Data fields are ordered and
6427 * unique. Keep it simple for now and avoid the problem by not
6428 * adding any BR/EDR data to the LE adv.
6430 if (conn->le_adv_data_len > 0) {
6431 memcpy(&ev->eir[eir_len],
6432 conn->le_adv_data, conn->le_adv_data_len);
6433 eir_len = conn->le_adv_data_len;
6436 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append the class of device when it is non-zero. */
6439 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6440 eir_len = eir_append_data(ev->eir, eir_len,
6442 conn->dev_class, 3);
6445 ev->eir_len = cpu_to_le16(eir_len);
6447 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6448 sizeof(*ev) + eir_len, NULL);
/*
 * disconnect_rsp() - mgmt_pending_foreach callback: complete a pending
 * DISCONNECT command with success and hand its socket back via @data.
 */
6451 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6453 struct sock **sk = data;
6455 cmd->cmd_complete(cmd, 0);
6460 mgmt_pending_remove(cmd);
/*
 * unpair_device_rsp() - mgmt_pending_foreach callback: signal the
 * device as unpaired and complete the pending UNPAIR_DEVICE command.
 */
6463 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6465 struct hci_dev *hdev = data;
6466 struct mgmt_cp_unpair_device *cp = cmd->param;
6468 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6470 cmd->cmd_complete(cmd, 0);
6471 mgmt_pending_remove(cmd);
/*
 * mgmt_powering_down() - report whether a SET_POWERED(off) command is
 * currently pending for @hdev.
 *
 * NOTE(review): the body lines that inspect cp->val are missing from
 * this extraction; only the lookup is visible here.
 */
6474 bool mgmt_powering_down(struct hci_dev *hdev)
6476 struct pending_cmd *cmd;
6477 struct mgmt_mode *cp;
6479 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/*
 * mgmt_device_disconnected() - emit MGMT_EV_DEVICE_DISCONNECTED and
 * complete any pending DISCONNECT/UNPAIR commands.  During a power
 * down, dropping the last connection fast-tracks the power_off work.
 */
6490 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6491 u8 link_type, u8 addr_type, u8 reason,
6492 bool mgmt_connected)
6494 struct mgmt_ev_device_disconnected ev;
6495 struct sock *sk = NULL;
6497 /* The connection is still in hci_conn_hash so test for 1
6498 * instead of 0 to know if this is the last one.
6500 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6501 cancel_delayed_work(&hdev->power_off);
6502 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only connections mgmt knows about generate the event. */
6505 if (!mgmt_connected)
6508 if (link_type != ACL_LINK && link_type != LE_LINK)
6511 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6513 bacpy(&ev.addr.bdaddr, bdaddr);
6514 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6517 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6522 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/*
 * mgmt_disconnect_failed() - finish a pending DISCONNECT command with
 * the HCI error mapped to a mgmt status.  Any pending UNPAIR_DEVICE
 * commands are completed first; the DISCONNECT lookup only matches if
 * both address and address type agree with the pending command.
 */
6526 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6527 u8 link_type, u8 addr_type, u8 status)
6529 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6530 struct mgmt_cp_disconnect *cp;
6531 struct pending_cmd *cmd;
6533 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6536 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6542 if (bacmp(bdaddr, &cp->addr.bdaddr))
6545 if (cp->addr.type != bdaddr_type)
6548 cmd->cmd_complete(cmd, mgmt_status(status));
6549 mgmt_pending_remove(cmd);
/*
 * mgmt_connect_failed() - emit MGMT_EV_CONNECT_FAILED for an outgoing
 * connection attempt that failed.  Shares the last-connection
 * power-off fast path with mgmt_device_disconnected().
 */
6552 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6553 u8 addr_type, u8 status)
6555 struct mgmt_ev_connect_failed ev;
6557 /* The connection is still in hci_conn_hash so test for 1
6558 * instead of 0 to know if this is the last one.
6560 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6561 cancel_delayed_work(&hdev->power_off);
6562 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6565 bacpy(&ev.addr.bdaddr, bdaddr);
6566 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6567 ev.status = mgmt_status(status);
6569 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_pin_code_request() - emit MGMT_EV_PIN_CODE_REQUEST asking
 * userspace for a legacy BR/EDR PIN code.
 */
6572 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6574 struct mgmt_ev_pin_code_request ev;
6576 bacpy(&ev.addr.bdaddr, bdaddr);
6577 ev.addr.type = BDADDR_BREDR;
6580 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_pin_code_reply_complete() - finish a pending PIN_CODE_REPLY
 * command with the mapped HCI status.
 */
6583 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6586 struct pending_cmd *cmd;
6588 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6592 cmd->cmd_complete(cmd, mgmt_status(status));
6593 mgmt_pending_remove(cmd);
/*
 * mgmt_pin_code_neg_reply_complete() - same as above for the negative
 * reply command.
 */
6596 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6599 struct pending_cmd *cmd;
6601 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6605 cmd->cmd_complete(cmd, mgmt_status(status));
6606 mgmt_pending_remove(cmd);
/*
 * mgmt_user_confirm_request() - emit MGMT_EV_USER_CONFIRM_REQUEST for
 * SSP numeric comparison; @value is sent little-endian.
 */
6609 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6610 u8 link_type, u8 addr_type, u32 value,
6613 struct mgmt_ev_user_confirm_request ev;
6615 BT_DBG("%s", hdev->name);
6617 bacpy(&ev.addr.bdaddr, bdaddr);
6618 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6619 ev.confirm_hint = confirm_hint;
6620 ev.value = cpu_to_le32(value);
6622 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/*
 * mgmt_user_passkey_request() - emit MGMT_EV_USER_PASSKEY_REQUEST
 * asking userspace to enter a passkey.
 */
6626 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6627 u8 link_type, u8 addr_type)
6629 struct mgmt_ev_user_passkey_request ev;
6631 BT_DBG("%s", hdev->name);
6633 bacpy(&ev.addr.bdaddr, bdaddr);
6634 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6636 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/*
 * user_pairing_resp_complete() - shared helper: finish the pending
 * command identified by @opcode with the mapped HCI status.  The four
 * public wrappers below differ only in the opcode they pass.
 */
6640 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6641 u8 link_type, u8 addr_type, u8 status,
6644 struct pending_cmd *cmd;
6646 cmd = mgmt_pending_find(opcode, hdev);
6650 cmd->cmd_complete(cmd, mgmt_status(status));
6651 mgmt_pending_remove(cmd);
/* Thin wrappers routing to user_pairing_resp_complete(). */
6656 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6657 u8 link_type, u8 addr_type, u8 status)
6659 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6660 status, MGMT_OP_USER_CONFIRM_REPLY);
6663 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6664 u8 link_type, u8 addr_type, u8 status)
6666 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6668 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6671 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6672 u8 link_type, u8 addr_type, u8 status)
6674 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6675 status, MGMT_OP_USER_PASSKEY_REPLY);
6678 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6679 u8 link_type, u8 addr_type, u8 status)
6681 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6683 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/*
 * mgmt_user_passkey_notify() - emit MGMT_EV_PASSKEY_NOTIFY so
 * userspace can display the passkey; @passkey goes out little-endian.
 */
6686 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6687 u8 link_type, u8 addr_type, u32 passkey,
6690 struct mgmt_ev_passkey_notify ev;
6692 BT_DBG("%s", hdev->name);
6694 bacpy(&ev.addr.bdaddr, bdaddr);
6695 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6696 ev.passkey = __cpu_to_le32(passkey);
6697 ev.entered = entered;
6699 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_auth_failed() - emit MGMT_EV_AUTH_FAILED for a connection and,
 * if a PAIR_DEVICE command is pending for it, complete that command
 * and direct the event's socket exclusion at its owner.
 */
6702 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6704 struct mgmt_ev_auth_failed ev;
6705 struct pending_cmd *cmd;
6706 u8 status = mgmt_status(hci_status);
6708 bacpy(&ev.addr.bdaddr, &conn->dst);
6709 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6712 cmd = find_pairing(conn);
6714 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6715 cmd ? cmd->sk : NULL);
6718 cmd->cmd_complete(cmd, status);
6719 mgmt_pending_remove(cmd);
/*
 * mgmt_auth_enable_complete() - HCI completion for Write Auth Enable.
 * On error, fail all pending SET_LINK_SECURITY commands; on success,
 * sync the HCI_LINK_SECURITY flag with the controller state, answer
 * the pending commands and broadcast new settings if the flag changed.
 */
6723 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6725 struct cmd_lookup match = { NULL, hdev };
6729 u8 mgmt_err = mgmt_status(status);
6730 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6731 cmd_status_rsp, &mgmt_err);
6735 if (test_bit(HCI_AUTH, &hdev->flags))
6736 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6739 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6742 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6746 new_settings(hdev, match.sk);
/*
 * clear_eir() - queue a Write EIR command with an all-zero payload to
 * wipe the extended inquiry response, if the controller supports EIR.
 * Also clears the cached copy in hdev->eir.
 */
6752 static void clear_eir(struct hci_request *req)
6754 struct hci_dev *hdev = req->hdev;
6755 struct hci_cp_write_eir cp;
6757 if (!lmp_ext_inq_capable(hdev))
6760 memset(hdev->eir, 0, sizeof(hdev->eir));
6762 memset(&cp, 0, sizeof(cp));
6764 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/*
 * mgmt_ssp_enable_complete() - HCI completion for Write SSP Mode.
 * On error, roll back the SSP (and dependent HS) flags and fail the
 * pending SET_SSP commands; on success, sync the flags with @enable,
 * answer pending commands, broadcast settings if anything changed,
 * and follow up with debug-key mode or an EIR update as appropriate.
 */
6767 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6769 struct cmd_lookup match = { NULL, hdev };
6770 struct hci_request req;
6771 bool changed = false;
6774 u8 mgmt_err = mgmt_status(status);
/* Roll back an optimistic enable that the controller rejected. */
6776 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6777 &hdev->dev_flags)) {
6778 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6779 new_settings(hdev, NULL);
6782 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6788 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6790 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Disabling SSP also forces High Speed off. */
6792 changed = test_and_clear_bit(HCI_HS_ENABLED,
6795 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6798 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6801 new_settings(hdev, match.sk);
6806 hci_req_init(&req, hdev);
6808 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6809 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6810 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6811 sizeof(enable), &enable);
6817 hci_req_run(&req, NULL);
/*
 * mgmt_sc_enable_complete() - HCI completion for Write SC Support.
 * Mirrors mgmt_ssp_enable_complete(): roll back flags and fail
 * pending SET_SECURE_CONN commands on error, otherwise sync the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and report any settings change.
 */
6820 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6822 struct cmd_lookup match = { NULL, hdev };
6823 bool changed = false;
6826 u8 mgmt_err = mgmt_status(status);
6829 if (test_and_clear_bit(HCI_SC_ENABLED,
6831 new_settings(hdev, NULL);
6832 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6835 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6836 cmd_status_rsp, &mgmt_err);
6841 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6843 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6844 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6847 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6848 settings_rsp, &match);
6851 new_settings(hdev, match.sk);
/*
 * sk_lookup() - mgmt_pending_foreach callback: remember (and hold a
 * reference on) the first pending command's socket in the cmd_lookup.
 */
6857 static void sk_lookup(struct pending_cmd *cmd, void *data)
6859 struct cmd_lookup *match = data;
6861 if (match->sk == NULL) {
6862 match->sk = cmd->sk;
6863 sock_hold(match->sk);
/*
 * mgmt_set_class_of_dev_complete() - HCI completion for a class of
 * device update triggered by SET_DEV_CLASS/ADD_UUID/REMOVE_UUID;
 * broadcasts MGMT_EV_CLASS_OF_DEV_CHANGED, excluding the originator.
 */
6867 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6870 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6872 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6873 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6874 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6877 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/*
 * mgmt_set_local_name_complete() - HCI completion for a local name
 * write.  Stores the new name, and unless the write was part of the
 * power-on sequence, broadcasts MGMT_EV_LOCAL_NAME_CHANGED.
 */
6884 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6886 struct mgmt_cp_set_local_name ev;
6887 struct pending_cmd *cmd;
6892 memset(&ev, 0, sizeof(ev));
6893 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6894 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6896 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6898 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6900 /* If this is a HCI command related to powering on the
6901 * HCI dev don't send any mgmt signals.
6903 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6907 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6908 cmd ? cmd->sk : NULL);
/*
 * mgmt_read_local_oob_data_complete() - HCI completion for the local
 * OOB data read.  On failure, the pending READ_LOCAL_OOB_DATA command
 * gets an error status; otherwise it is completed with either the
 * extended (P-192 + P-256) or the legacy (P-192 only) response,
 * depending on Secure Connections support and data availability.
 */
6911 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6912 u8 *rand192, u8 *hash256, u8 *rand256,
6915 struct pending_cmd *cmd;
6917 BT_DBG("%s status %u", hdev->name, status);
6919 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6924 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6925 mgmt_status(status));
6927 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
6928 struct mgmt_rp_read_local_oob_ext_data rp;
6930 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6931 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
6933 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6934 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
6936 cmd_complete(cmd->sk, hdev->id,
6937 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6940 struct mgmt_rp_read_local_oob_data rp;
6942 memcpy(rp.hash, hash192, sizeof(rp.hash));
6943 memcpy(rp.rand, rand192, sizeof(rp.rand));
6945 cmd_complete(cmd->sk, hdev->id,
6946 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6951 mgmt_pending_remove(cmd);
/*
 * has_uuid() - linear search for a 128-bit UUID in an array of
 * @uuid_count 16-byte entries.
 */
6954 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
6958 for (i = 0; i < uuid_count; i++) {
6959 if (!memcmp(uuid, uuids[i], 16))
/*
 * eir_has_uuids() - walk the EIR/AD structures in @eir and report
 * whether any advertised service UUID (16-, 32- or 128-bit forms)
 * matches the filter list.  Shorter UUIDs are expanded to 128 bits
 * over the Bluetooth base UUID before comparison.
 */
6966 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
6970 while (parsed < eir_len) {
6971 u8 field_len = eir[0];
/* Reject a field that claims to extend past the buffer. */
6978 if (eir_len - parsed < field_len + 1)
6982 case EIR_UUID16_ALL:
6983 case EIR_UUID16_SOME:
/* 16-bit UUIDs occupy bytes 12-13 of the base UUID. */
6984 for (i = 0; i + 3 <= field_len; i += 2) {
6985 memcpy(uuid, bluetooth_base_uuid, 16);
6986 uuid[13] = eir[i + 3];
6987 uuid[12] = eir[i + 2];
6988 if (has_uuid(uuid, uuid_count, uuids))
6992 case EIR_UUID32_ALL:
6993 case EIR_UUID32_SOME:
/* 32-bit UUIDs occupy bytes 12-15 of the base UUID. */
6994 for (i = 0; i + 5 <= field_len; i += 4) {
6995 memcpy(uuid, bluetooth_base_uuid, 16);
6996 uuid[15] = eir[i + 5];
6997 uuid[14] = eir[i + 4];
6998 uuid[13] = eir[i + 3];
6999 uuid[12] = eir[i + 2];
7000 if (has_uuid(uuid, uuid_count, uuids))
7004 case EIR_UUID128_ALL:
7005 case EIR_UUID128_SOME:
7006 for (i = 0; i + 17 <= field_len; i += 16) {
7007 memcpy(uuid, eir + i + 2, 16);
7008 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this structure (length octet + payload). */
7014 parsed += field_len + 1;
7015 eir += field_len + 1;
/*
 * mgmt_device_found() - emit MGMT_EV_DEVICE_FOUND for a discovery
 * result.  Applies the active discovery filters: kernel-initiated
 * discovery only, RSSI threshold, and (for service discovery) a UUID
 * match over the EIR/advertising data and the scan response.
 */
7021 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7022 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7023 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7026 struct mgmt_ev_device_found *ev = (void *) buf;
7030 /* Don't send events for a non-kernel initiated discovery. With
7031 * LE one exception is if we have pend_le_reports > 0 in which
7032 * case we're doing passive scanning and want these events.
7034 if (!hci_discovery_active(hdev)) {
7035 if (link_type == ACL_LINK)
7037 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7041 /* When using service discovery with a RSSI threshold, then check
7042 * if such a RSSI threshold is specified. If a RSSI threshold has
7043 * been specified, then all results with a RSSI smaller than the
7044 * RSSI threshold will be dropped.
7046 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7047 * the results are also dropped.
7049 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7050 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7053 /* Make sure that the buffer is big enough. The 5 extra bytes
7054 * are for the potential CoD field.
7056 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7059 memset(buf, 0, sizeof(buf));
7061 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7062 * RSSI value was reported as 0 when not available. This behavior
7063 * is kept when using device discovery. This is required for full
7064 * backwards compatibility with the API.
7066 * However when using service discovery, the value 127 will be
7067 * returned when the RSSI is not available.
7069 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
7072 bacpy(&ev->addr.bdaddr, bdaddr);
7073 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7075 ev->flags = cpu_to_le32(flags);
7078 /* When using service discovery and a list of UUID is
7079 * provided, results with no matching UUID should be
7080 * dropped. In case there is a match the result is
7081 * kept and checking possible scan response data
7084 if (hdev->discovery.uuid_count > 0)
7085 match = eir_has_uuids(eir, eir_len,
7086 hdev->discovery.uuid_count,
7087 hdev->discovery.uuids);
/* No EIR match and no scan response to check: drop the result. */
7091 if (!match && !scan_rsp_len)
7094 /* Copy EIR or advertising data into event */
7095 memcpy(ev->eir, eir, eir_len);
7097 /* When using service discovery and a list of UUID is
7098 * provided, results with empty EIR or advertising data
7099 * should be dropped since they do not match any UUID.
7101 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
/* Append the class of device when the EIR does not carry one. */
7107 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7108 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7111 if (scan_rsp_len > 0) {
7112 /* When using service discovery and a list of UUID is
7113 * provided, results with no matching UUID should be
7114 * dropped if there is no previous match from the
7117 if (hdev->discovery.uuid_count > 0) {
7118 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7119 hdev->discovery.uuid_count,
7120 hdev->discovery.uuids))
7124 /* Append scan response data to event */
7125 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7127 /* When using service discovery and a list of UUID is
7128 * provided, results with empty scan response and no
7129 * previous matched advertising data should be dropped.
7131 if (hdev->discovery.uuid_count > 0 && !match)
7135 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7136 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7138 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/*
 * mgmt_remote_name() - emit MGMT_EV_DEVICE_FOUND carrying only the
 * resolved remote name as an EIR_NAME_COMPLETE field (used after a
 * remote name request during discovery).
 */
7141 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7142 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7144 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR length and type octets for the name field. */
7145 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7148 ev = (struct mgmt_ev_device_found *) buf;
7150 memset(buf, 0, sizeof(buf));
7152 bacpy(&ev->addr.bdaddr, bdaddr);
7153 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7156 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7159 ev->eir_len = cpu_to_le16(eir_len);
7161 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/*
 * mgmt_discovering() - emit MGMT_EV_DISCOVERING with the current
 * discovery type and the new discovering state.
 */
7164 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7166 struct mgmt_ev_discovering ev;
7168 BT_DBG("%s discovering %u", hdev->name, discovering);
7170 memset(&ev, 0, sizeof(ev));
7171 ev.type = hdev->discovery.type;
7172 ev.discovering = discovering;
7174 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/*
 * adv_enable_complete() - debug-only completion callback for the
 * re-enable advertising request; just logs the status.
 */
7177 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
7179 BT_DBG("%s status %u", hdev->name, status);
7182 void mgmt_reenable_advertising(struct hci_dev *hdev)
7184 struct hci_request req;
7186 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7189 hci_req_init(&req, hdev);
7190 enable_advertising(&req);
7191 hci_req_run(&req, adv_enable_complete);