2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
/* Opcodes advertised to user space via MGMT_OP_READ_COMMANDS.
 * NOTE(review): several entries and the closing "};" appear to be
 * missing from this extracted view -- compare against the full file.
 */
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_LINK_SECURITY,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
82 MGMT_OP_SET_SECURE_CONN,
83 MGMT_OP_SET_DEBUG_KEYS,
/* Events advertised to user space via MGMT_OP_READ_COMMANDS.
 * NOTE(review): some entries and the closing "};" are missing from
 * this extracted view.
 */
88 static const u16 mgmt_events[] = {
89 MGMT_EV_CONTROLLER_ERROR,
91 MGMT_EV_INDEX_REMOVED,
93 MGMT_EV_CLASS_OF_DEV_CHANGED,
94 MGMT_EV_LOCAL_NAME_CHANGED,
96 MGMT_EV_NEW_LONG_TERM_KEY,
97 MGMT_EV_DEVICE_CONNECTED,
98 MGMT_EV_DEVICE_DISCONNECTED,
99 MGMT_EV_CONNECT_FAILED,
100 MGMT_EV_PIN_CODE_REQUEST,
101 MGMT_EV_USER_CONFIRM_REQUEST,
102 MGMT_EV_USER_PASSKEY_REQUEST,
104 MGMT_EV_DEVICE_FOUND,
106 MGMT_EV_DEVICE_BLOCKED,
107 MGMT_EV_DEVICE_UNBLOCKED,
108 MGMT_EV_DEVICE_UNPAIRED,
109 MGMT_EV_PASSKEY_NOTIFY,
/* Service-cache expiry delay used by service_cache_off() scheduling. */
114 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A device counts as powered only when HCI is up AND the power-on was
 * requested by user space (HCI_AUTO_OFF clear), not just auto-powered
 * for setup.
 */
116 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
117 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
120 struct list_head list;
128 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; see mgmt_status() below for
 * the bounds-checked lookup. NOTE(review): the entry for status 0x00
 * (Success) is not visible in this extracted view -- confirm the table
 * starts with MGMT_STATUS_SUCCESS in the full file.
 */
129 static u8 mgmt_status_table[] = {
131 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
132 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
133 MGMT_STATUS_FAILED, /* Hardware Failure */
134 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
135 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
136 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
137 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
138 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
139 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
140 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
141 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
142 MGMT_STATUS_BUSY, /* Command Disallowed */
143 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
144 MGMT_STATUS_REJECTED, /* Rejected Security */
145 MGMT_STATUS_REJECTED, /* Rejected Personal */
146 MGMT_STATUS_TIMEOUT, /* Host Timeout */
147 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
148 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
149 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
150 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
151 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
152 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
153 MGMT_STATUS_BUSY, /* Repeated Attempts */
154 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
155 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
156 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
157 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
158 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
159 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
160 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
161 MGMT_STATUS_FAILED, /* Unspecified Error */
162 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
163 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
164 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
165 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
166 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
167 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
168 MGMT_STATUS_FAILED, /* Unit Link Key Used */
169 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
170 MGMT_STATUS_TIMEOUT, /* Instant Passed */
171 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
172 MGMT_STATUS_FAILED, /* Transaction Collision */
173 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
174 MGMT_STATUS_REJECTED, /* QoS Rejected */
175 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
176 MGMT_STATUS_REJECTED, /* Insufficient Security */
177 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
178 MGMT_STATUS_BUSY, /* Role Switch Pending */
179 MGMT_STATUS_FAILED, /* Slot Violation */
180 MGMT_STATUS_FAILED, /* Role Switch Failed */
181 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
182 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
183 MGMT_STATUS_BUSY, /* Host Busy Pairing */
184 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
185 MGMT_STATUS_BUSY, /* Controller Busy */
186 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
187 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
188 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
189 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
190 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
193 static u8 mgmt_status(u8 hci_status)
195 if (hci_status < ARRAY_SIZE(mgmt_status_table))
196 return mgmt_status_table[hci_status];
198 return MGMT_STATUS_FAILED;
/* Send an MGMT_EV_CMD_STATUS event for command @cmd to socket @sk.
 * NOTE(review): the skb/err declarations, the NULL check after
 * alloc_skb, the ev->status assignment and the tail of the function
 * are not visible in this extracted view.
 */
201 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
204 struct mgmt_hdr *hdr;
205 struct mgmt_ev_cmd_status *ev;
208 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
210 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
/* Header first: event opcode, controller index, payload length. */
214 hdr = (void *) skb_put(skb, sizeof(*hdr));
216 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
217 hdr->index = cpu_to_le16(index);
218 hdr->len = cpu_to_le16(sizeof(*ev));
/* Payload: the opcode the status refers to (wire format is LE). */
220 ev = (void *) skb_put(skb, sizeof(*ev));
222 ev->opcode = cpu_to_le16(cmd);
224 err = sock_queue_rcv_skb(sk, skb);
/* Send an MGMT_EV_CMD_COMPLETE event carrying @rp_len bytes of
 * response parameters (@rp) for command @cmd to socket @sk.
 * NOTE(review): local declarations, alloc failure handling, the
 * ev->status assignment and the function tail are not visible here.
 */
231 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
232 void *rp, size_t rp_len)
235 struct mgmt_hdr *hdr;
236 struct mgmt_ev_cmd_complete *ev;
239 BT_DBG("sock %p", sk);
241 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
245 hdr = (void *) skb_put(skb, sizeof(*hdr));
247 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
248 hdr->index = cpu_to_le16(index);
/* Length covers the fixed event header plus the variable response. */
249 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
251 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
252 ev->opcode = cpu_to_le16(cmd);
256 memcpy(ev->data, rp, rp_len);
258 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: report the mgmt interface
 * version/revision. Index-less command (MGMT_INDEX_NONE).
 */
265 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
268 struct mgmt_rp_read_version rp;
270 BT_DBG("sock %p", sk);
272 rp.version = MGMT_VERSION;
273 rp.revision = cpu_to_le16(MGMT_REVISION);
275 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: return the supported command and
 * event opcode lists. The reply is heap-allocated because its size
 * depends on both table lengths; opcodes are written unaligned in LE
 * order (commands first, then events).
 * NOTE(review): kmalloc failure handling and the kfree/return tail are
 * not visible in this extracted view.
 */
279 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
282 struct mgmt_rp_read_commands *rp;
283 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
284 const u16 num_events = ARRAY_SIZE(mgmt_events);
289 BT_DBG("sock %p", sk);
291 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
293 rp = kmalloc(rp_size, GFP_KERNEL);
297 rp->num_commands = cpu_to_le16(num_commands);
298 rp->num_events = cpu_to_le16(num_events);
/* The event list continues directly after the command list; @opcode
 * keeps advancing across both loops.
 */
300 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
301 put_unaligned_le16(mgmt_commands[i], opcode);
303 for (i = 0; i < num_events; i++, opcode++)
304 put_unaligned_le16(mgmt_events[i], opcode);
306 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all BR/EDR
 * controllers, excluding ones still in setup or bound to a user
 * channel. Two passes under hci_dev_list_lock: first count (sizing the
 * reply), then fill. GFP_ATOMIC because the read lock is held.
 * NOTE(review): count initialisation, the continue statements after
 * the skip checks and the kfree/return tail are not visible here.
 */
313 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
316 struct mgmt_rp_read_index_list *rp;
322 BT_DBG("sock %p", sk);
324 read_lock(&hci_dev_list_lock);
327 list_for_each_entry(d, &hci_dev_list, list) {
328 if (d->dev_type == HCI_BREDR)
332 rp_len = sizeof(*rp) + (2 * count);
333 rp = kmalloc(rp_len, GFP_ATOMIC);
335 read_unlock(&hci_dev_list_lock);
340 list_for_each_entry(d, &hci_dev_list, list) {
/* Devices still being configured or claimed by a user channel are
 * not exposed through mgmt.
 */
341 if (test_bit(HCI_SETUP, &d->dev_flags))
344 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
347 if (d->dev_type == HCI_BREDR) {
348 rp->index[count++] = cpu_to_le16(d->id);
349 BT_DBG("Added hci%u", d->id);
/* Recompute with the second-pass count: entries can disappear
 * between the two list walks.
 */
353 rp->num_controllers = cpu_to_le16(count);
354 rp_len = sizeof(*rp) + (2 * count);
356 read_unlock(&hci_dev_list_lock);
358 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT "supported settings" bitmask for @hdev from its
 * controller capabilities (LMP feature bits and HCI version).
 * NOTE(review): the settings variable declaration/initialisation and
 * the final return are not visible in this extracted view.
 */
366 static u32 get_supported_settings(struct hci_dev *hdev)
/* Always available regardless of controller type. */
370 settings |= MGMT_SETTING_POWERED;
371 settings |= MGMT_SETTING_PAIRABLE;
372 settings |= MGMT_SETTING_DEBUG_KEYS;
374 if (lmp_bredr_capable(hdev)) {
375 settings |= MGMT_SETTING_CONNECTABLE;
/* Fast connectable needs interlaced page scan, a 1.2 feature. */
376 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
377 settings |= MGMT_SETTING_FAST_CONNECTABLE;
378 settings |= MGMT_SETTING_DISCOVERABLE;
379 settings |= MGMT_SETTING_BREDR;
380 settings |= MGMT_SETTING_LINK_SECURITY;
382 if (lmp_ssp_capable(hdev)) {
383 settings |= MGMT_SETTING_SSP;
384 settings |= MGMT_SETTING_HS;
/* Secure Connections can also be force-enabled for testing via
 * HCI_FORCE_SC even without controller support.
 */
387 if (lmp_sc_capable(hdev) ||
388 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
389 settings |= MGMT_SETTING_SECURE_CONN;
392 if (lmp_le_capable(hdev)) {
393 settings |= MGMT_SETTING_LE;
394 settings |= MGMT_SETTING_ADVERTISING;
395 settings |= MGMT_SETTING_PRIVACY;
/* Build the MGMT "current settings" bitmask by sampling @hdev's
 * runtime flag bits. Mirrors get_supported_settings() bit for bit.
 * NOTE(review): the settings declaration and final return are not
 * visible in this extracted view.
 */
401 static u32 get_current_settings(struct hci_dev *hdev)
405 if (hdev_is_powered(hdev))
406 settings |= MGMT_SETTING_POWERED;
408 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
409 settings |= MGMT_SETTING_CONNECTABLE;
411 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
412 settings |= MGMT_SETTING_FAST_CONNECTABLE;
414 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
415 settings |= MGMT_SETTING_DISCOVERABLE;
417 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
418 settings |= MGMT_SETTING_PAIRABLE;
420 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_BREDR;
423 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
424 settings |= MGMT_SETTING_LE;
426 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
427 settings |= MGMT_SETTING_LINK_SECURITY;
429 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
430 settings |= MGMT_SETTING_SSP;
432 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
433 settings |= MGMT_SETTING_HS;
435 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
436 settings |= MGMT_SETTING_ADVERTISING;
438 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
439 settings |= MGMT_SETTING_SECURE_CONN;
441 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
442 settings |= MGMT_SETTING_DEBUG_KEYS;
444 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
445 settings |= MGMT_SETTING_PRIVACY;
450 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the device's 16-bit service UUIDs into
 * @data (at most @len bytes); returns the advanced write pointer.
 * Starts as EIR_UUID16_ALL and is downgraded to EIR_UUID16_SOME when
 * space runs out. PnP Information is deliberately not advertised.
 * NOTE(review): the uuids_start initialisation branch and the final
 * return are not visible in this extracted view.
 */
452 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
454 u8 *ptr = data, *uuids_start = NULL;
455 struct bt_uuid *uuid;
460 list_for_each_entry(uuid, &hdev->uuids, list) {
463 if (uuid->size != 16)
/* Stored 128-bit; the 16-bit short form lives at offset 12. */
466 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
470 if (uuid16 == PNP_INFO_SVCLASS_ID)
476 uuids_start[1] = EIR_UUID16_ALL;
480 /* Stop if not enough space to put next UUID */
481 if ((ptr - data) + sizeof(u16) > len) {
482 uuids_start[1] = EIR_UUID16_SOME;
/* Little-endian on the wire. */
486 *ptr++ = (uuid16 & 0x00ff);
487 *ptr++ = (uuid16 & 0xff00) >> 8;
488 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing 32-bit service UUIDs into @data; same
 * ALL/SOME downgrade scheme as create_uuid16_list().
 * NOTE(review): the uuids_start initialisation branch and the final
 * return are not visible in this extracted view.
 */
494 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
496 u8 *ptr = data, *uuids_start = NULL;
497 struct bt_uuid *uuid;
502 list_for_each_entry(uuid, &hdev->uuids, list) {
503 if (uuid->size != 32)
509 uuids_start[1] = EIR_UUID32_ALL;
513 /* Stop if not enough space to put next UUID */
514 if ((ptr - data) + sizeof(u32) > len) {
515 uuids_start[1] = EIR_UUID32_SOME;
/* The 32-bit value sits at offset 12 of the stored 128-bit UUID. */
519 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
521 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing full 128-bit service UUIDs into @data;
 * same ALL/SOME downgrade scheme as the 16/32-bit variants.
 * NOTE(review): the uuids_start initialisation branch and the final
 * return are not visible in this extracted view.
 */
527 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
529 u8 *ptr = data, *uuids_start = NULL;
530 struct bt_uuid *uuid;
535 list_for_each_entry(uuid, &hdev->uuids, list) {
536 if (uuid->size != 128)
542 uuids_start[1] = EIR_UUID128_ALL;
546 /* Stop if not enough space to put next UUID */
547 if ((ptr - data) + 16 > len) {
548 uuids_start[1] = EIR_UUID128_SOME;
552 memcpy(ptr, uuid->uuid, 16);
554 uuids_start[0] += 16;
/* Look up a pending mgmt command on @hdev by opcode.
 * NOTE(review): the "return cmd;" hit path and trailing "return NULL;"
 * are not visible in this extracted view. Caller must hold the
 * appropriate hdev locking -- presumably hci_dev_lock; verify against
 * callers.
 */
560 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
562 struct pending_cmd *cmd;
564 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
565 if (cmd->opcode == opcode)
/* Build LE scan response data containing the local name, truncated to
 * the remaining advertising space (EIR_NAME_SHORT when cut, otherwise
 * EIR_NAME_COMPLETE). Returns the resulting data length.
 * NOTE(review): local declarations, the truncation assignment inside
 * the name_len > max_len branch, and the final return are not visible
 * in this extracted view.
 */
572 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
577 name_len = strlen(hdev->dev_name);
/* Two bytes of field overhead: length byte + EIR type byte. */
579 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
581 if (name_len > max_len) {
583 ptr[1] = EIR_NAME_SHORT;
585 ptr[1] = EIR_NAME_COMPLETE;
587 ptr[0] = name_len + 1;
589 memcpy(ptr + 2, hdev->dev_name, name_len);
591 ad_len += (name_len + 2);
592 ptr += (name_len + 2);
/* Queue HCI_OP_LE_SET_SCAN_RSP_DATA on @req when the generated scan
 * response differs from what the controller already has. No-op if LE
 * is disabled or the data is unchanged (early returns not visible in
 * this extracted view).
 */
598 static void update_scan_rsp_data(struct hci_request *req)
600 struct hci_dev *hdev = req->hdev;
601 struct hci_cp_le_set_scan_rsp_data cp;
604 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
607 memset(&cp, 0, sizeof(cp));
609 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
611 if (hdev->scan_rsp_data_len == len &&
612 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
/* Cache the full (zero-padded) buffer so the comparison above stays
 * valid for future shorter payloads.
 */
615 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
616 hdev->scan_rsp_data_len = len;
620 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Compute the LE advertising discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED) for @hdev. A pending SET_DISCOVERABLE command takes
 * precedence over the current flag bits since those are not final yet.
 * NOTE(review): the "if (cmd)" wrapper, the cp->val test before the
 * LE_AD_GENERAL return, and the default return 0 are not visible in
 * this extracted view.
 */
623 static u8 get_adv_discov_flags(struct hci_dev *hdev)
625 struct pending_cmd *cmd;
627 /* If there's a pending mgmt command the flags will not yet have
628 * their final values, so check for this first.
630 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
632 struct mgmt_mode *cp = cmd->param;
634 return LE_AD_GENERAL;
/* 0x02 = limited discoverable mode in the mgmt API. */
635 else if (cp->val == 0x02)
636 return LE_AD_LIMITED;
638 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
639 return LE_AD_LIMITED;
640 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
641 return LE_AD_GENERAL;
/* Build LE advertising data for @hdev: flags field (discoverability
 * plus LE_AD_NO_BREDR when BR/EDR is disabled) and, when known, the
 * advertising TX power. Returns the total data length.
 * NOTE(review): the flags-field emission, pointer advancement and the
 * final return are not visible in this extracted view.
 */
647 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
649 u8 ad_len = 0, flags = 0;
651 flags |= get_adv_discov_flags(hdev);
653 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
654 flags |= LE_AD_NO_BREDR;
657 BT_DBG("adv flags 0x%02x", flags);
667 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
669 ptr[1] = EIR_TX_POWER;
670 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue HCI_OP_LE_SET_ADV_DATA on @req when the generated advertising
 * data differs from what the controller already has. Structure mirrors
 * update_scan_rsp_data() (early returns not visible in this extracted
 * view).
 */
679 static void update_adv_data(struct hci_request *req)
681 struct hci_dev *hdev = req->hdev;
682 struct hci_cp_le_set_adv_data cp;
685 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
688 memset(&cp, 0, sizeof(cp));
690 len = create_adv_data(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
692 if (hdev->adv_data_len == len &&
693 memcmp(cp.data, hdev->adv_data, len) == 0)
696 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
697 hdev->adv_data_len = len;
701 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Build the BR/EDR extended inquiry response buffer: local name,
 * inquiry TX power, Device ID record, then the 16/32/128-bit UUID
 * lists filling whatever space remains.
 * NOTE(review): local declarations, the name truncation branch, the
 * EIR_DEVICE_ID length byte and pointer advancement lines are not
 * visible in this extracted view.
 */
704 static void create_eir(struct hci_dev *hdev, u8 *data)
709 name_len = strlen(hdev->dev_name);
715 ptr[1] = EIR_NAME_SHORT;
717 ptr[1] = EIR_NAME_COMPLETE;
719 /* EIR Data length */
720 ptr[0] = name_len + 1;
722 memcpy(ptr + 2, hdev->dev_name, name_len);
724 ptr += (name_len + 2);
727 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
729 ptr[1] = EIR_TX_POWER;
730 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID: source, vendor, product, version -- all LE 16-bit. */
735 if (hdev->devid_source > 0) {
737 ptr[1] = EIR_DEVICE_ID;
739 put_unaligned_le16(hdev->devid_source, ptr + 2);
740 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
741 put_unaligned_le16(hdev->devid_product, ptr + 6);
742 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Each helper returns the advanced pointer; remaining space shrinks
 * accordingly.
 */
747 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
748 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
749 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue HCI_OP_WRITE_EIR on @req when the freshly generated EIR
 * differs from the cached copy. Skipped while powered off, without
 * extended-inquiry support, with SSP disabled, or while the service
 * cache is active (early return statements not visible in this
 * extracted view).
 */
752 static void update_eir(struct hci_request *req)
754 struct hci_dev *hdev = req->hdev;
755 struct hci_cp_write_eir cp;
757 if (!hdev_is_powered(hdev))
760 if (!lmp_ext_inq_capable(hdev))
763 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
766 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
769 memset(&cp, 0, sizeof(cp));
771 create_eir(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
773 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
776 memcpy(hdev->eir, cp.data, sizeof(cp.data));
778 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; used
 * as the top byte of the Class of Device.
 * NOTE(review): the val declaration and final return are not visible
 * in this extracted view.
 */
781 static u8 get_service_classes(struct hci_dev *hdev)
783 struct bt_uuid *uuid;
786 list_for_each_entry(uuid, &hdev->uuids, list)
787 val |= uuid->svc_hint;
/* Queue HCI_OP_WRITE_CLASS_OF_DEV on @req when the computed Class of
 * Device differs from the current one. Skipped while powered off, when
 * BR/EDR is disabled, or while the service cache is active (early
 * return statements not visible in this extracted view).
 */
792 static void update_class(struct hci_request *req)
794 struct hci_dev *hdev = req->hdev;
797 BT_DBG("%s", hdev->name);
799 if (!hdev_is_powered(hdev))
802 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
805 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
/* cod[] layout: minor, major, service classes. */
808 cod[0] = hdev->minor_class;
809 cod[1] = hdev->major_class;
810 cod[2] = get_service_classes(hdev);
/* NOTE(review): the limited-discoverable bit set on cod[1] inside
 * this branch is not visible in this extracted view.
 */
812 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
815 if (memcmp(cod, hdev->dev_class, 3) == 0)
818 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Return the effective connectable state: a pending SET_CONNECTABLE
 * command's requested value wins over the current HCI_CONNECTABLE
 * flag, which is not final while the command is in flight.
 * NOTE(review): the "if (cmd)" wrapper and the "return cp->val;" line
 * are not visible in this extracted view.
 */
821 static bool get_connectable(struct hci_dev *hdev)
823 struct pending_cmd *cmd;
825 /* If there's a pending mgmt command the flag will not yet have
826 * it's final value, so check for this first.
828 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
830 struct mgmt_mode *cp = cmd->param;
834 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Queue the HCI commands that configure and enable LE advertising:
 * advertising type follows the effective connectable state
 * (LE_ADV_IND vs LE_ADV_NONCONN_IND), address type comes from
 * hci_update_random_address().
 */
837 static void enable_advertising(struct hci_request *req)
839 struct hci_dev *hdev = req->hdev;
840 struct hci_cp_le_set_adv_param cp;
841 u8 own_addr_type, enable = 0x01;
844 /* Clear the HCI_ADVERTISING bit temporarily so that the
845 * hci_update_random_address knows that it's safe to go ahead
846 * and write a new random address. The flag will be set back on
847 * as soon as the SET_ADV_ENABLE HCI command completes.
849 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
851 connectable = get_connectable(hdev);
853 /* Set require_privacy to true only when non-connectable
854 * advertising is used. In that case it is fine to use a
855 * non-resolvable private address.
/* Bail out (return not visible here) when no usable own address can
 * be set up.
 */
857 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
860 memset(&cp, 0, sizeof(cp));
/* 0x0800 * 0.625ms = 1.28s advertising interval, min == max. */
861 cp.min_interval = cpu_to_le16(0x0800);
862 cp.max_interval = cpu_to_le16(0x0800);
863 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
864 cp.own_address_type = own_addr_type;
865 cp.channel_map = hdev->le_adv_channel_map;
867 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
869 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
872 static void disable_advertising(struct hci_request *req)
876 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler that clears HCI_SERVICE_CACHE and pushes the
 * now-final EIR/class data to the controller.
 * NOTE(review): the hci_dev_lock() call and the update_eir()/
 * update_class() invocations between req init and unlock are not
 * visible in this extracted view.
 */
879 static void service_cache_off(struct work_struct *work)
881 struct hci_dev *hdev = container_of(work, struct hci_dev,
883 struct hci_request req;
/* Nothing to do if the cache was already cleared. */
885 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
888 hci_req_init(&req, hdev);
895 hci_dev_unlock(hdev);
897 hci_req_run(&req, NULL);
/* Delayed-work handler for resolvable-private-address rotation: mark
 * the RPA expired and, if we are advertising with no LE connections,
 * restart advertising so enable_advertising() programs a fresh RPA.
 */
900 static void rpa_expired(struct work_struct *work)
902 struct hci_dev *hdev = container_of(work, struct hci_dev,
904 struct hci_request req;
908 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Only rotate while advertising and with no active LE links; the
 * early return is not visible in this extracted view.
 */
910 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
911 hci_conn_num(hdev, LE_LINK) > 0)
914 /* The generation of a new RPA and programming it into the
915 * controller happens in the enable_advertising() function.
918 hci_req_init(&req, hdev);
920 disable_advertising(&req);
921 enable_advertising(&req);
923 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation, triggered on first mgmt
 * access. test_and_set_bit(HCI_MGMT) guarantees the body runs only
 * once (the early return for the already-set case is not visible in
 * this extracted view).
 */
926 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
928 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
931 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
932 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
934 /* Non-mgmt controlled devices get this bit set
935 * implicitly so that pairing works for them, however
936 * for mgmt we require user-space to explicitly enable
939 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: snapshot the controller's address,
 * version, manufacturer, settings masks, class and names under
 * hci_dev_lock (the lock call is not visible in this extracted view).
 */
942 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
943 void *data, u16 data_len)
945 struct mgmt_rp_read_info rp;
947 BT_DBG("sock %p %s", sk, hdev->name);
951 memset(&rp, 0, sizeof(rp));
953 bacpy(&rp.bdaddr, &hdev->bdaddr);
955 rp.version = hdev->hci_ver;
956 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
958 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
959 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
961 memcpy(rp.dev_class, hdev->dev_class, 3);
963 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
964 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
966 hci_dev_unlock(hdev);
968 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command's resources (body not visible in this
 * extracted view -- presumably frees cmd->param and cmd and drops the
 * socket reference; verify against the full file).
 */
972 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending-command entry for @opcode on @hdev, copy @len
 * bytes of request parameters into it and link it on
 * hdev->mgmt_pending. NOTE(review): allocation-failure handling,
 * the sock_hold()/cmd->sk assignment and the final return are not
 * visible in this extracted view.
 */
979 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
980 struct hci_dev *hdev, void *data,
983 struct pending_cmd *cmd;
985 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
989 cmd->opcode = opcode;
990 cmd->index = hdev->id;
/* Own copy of the request parameters; freed by mgmt_pending_free(). */
992 cmd->param = kmalloc(len, GFP_KERNEL);
999 memcpy(cmd->param, data, len);
1004 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command matching @opcode (0 matches
 * all). Safe iteration so callbacks may remove entries.
 * NOTE(review): the cb(cmd, data) invocation line is not visible in
 * this extracted view.
 */
1009 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1010 void (*cb)(struct pending_cmd *cmd,
1014 struct pending_cmd *cmd, *tmp;
1016 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1017 if (opcode > 0 && cmd->opcode != opcode)
/* Unlink a pending command from its list and free it. */
1024 static void mgmt_pending_remove(struct pending_cmd *cmd)
1026 list_del(&cmd->list);
1027 mgmt_pending_free(cmd);
/* Reply to @opcode with a cmd_complete carrying the current settings
 * bitmask (LE 32-bit on the wire).
 */
1030 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1032 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1034 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, cancel the delayed power-off and run it immediately on the
 * request workqueue.
 */
1038 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1040 BT_DBG("%s status 0x%02x", hdev->name, status);
1042 if (hci_conn_count(hdev) == 0) {
1043 cancel_delayed_work(&hdev->power_off);
1044 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Build and run an HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising and LE
 * scanning, then disconnect/cancel/reject every connection depending
 * on its state. Completion continues in clean_up_hci_complete().
 * NOTE(review): the scan variable, several case labels and break
 * statements in the switch are not visible in this extracted view.
 */
1048 static int clean_up_hci_state(struct hci_dev *hdev)
1050 struct hci_request req;
1051 struct hci_conn *conn;
1053 hci_req_init(&req, hdev);
1055 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1056 test_bit(HCI_PSCAN, &hdev->flags)) {
1058 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1061 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1062 disable_advertising(&req);
1064 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1065 hci_req_add_le_scan_disable(&req);
1068 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1069 struct hci_cp_disconnect dc;
1070 struct hci_cp_reject_conn_req rej;
/* Established connections get a Disconnect ... */
1072 switch (conn->state) {
1075 dc.handle = cpu_to_le16(conn->handle);
1076 dc.reason = 0x15; /* Terminated due to Power Off */
1077 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* ... outgoing attempts get a Create Connection Cancel ... */
1080 if (conn->type == LE_LINK)
1081 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1083 else if (conn->type == ACL_LINK)
1084 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* ... and incoming requests get rejected. */
1088 bacpy(&rej.bdaddr, &conn->dst);
1089 rej.reason = 0x15; /* Terminated due to Power Off */
1090 if (conn->type == ACL_LINK)
1091 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1093 else if (conn->type == SCO_LINK)
1094 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1100 return hci_req_run(&req, clean_up_hci_complete);
/* MGMT_OP_SET_POWERED handler. Validates val (0/1), rejects a second
 * concurrent SET_POWERED, short-circuits when the state already
 * matches, and otherwise registers a pending command and either queues
 * power_on work or starts the HCI clean-up sequence before power-off.
 * NOTE(review): hci_dev_lock(), several goto/err paths and branch
 * closers are not visible in this extracted view.
 */
1103 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1106 struct mgmt_mode *cp = data;
1107 struct pending_cmd *cmd;
1110 BT_DBG("request for %s", hdev->name);
1112 if (cp->val != 0x00 && cp->val != 0x01)
1113 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1114 MGMT_STATUS_INVALID_PARAMS);
1118 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1119 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: the device is already up, so just cancel the
 * scheduled power-off and flip ownership to mgmt.
 */
1124 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1125 cancel_delayed_work(&hdev->power_off);
1128 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1130 err = mgmt_powered(hdev, 1);
/* Requested state already current -> settings response, no work. */
1135 if (!!cp->val == hdev_is_powered(hdev)) {
1136 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1140 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1147 queue_work(hdev->req_workqueue, &hdev->power_on);
1150 /* Disconnect connections, stop scans, etc */
1151 err = clean_up_hci_state(hdev);
/* Safety net: force power-off even if clean-up never completes. */
1153 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1154 HCI_POWER_OFF_TIMEOUT);
1156 /* ENODATA means there were no HCI commands queued */
1157 if (err == -ENODATA) {
1158 cancel_delayed_work(&hdev->power_off);
1159 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1165 hci_dev_unlock(hdev);
/* Broadcast mgmt event @event with @data_len payload bytes to all
 * mgmt control sockets except @skip_sk. A NULL @hdev produces an
 * index-less event (MGMT_INDEX_NONE); the "if (hdev)" selector and the
 * final return are not visible in this extracted view.
 */
1169 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1170 struct sock *skip_sk)
1172 struct sk_buff *skb;
1173 struct mgmt_hdr *hdr;
1175 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1179 hdr = (void *) skb_put(skb, sizeof(*hdr));
1180 hdr->opcode = cpu_to_le16(event);
1182 hdr->index = cpu_to_le16(hdev->id);
1184 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1185 hdr->len = cpu_to_le16(data_len);
1188 memcpy(skb_put(skb, data_len), data, data_len);
/* Time stamp */
1191 __net_timestamp(skb);
1193 hci_send_to_control(skb, skip_sk);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * every mgmt socket except @skip.
 */
1199 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1203 ev = cpu_to_le32(get_current_settings(hdev));
1205 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1210 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with a
 * settings response, remember the first socket in the cmd_lookup match
 * (held so the caller can skip it when broadcasting new_settings), and
 * free the entry.
 */
1214 static void settings_rsp(struct pending_cmd *cmd, void *data)
1216 struct cmd_lookup *match = data;
1218 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1220 list_del(&cmd->list);
1222 if (match->sk == NULL) {
1223 match->sk = cmd->sk;
1224 sock_hold(match->sk);
1227 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail each pending command with the
 * status passed via @data (a u8 pointer; the cast line is not visible
 * in this extracted view) and remove it.
 */
1230 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1234 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1235 mgmt_pending_remove(cmd);
1238 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1240 if (!lmp_bredr_capable(hdev))
1241 return MGMT_STATUS_NOT_SUPPORTED;
1242 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1243 return MGMT_STATUS_REJECTED;
1245 return MGMT_STATUS_SUCCESS;
1248 static u8 mgmt_le_support(struct hci_dev *hdev)
1250 if (!lmp_le_capable(hdev))
1251 return MGMT_STATUS_NOT_SUPPORTED;
1252 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1253 return MGMT_STATUS_REJECTED;
1255 return MGMT_STATUS_SUCCESS;
/* HCI-request completion for SET_DISCOVERABLE: on failure report the
 * mapped status and clear limited mode; on success update the
 * HCI_DISCOVERABLE flag per the requested value, (re)arm the
 * discoverable timeout, answer the pending command and broadcast
 * new_settings. Finishes by refreshing the Class of Device so the
 * limited-discoverable bit is correct.
 * NOTE(review): hci_dev_lock(), the NULL-cmd early exit, the cp->val
 * branch selector and the update_class() call are not visible in this
 * extracted view.
 */
1258 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1260 struct pending_cmd *cmd;
1261 struct mgmt_mode *cp;
1262 struct hci_request req;
1265 BT_DBG("status 0x%02x", status);
1269 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1274 u8 mgmt_err = mgmt_status(status);
1275 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1276 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1282 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1285 if (hdev->discov_timeout > 0) {
1286 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1287 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1291 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1295 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
/* Only broadcast when the flag actually flipped. */
1298 new_settings(hdev, cmd->sk);
1300 /* When the discoverable mode gets changed, make sure
1301 * that class of device has the limited discoverable
1302 * bit correctly set.
1304 hci_req_init(&req, hdev);
1306 hci_req_run(&req, NULL);
1309 mgmt_pending_remove(cmd);
1312 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited (limited requires a timeout; off forbids one). Handles
 * powered-off flag-only updates, timeout-only updates without HCI
 * traffic, and otherwise builds an HCI request (IAC LAP + scan enable
 * for BR/EDR, advertising data for LE-only) completed by
 * set_discoverable_complete().
 * NOTE(review): hci_dev_lock(), goto/err labels, the scan variable and
 * several branch closers are not visible in this extracted view.
 */
1315 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1318 struct mgmt_cp_set_discoverable *cp = data;
1319 struct pending_cmd *cmd;
1320 struct hci_request req;
1325 BT_DBG("request for %s", hdev->name);
1327 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1328 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1329 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1330 MGMT_STATUS_REJECTED);
1332 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1333 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1334 MGMT_STATUS_INVALID_PARAMS);
1336 timeout = __le16_to_cpu(cp->timeout);
1338 /* Disabling discoverable requires that no timeout is set,
1339 * and enabling limited discoverable requires a timeout.
1341 if ((cp->val == 0x00 && timeout > 0) ||
1342 (cp->val == 0x02 && timeout == 0))
1343 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1344 MGMT_STATUS_INVALID_PARAMS);
/* A timeout can only be armed while powered. */
1348 if (!hdev_is_powered(hdev) && timeout > 0) {
1349 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1350 MGMT_STATUS_NOT_POWERED);
1354 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1355 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1356 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable. */
1361 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1362 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1363 MGMT_STATUS_REJECTED);
1367 if (!hdev_is_powered(hdev)) {
1368 bool changed = false;
1370 /* Setting limited discoverable when powered off is
1371 * not a valid operation since it requires a timeout
1372 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1374 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1375 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1379 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1384 err = new_settings(hdev, sk);
1389 /* If the current mode is the same, then just update the timeout
1390 * value with the new value. And if only the timeout gets updated,
1391 * then no need for any HCI transactions.
1393 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1394 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1395 &hdev->dev_flags)) {
1396 cancel_delayed_work(&hdev->discov_off);
1397 hdev->discov_timeout = timeout;
1399 if (cp->val && hdev->discov_timeout > 0) {
1400 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1401 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1405 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1409 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1415 /* Cancel any potential discoverable timeout that might be
1416 * still active and store new timeout value. The arming of
1417 * the timeout happens in the complete handler.
1419 cancel_delayed_work(&hdev->discov_off);
1420 hdev->discov_timeout = timeout;
1422 /* Limited discoverable mode */
1423 if (cp->val == 0x02)
1424 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1426 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1428 hci_req_init(&req, hdev);
1430 /* The procedure for LE-only controllers is much simpler - just
1431 * update the advertising data.
1433 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1439 struct hci_cp_write_current_iac_lap hci_cp;
1441 if (cp->val == 0x02) {
1442 /* Limited discoverable mode */
/* LIAC 0x9e8b00 then GIAC 0x9e8b33, LAPs little-endian. */
1443 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1444 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1445 hci_cp.iac_lap[1] = 0x8b;
1446 hci_cp.iac_lap[2] = 0x9e;
1447 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1448 hci_cp.iac_lap[4] = 0x8b;
1449 hci_cp.iac_lap[5] = 0x9e;
1451 /* General discoverable mode */
1453 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1454 hci_cp.iac_lap[1] = 0x8b;
1455 hci_cp.iac_lap[2] = 0x9e;
/* One num_iac byte plus 3 bytes per LAP actually used. */
1458 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1459 (hci_cp.num_iac * 3) + 1, &hci_cp);
1461 scan |= SCAN_INQUIRY;
1463 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1466 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
/* Keep the LE advertising flags in sync as well. */
1469 update_adv_data(&req);
1471 err = hci_req_run(&req, set_discoverable_complete);
1473 mgmt_pending_remove(cmd);
1476 hci_dev_unlock(hdev);
/* Queue HCI commands that set the page-scan parameters for "fast
 * connectable" mode (short interval, interlaced scan) or restore the
 * defaults.  Commands are only added to @req if the stored parameters
 * actually differ from the requested ones.
 * NOTE(review): several source lines are missing from this excerpt
 * (early returns, else branches) — verify against the full file.
 */
1480 static void write_fast_connectable(struct hci_request *req, bool enable)
1482 struct hci_dev *hdev = req->hdev;
1483 struct hci_cp_write_page_scan_activity acp;
/* BR/EDR must be enabled and page-scan tuning needs >= Bluetooth 1.2. */
1486 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1489 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1493 type = PAGE_SCAN_TYPE_INTERLACED;
1495 /* 160 msec page scan interval */
1496 acp.interval = cpu_to_le16(0x0100);
1498 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1500 /* default 1.28 sec page scan */
1501 acp.interval = cpu_to_le16(0x0800);
1504 acp.window = cpu_to_le16(0x0012);
/* Only emit the HCI commands when the values would actually change. */
1506 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1507 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1508 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1511 if (hdev->page_scan_type != type)
1512 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE: on error
 * send a command-status failure to the requester; on success update the
 * HCI_CONNECTABLE flag, reply with the new settings and, if the flag
 * changed, broadcast a New Settings event.
 */
1515 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1517 struct pending_cmd *cmd;
1518 struct mgmt_mode *cp;
1521 BT_DBG("status 0x%02x", status);
1525 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1530 u8 mgmt_err = mgmt_status(status);
1531 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* test_and_* report whether the flag actually flipped. */
1537 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1539 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1541 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1544 new_settings(hdev, cmd->sk);
1547 mgmt_pending_remove(cmd);
1550 hci_dev_unlock(hdev);
/* Update the HCI_CONNECTABLE setting without issuing HCI commands (used
 * when the controller is powered off or no HCI traffic is needed).
 * Clearing connectable also clears HCI_DISCOVERABLE.  Sends the settings
 * response and, when something changed, a New Settings event.
 */
1553 static int set_connectable_update_settings(struct hci_dev *hdev,
1554 struct sock *sk, u8 val)
1556 bool changed = false;
1559 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1563 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1565 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* A device cannot stay discoverable once it is not connectable. */
1566 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1569 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1574 return new_settings(hdev, sk);
/* Handler for MGMT_OP_SET_CONNECTABLE: toggle whether the controller
 * accepts incoming connections (BR/EDR page scan and/or LE advertising).
 * Validates the request, handles the powered-off case via
 * set_connectable_update_settings(), and otherwise builds and runs an
 * HCI request whose completion is set_connectable_complete().
 */
1579 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1582 struct mgmt_mode *cp = data;
1583 struct pending_cmd *cmd;
1584 struct hci_request req;
1588 BT_DBG("request for %s", hdev->name);
/* Reject if neither transport (LE, BR/EDR) is enabled. */
1590 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1591 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1592 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1593 MGMT_STATUS_REJECTED);
1595 if (cp->val != 0x00 && cp->val != 0x01)
1596 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1597 MGMT_STATUS_INVALID_PARAMS);
1601 if (!hdev_is_powered(hdev)) {
1602 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Only one discoverable/connectable operation may be in flight. */
1606 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1607 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1608 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1613 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1619 hci_req_init(&req, hdev);
1621 /* If BR/EDR is not enabled and we disable advertising as a
1622 * by-product of disabling connectable, we need to update the
1623 * advertising flags.
1625 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1627 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1628 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1630 update_adv_data(&req);
1631 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Going non-connectable: stop any pending discoverable timeout. */
1637 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1638 hdev->discov_timeout > 0)
1639 cancel_delayed_work(&hdev->discov_off);
1642 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1645 /* If we're going from non-connectable to connectable or
1646 * vice-versa when fast connectable is enabled ensure that fast
1647 * connectable gets disabled. write_fast_connectable won't do
1648 * anything if the page scan parameters are already what they
1651 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1652 write_fast_connectable(&req, false);
/* Restart advertising so it reflects the new connectable state,
 * but only when no LE links would be disturbed.
 */
1654 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1655 hci_conn_num(hdev, LE_LINK) == 0) {
1656 disable_advertising(&req);
1657 enable_advertising(&req);
1660 err = hci_req_run(&req, set_connectable_complete);
1662 mgmt_pending_remove(cmd);
/* -ENODATA: the request contained no commands; settings-only path. */
1663 if (err == -ENODATA)
1664 err = set_connectable_update_settings(hdev, sk,
1670 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PAIRABLE: pure host-side flag, no HCI traffic.
 * Toggles HCI_PAIRABLE, replies with current settings and broadcasts
 * New Settings if the flag changed.
 */
1674 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1677 struct mgmt_mode *cp = data;
1681 BT_DBG("request for %s", hdev->name);
1683 if (cp->val != 0x00 && cp->val != 0x01)
1684 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1685 MGMT_STATUS_INVALID_PARAMS);
1690 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1692 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1694 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1699 err = new_settings(hdev, sk);
1702 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link-level
 * authentication.  When powered off only the HCI_LINK_SECURITY flag is
 * updated; when powered on a HCI Write Auth Enable command is issued and
 * a pending command tracks completion.
 */
1706 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1709 struct mgmt_mode *cp = data;
1710 struct pending_cmd *cmd;
1714 BT_DBG("request for %s", hdev->name);
1716 status = mgmt_bredr_support(hdev);
1718 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1721 if (cp->val != 0x00 && cp->val != 0x01)
1722 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1723 MGMT_STATUS_INVALID_PARAMS);
1727 if (!hdev_is_powered(hdev)) {
1728 bool changed = false;
1730 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1731 &hdev->dev_flags)) {
1732 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1736 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1741 err = new_settings(hdev, sk);
1746 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1747 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Already in the requested state: answer without HCI traffic. */
1754 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1755 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1759 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1765 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1767 mgmt_pending_remove(cmd);
1772 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * Requires BR/EDR support and an SSP-capable controller.  Disabling SSP
 * also clears High Speed support (HS depends on SSP).  Powered-off
 * requests update flags only; otherwise HCI Write SSP Mode is sent.
 */
1776 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1778 struct mgmt_mode *cp = data;
1779 struct pending_cmd *cmd;
1783 BT_DBG("request for %s", hdev->name);
1785 status = mgmt_bredr_support(hdev);
1787 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1789 if (!lmp_ssp_capable(hdev))
1790 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1791 MGMT_STATUS_NOT_SUPPORTED);
1793 if (cp->val != 0x00 && cp->val != 0x01)
1794 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1795 MGMT_STATUS_INVALID_PARAMS);
1799 if (!hdev_is_powered(hdev)) {
1803 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1806 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* HS cannot remain enabled without SSP. */
1809 changed = test_and_clear_bit(HCI_HS_ENABLED,
1812 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1815 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1820 err = new_settings(hdev, sk);
/* Serialize against concurrent SSP/HS operations. */
1825 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1826 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1827 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1832 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1833 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1837 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1843 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1845 mgmt_pending_remove(cmd);
1850 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS: enable/disable High Speed (AMP) support.
 * A host-side flag only — no HCI commands are sent.  Requires BR/EDR,
 * SSP capability and SSP enabled; disabling while powered is rejected.
 */
1854 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1856 struct mgmt_mode *cp = data;
1861 BT_DBG("request for %s", hdev->name);
1863 status = mgmt_bredr_support(hdev);
1865 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1867 if (!lmp_ssp_capable(hdev))
1868 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1869 MGMT_STATUS_NOT_SUPPORTED);
1871 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1872 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1873 MGMT_STATUS_REJECTED);
1875 if (cp->val != 0x00 && cp->val != 0x01)
1876 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1877 MGMT_STATUS_INVALID_PARAMS);
1882 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1884 if (hdev_is_powered(hdev)) {
1885 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1886 MGMT_STATUS_REJECTED);
1890 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1893 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1898 err = new_settings(hdev, sk);
1901 hci_dev_unlock(hdev);
/* HCI request completion handler for MGMT_OP_SET_LE: on error fail all
 * pending SET_LE commands; on success answer them with the new settings,
 * broadcast New Settings, and refresh advertising/scan-response data if
 * LE ended up enabled.
 */
1905 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1907 struct cmd_lookup match = { NULL, hdev };
1910 u8 mgmt_err = mgmt_status(status);
1912 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1917 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1919 new_settings(hdev, match.sk);
1924 /* Make sure the controller has a good default for
1925 * advertising data. Restrict the update to when LE
1926 * has actually been enabled. During power on, the
1927 * update in powered_update_hci will take care of it.
1929 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1930 struct hci_request req;
1934 hci_req_init(&req, hdev);
1935 update_adv_data(&req);
1936 update_scan_rsp_data(&req);
1937 hci_req_run(&req, NULL);
1939 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE: enable/disable the Low Energy transport.
 * Rejected on LE-only controllers (no toggling) and when LE is not
 * supported.  Powered-off or no-op requests update HCI_LE_ENABLED only;
 * otherwise HCI Write LE Host Supported is queued (plus disabling
 * advertising when turning LE off) and le_enable_complete() finishes up.
 */
1943 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1945 struct mgmt_mode *cp = data;
1946 struct hci_cp_write_le_host_supported hci_cp;
1947 struct pending_cmd *cmd;
1948 struct hci_request req;
1952 BT_DBG("request for %s", hdev->name);
1954 if (!lmp_le_capable(hdev))
1955 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1956 MGMT_STATUS_NOT_SUPPORTED);
1958 if (cp->val != 0x00 && cp->val != 0x01)
1959 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1960 MGMT_STATUS_INVALID_PARAMS);
1962 /* LE-only devices do not allow toggling LE on/off */
1963 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1964 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1965 MGMT_STATUS_REJECTED);
1970 enabled = lmp_host_le_capable(hdev);
/* Nothing to tell the controller: just track the flag. */
1972 if (!hdev_is_powered(hdev) || val == enabled) {
1973 bool changed = false;
1975 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1976 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Turning LE off implies advertising off as well. */
1980 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1981 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1985 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1990 err = new_settings(hdev, sk);
1995 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1996 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1997 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2002 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2008 hci_req_init(&req, hdev);
2010 memset(&hci_cp, 0, sizeof(hci_cp));
2014 hci_cp.simul = lmp_le_br_capable(hdev);
2016 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2017 disable_advertising(&req);
2020 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2023 err = hci_req_run(&req, le_enable_complete);
2025 mgmt_pending_remove(cmd);
2028 hci_dev_unlock(hdev);
2032 /* This is a helper function to test for pending mgmt commands that can
2033 * cause CoD or EIR HCI commands. We can only allow one such pending
2034 * mgmt command at a time since otherwise we cannot easily track what
2035 * the current values are, will be, and based on that calculate if a new
2036 * HCI command needs to be sent and if yes with what value.
/* Returns true when any pending command could touch the device class
 * or EIR data (ADD/REMOVE_UUID, SET_DEV_CLASS, SET_POWERED).
 */
2038 static bool pending_eir_or_class(struct hci_dev *hdev)
2040 struct pending_cmd *cmd;
2042 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2043 switch (cmd->opcode) {
2044 case MGMT_OP_ADD_UUID:
2045 case MGMT_OP_REMOVE_UUID:
2046 case MGMT_OP_SET_DEV_CLASS:
2047 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short UUIDs.
 */
2055 static const u8 bluetooth_base_uuid[] = {
2056 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2057 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Determine the smallest encoding for a 128-bit little-endian UUID:
 * anything not built on the Bluetooth Base UUID stays 128-bit; otherwise
 * the top 32 bits (at offset 12) decide between 16- and 32-bit forms.
 */
2060 static u8 get_uuid_size(const u8 *uuid)
2064 if (memcmp(uuid, bluetooth_base_uuid, 12))
2067 val = get_unaligned_le32(&uuid[12]);
/* Common completion helper for the class/EIR-affecting commands: reply
 * to the pending @mgmt_op with the (possibly unchanged) device class
 * and drop the pending entry.
 */
2074 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2076 struct pending_cmd *cmd;
2080 cmd = mgmt_pending_find(mgmt_op, hdev);
2084 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2085 hdev->dev_class, 3);
2087 mgmt_pending_remove(cmd);
2090 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_ADD_UUID. */
2093 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2095 BT_DBG("status 0x%02x", status);
2097 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID: record a service UUID in hdev->uuids and
 * refresh the controller's class/EIR via an HCI request.  -ENODATA from
 * hci_req_run means no HCI update was needed and the command completes
 * immediately with the current device class.
 */
2100 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2102 struct mgmt_cp_add_uuid *cp = data;
2103 struct pending_cmd *cmd;
2104 struct hci_request req;
2105 struct bt_uuid *uuid;
2108 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR-affecting command may be pending at a time. */
2112 if (pending_eir_or_class(hdev)) {
2113 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2118 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2124 memcpy(uuid->uuid, cp->uuid, 16);
2125 uuid->svc_hint = cp->svc_hint;
2126 uuid->size = get_uuid_size(cp->uuid);
2128 list_add_tail(&uuid->list, &hdev->uuids);
2130 hci_req_init(&req, hdev);
2135 err = hci_req_run(&req, add_uuid_complete);
2137 if (err != -ENODATA)
2140 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2141 hdev->dev_class, 3);
2145 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2154 hci_dev_unlock(hdev);
/* Arm the service-cache mode if the device is powered: sets
 * HCI_SERVICE_CACHE and schedules the delayed flush work.  Returns
 * whether the caller should defer the EIR/class update.
 */
2158 static bool enable_service_cache(struct hci_dev *hdev)
2160 if (!hdev_is_powered(hdev))
2163 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2164 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for MGMT_OP_REMOVE_UUID. */
2172 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2174 BT_DBG("status 0x%02x", status);
2176 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID: remove one UUID (or all, when the
 * all-zero wildcard UUID is given) from hdev->uuids and refresh the
 * controller's class/EIR.  Removing an unknown UUID is an
 * INVALID_PARAMS error.
 */
2179 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2182 struct mgmt_cp_remove_uuid *cp = data;
2183 struct pending_cmd *cmd;
2184 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
2185 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2186 struct hci_request req;
2189 BT_DBG("request for %s", hdev->name);
2193 if (pending_eir_or_class(hdev)) {
2194 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2199 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2200 hci_uuids_clear(hdev);
/* If the service cache got armed, defer the EIR update to it. */
2202 if (enable_service_cache(hdev)) {
2203 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2204 0, hdev->dev_class, 3);
2213 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2214 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2217 list_del(&match->list);
2223 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2224 MGMT_STATUS_INVALID_PARAMS);
2229 hci_req_init(&req, hdev);
2234 err = hci_req_run(&req, remove_uuid_complete);
/* -ENODATA: nothing to send; complete immediately. */
2236 if (err != -ENODATA)
2239 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2240 hdev->dev_class, 3);
2244 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2253 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_DEV_CLASS. */
2257 static void set_class_complete(struct hci_dev *hdev, u8 status)
2259 BT_DBG("status 0x%02x", status);
2261 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS: set the major/minor Class of
 * Device.  Validates the reserved bits (low 2 of minor, high 3 of
 * major must be zero), flushes a pending service cache if armed, and
 * pushes the new class to the controller when powered.
 */
2264 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2267 struct mgmt_cp_set_dev_class *cp = data;
2268 struct pending_cmd *cmd;
2269 struct hci_request req;
2272 BT_DBG("request for %s", hdev->name);
2274 if (!lmp_bredr_capable(hdev))
2275 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2276 MGMT_STATUS_NOT_SUPPORTED);
2280 if (pending_eir_or_class(hdev)) {
2281 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD bits must not be set. */
2286 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2287 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2288 MGMT_STATUS_INVALID_PARAMS);
2292 hdev->major_class = cp->major;
2293 hdev->minor_class = cp->minor;
2295 if (!hdev_is_powered(hdev)) {
2296 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2297 hdev->dev_class, 3);
2301 hci_req_init(&req, hdev);
/* Drop the lock while synchronously cancelling the cache work to
 * avoid deadlocking against the work item taking the same lock.
 */
2303 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2304 hci_dev_unlock(hdev);
2305 cancel_delayed_work_sync(&hdev->service_cache);
2312 err = hci_req_run(&req, set_class_complete);
2314 if (err != -ENODATA)
2317 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2318 hdev->dev_class, 3);
2322 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2331 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the device's stored BR/EDR
 * link keys with the supplied list.  Validates the payload length
 * against key_count, each key's address type and key type, updates the
 * HCI_DEBUG_KEYS flag, then clears and repopulates the key store.
 * NOTE(review): excerpt omits lines — e.g. the key_count upper-bound
 * check present in later kernels is not visible here.
 */
2335 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2338 struct mgmt_cp_load_link_keys *cp = data;
2339 u16 key_count, expected_len;
2343 BT_DBG("request for %s", hdev->name);
2345 if (!lmp_bredr_capable(hdev))
2346 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2347 MGMT_STATUS_NOT_SUPPORTED);
2349 key_count = __le16_to_cpu(cp->key_count);
/* The packet length must exactly match the declared key count. */
2351 expected_len = sizeof(*cp) + key_count *
2352 sizeof(struct mgmt_link_key_info);
2353 if (expected_len != len) {
2354 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2356 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 MGMT_STATUS_INVALID_PARAMS);
2360 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2361 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2362 MGMT_STATUS_INVALID_PARAMS);
2364 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every key before touching the stored set. */
2367 for (i = 0; i < key_count; i++) {
2368 struct mgmt_link_key_info *key = &cp->keys[i];
2370 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2371 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 MGMT_STATUS_INVALID_PARAMS);
2377 hci_link_keys_clear(hdev);
2380 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2382 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2385 new_settings(hdev, NULL);
2387 for (i = 0; i < key_count; i++) {
2388 struct mgmt_link_key_info *key = &cp->keys[i];
2390 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2391 key->type, key->pin_len);
2394 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2396 hci_dev_unlock(hdev);
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given address,
 * excluding @skip_sk (normally the socket that requested the unpair,
 * which gets a command response instead).
 */
2401 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2402 u8 addr_type, struct sock *skip_sk)
2404 struct mgmt_ev_device_unpaired ev;
2406 bacpy(&ev.addr.bdaddr, bdaddr);
2407 ev.addr.type = addr_type;
2409 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE: delete the stored keys for a
 * device (link key for BR/EDR; IRK, conn params and LTKs for LE) and
 * optionally disconnect an existing connection.  When a disconnect is
 * issued, completion is deferred via a pending command.
 */
2413 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2416 struct mgmt_cp_unpair_device *cp = data;
2417 struct mgmt_rp_unpair_device rp;
2418 struct hci_cp_disconnect dc;
2419 struct pending_cmd *cmd;
2420 struct hci_conn *conn;
2423 memset(&rp, 0, sizeof(rp));
2424 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2425 rp.addr.type = cp->addr.type;
2427 if (!bdaddr_type_is_valid(cp->addr.type))
2428 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2429 MGMT_STATUS_INVALID_PARAMS,
2432 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2433 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2434 MGMT_STATUS_INVALID_PARAMS,
2439 if (!hdev_is_powered(hdev)) {
2440 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2441 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2445 if (cp->addr.type == BDADDR_BREDR) {
2446 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE: map the mgmt address type to the HCI address type. */
2450 if (cp->addr.type == BDADDR_LE_PUBLIC)
2451 addr_type = ADDR_LE_DEV_PUBLIC;
2453 addr_type = ADDR_LE_DEV_RANDOM;
2455 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2457 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2459 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2463 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2464 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2468 if (cp->disconnect) {
2469 if (cp->addr.type == BDADDR_BREDR)
2470 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2473 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection: finish now and notify other sockets. */
2480 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2482 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2486 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2493 dc.handle = cpu_to_le16(conn->handle);
2494 dc.reason = 0x13; /* Remote User Terminated Connection */
2495 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2497 mgmt_pending_remove(cmd);
2500 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT: terminate an existing ACL or LE
 * connection to the given address.  The reply is deferred via a pending
 * command until the HCI disconnect completes.
 */
2504 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2507 struct mgmt_cp_disconnect *cp = data;
2508 struct mgmt_rp_disconnect rp;
2509 struct hci_cp_disconnect dc;
2510 struct pending_cmd *cmd;
2511 struct hci_conn *conn;
2516 memset(&rp, 0, sizeof(rp));
2517 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2518 rp.addr.type = cp->addr.type;
2520 if (!bdaddr_type_is_valid(cp->addr.type))
2521 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2522 MGMT_STATUS_INVALID_PARAMS,
2527 if (!test_bit(HCI_UP, &hdev->flags)) {
2528 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2529 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be in flight at a time. */
2533 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2534 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2535 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2539 if (cp->addr.type == BDADDR_BREDR)
2540 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2543 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2545 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2546 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2547 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2551 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2557 dc.handle = cpu_to_le16(conn->handle);
2558 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2560 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2562 mgmt_pending_remove(cmd);
2565 hci_dev_unlock(hdev);
/* Map an HCI link type + HCI address type pair to the mgmt interface's
 * BDADDR_* address-type values.  Non-LE links map to BDADDR_BREDR.
 */
2569 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2571 switch (link_type) {
2573 switch (addr_type) {
2574 case ADDR_LE_DEV_PUBLIC:
2575 return BDADDR_LE_PUBLIC;
2578 /* Fallback to LE Random address type */
2579 return BDADDR_LE_RANDOM;
2583 /* Fallback to BR/EDR type */
2584 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: return the address and type of
 * every mgmt-visible connection.  SCO/eSCO links are filtered out; the
 * response length is recomputed after filtering.
 */
2588 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2591 struct mgmt_rp_get_connections *rp;
2601 if (!hdev_is_powered(hdev)) {
2602 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2603 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply buffer. */
2608 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2609 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2613 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2614 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the entries, skipping SCO/eSCO. */
2621 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2622 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2624 bacpy(&rp->addr[i].bdaddr, &c->dst);
2625 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2626 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2631 rp->conn_count = cpu_to_le16(i);
2633 /* Recalculate length in case of filtered SCO connections, etc */
2634 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2636 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2642 hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply, tracking the operation as a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command so the eventual HCI event
 * can be matched back to the requesting socket.
 */
2646 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2647 struct mgmt_cp_pin_code_neg_reply *cp)
2649 struct pending_cmd *cmd;
2652 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2657 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2658 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2660 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code
 * to the controller.  If the connection requires a 16-digit PIN (high
 * security) and the supplied one is shorter, a negative reply is sent
 * instead and the command fails with INVALID_PARAMS.
 */
2665 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2668 struct hci_conn *conn;
2669 struct mgmt_cp_pin_code_reply *cp = data;
2670 struct hci_cp_pin_code_reply reply;
2671 struct pending_cmd *cmd;
2678 if (!hdev_is_powered(hdev)) {
2679 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2680 MGMT_STATUS_NOT_POWERED);
2684 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2686 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2687 MGMT_STATUS_NOT_CONNECTED);
2691 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2692 struct mgmt_cp_pin_code_neg_reply ncp;
2694 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2696 BT_ERR("PIN code is not 16 bytes long");
2698 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2700 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2701 MGMT_STATUS_INVALID_PARAMS);
2706 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2712 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2713 reply.pin_len = cp->pin_len;
2714 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2716 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2718 mgmt_pending_remove(cmd);
2721 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: store the IO capability used
 * for future pairing attempts.  Host-side state only; completes
 * immediately with an empty response.
 */
2725 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2728 struct mgmt_cp_set_io_capability *cp = data;
2734 hdev->io_capability = cp->io_capability;
2736 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2737 hdev->io_capability);
2739 hci_dev_unlock(hdev);
2741 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data is @conn, or
 * NULL when no pairing is in progress for this connection.
 */
2745 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2747 struct hci_dev *hdev = conn->hdev;
2748 struct pending_cmd *cmd;
2750 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2751 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2754 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE operation: send the command-complete with
 * @status, detach all pairing callbacks from the connection, drop the
 * connection reference held for pairing and remove the pending command.
 */
2763 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2765 struct mgmt_rp_pair_device rp;
2766 struct hci_conn *conn = cmd->user_data;
2768 bacpy(&rp.addr.bdaddr, &conn->dst);
2769 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2771 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2774 /* So we don't get further callbacks for this connection */
2775 conn->connect_cfm_cb = NULL;
2776 conn->security_cfm_cb = NULL;
2777 conn->disconn_cfm_cb = NULL;
2779 hci_conn_drop(conn);
2781 mgmt_pending_remove(cmd);
/* Called by the SMP layer when pairing finishes; resolves any pending
 * PAIR_DEVICE command for @conn with success or failure.
 */
2784 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2786 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2787 struct pending_cmd *cmd;
2789 cmd = find_pairing(conn);
2791 pairing_complete(cmd, status);
/* Connection callback for BR/EDR pairing: translate the HCI status and
 * complete the pending PAIR_DEVICE command, if one is found.
 */
2794 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2796 struct pending_cmd *cmd;
2798 BT_DBG("status %u", status);
2800 cmd = find_pairing(conn);
2802 BT_DBG("Unable to find a pending command");
2804 pairing_complete(cmd, mgmt_status(status));
/* Connection callback for LE pairing.  Note: part of this function is
 * missing from the excerpt (likely an early-success filter before the
 * lookup) — verify against the full source.
 */
2807 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2809 struct pending_cmd *cmd;
2811 BT_DBG("status %u", status);
2816 cmd = find_pairing(conn);
2818 BT_DBG("Unable to find a pending command");
2820 pairing_complete(cmd, mgmt_status(status));
/* Handler for MGMT_OP_PAIR_DEVICE: initiate dedicated bonding with a
 * remote device.  Creates an ACL (BR/EDR) or LE connection, installs
 * the pairing completion callbacks and tracks the operation as a
 * pending command resolved by pairing_complete().
 */
2823 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2826 struct mgmt_cp_pair_device *cp = data;
2827 struct mgmt_rp_pair_device rp;
2828 struct pending_cmd *cmd;
2829 u8 sec_level, auth_type;
2830 struct hci_conn *conn;
2835 memset(&rp, 0, sizeof(rp));
2836 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2837 rp.addr.type = cp->addr.type;
2839 if (!bdaddr_type_is_valid(cp->addr.type))
2840 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2841 MGMT_STATUS_INVALID_PARAMS,
2846 if (!hdev_is_powered(hdev)) {
2847 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2848 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection. */
2852 sec_level = BT_SECURITY_MEDIUM;
2853 if (cp->io_cap == 0x03)
2854 auth_type = HCI_AT_DEDICATED_BONDING;
2856 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2858 if (cp->addr.type == BDADDR_BREDR) {
2859 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2864 /* Convert from L2CAP channel address type to HCI address type
2866 if (cp->addr.type == BDADDR_LE_PUBLIC)
2867 addr_type = ADDR_LE_DEV_PUBLIC;
2869 addr_type = ADDR_LE_DEV_RANDOM;
2871 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
2872 sec_level, auth_type);
2878 if (PTR_ERR(conn) == -EBUSY)
2879 status = MGMT_STATUS_BUSY;
2881 status = MGMT_STATUS_CONNECT_FAILED;
2883 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already set means another pairing owns this conn. */
2889 if (conn->connect_cfm_cb) {
2890 hci_conn_drop(conn);
2891 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2892 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2896 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2899 hci_conn_drop(conn);
2903 /* For LE, just connecting isn't a proof that the pairing finished */
2904 if (cp->addr.type == BDADDR_BREDR) {
2905 conn->connect_cfm_cb = pairing_complete_cb;
2906 conn->security_cfm_cb = pairing_complete_cb;
2907 conn->disconn_cfm_cb = pairing_complete_cb;
2909 conn->connect_cfm_cb = le_pairing_complete_cb;
2910 conn->security_cfm_cb = le_pairing_complete_cb;
2911 conn->disconn_cfm_cb = le_pairing_complete_cb;
2914 conn->io_capability = cp->io_cap;
2915 cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2917 if (conn->state == BT_CONNECTED &&
2918 hci_conn_security(conn, sec_level, auth_type))
2919 pairing_complete(cmd, 0);
2924 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress
 * PAIR_DEVICE for the given address.  The pending pairing is completed
 * with MGMT_STATUS_CANCELLED and the cancel itself succeeds.
 */
2928 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2931 struct mgmt_addr_info *addr = data;
2932 struct pending_cmd *cmd;
2933 struct hci_conn *conn;
2940 if (!hdev_is_powered(hdev)) {
2941 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2942 MGMT_STATUS_NOT_POWERED);
2946 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2948 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2949 MGMT_STATUS_INVALID_PARAMS);
2953 conn = cmd->user_data;
/* The address must match the pairing actually in progress. */
2955 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2956 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2957 MGMT_STATUS_INVALID_PARAMS);
2961 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2963 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2964 addr, sizeof(*addr));
2966 hci_dev_unlock(hdev);
/* Shared implementation for the user confirmation / passkey / PIN
 * (negative) replies.  Routes LE addresses to SMP and BR/EDR addresses
 * to the corresponding HCI command (@hci_op), tracking the latter as a
 * pending @mgmt_op command.
 */
2970 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2971 struct mgmt_addr_info *addr, u16 mgmt_op,
2972 u16 hci_op, __le32 passkey)
2974 struct pending_cmd *cmd;
2975 struct hci_conn *conn;
2980 if (!hdev_is_powered(hdev)) {
2981 err = cmd_complete(sk, hdev->id, mgmt_op,
2982 MGMT_STATUS_NOT_POWERED, addr,
2987 if (addr->type == BDADDR_BREDR)
2988 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2990 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2993 err = cmd_complete(sk, hdev->id, mgmt_op,
2994 MGMT_STATUS_NOT_CONNECTED, addr,
2999 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3000 /* Continue with pairing via SMP */
3001 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3004 err = cmd_complete(sk, hdev->id, mgmt_op,
3005 MGMT_STATUS_SUCCESS, addr,
3008 err = cmd_complete(sk, hdev->id, mgmt_op,
3009 MGMT_STATUS_FAILED, addr,
3015 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3021 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; the rest send only the bdaddr. */
3022 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3023 struct hci_cp_user_passkey_reply cp;
3025 bacpy(&cp.bdaddr, &addr->bdaddr);
3026 cp.passkey = passkey;
3027 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3029 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3033 mgmt_pending_remove(cmd);
3036 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the PIN-code-negative HCI opcode.
 */
3040 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3041 void *data, u16 len)
3043 struct mgmt_cp_pin_code_neg_reply *cp = data;
3047 return user_pairing_resp(sk, hdev, &cp->addr,
3048 MGMT_OP_PIN_CODE_NEG_REPLY,
3049 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_REPLY.  Note this is the only reply
 * handler that explicitly checks the payload length.
 */
3052 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3055 struct mgmt_cp_user_confirm_reply *cp = data;
3059 if (len != sizeof(*cp))
3060 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3061 MGMT_STATUS_INVALID_PARAMS);
3063 return user_pairing_resp(sk, hdev, &cp->addr,
3064 MGMT_OP_USER_CONFIRM_REPLY,
3065 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user
 * confirmation request.
 */
3068 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3069 void *data, u16 len)
3071 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3075 return user_pairing_resp(sk, hdev, &cp->addr,
3076 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3077 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_PASSKEY_REPLY: forward the user-supplied passkey
 * (cp->passkey) to user_pairing_resp().
 */
3080 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3083 struct mgmt_cp_user_passkey_reply *cp = data;
3087 return user_pairing_resp(sk, hdev, &cp->addr,
3088 MGMT_OP_USER_PASSKEY_REPLY,
3089 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject the passkey request via
 * the shared user_pairing_resp() helper (passkey arg 0).
 */
3092 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3093 void *data, u16 len)
3095 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3099 return user_pairing_resp(sk, hdev, &cp->addr,
3100 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3101 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto the
 * given request. Does not run the request; the caller does.
 */
3104 static void update_name(struct hci_request *req)
3106 struct hci_dev *hdev = req->hdev;
3107 struct hci_cp_write_local_name cp;
3109 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3111 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: report the HCI
 * status back to the pending mgmt command's socket (cmd_status on error,
 * cmd_complete on success) and drop the pending command.
 */
3114 static void set_name_complete(struct hci_dev *hdev, u8 status)
3116 struct mgmt_cp_set_local_name *cp;
3117 struct pending_cmd *cmd;
3119 BT_DBG("status 0x%02x", status);
3123 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3130 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3131 mgmt_status(status));
3133 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3136 mgmt_pending_remove(cmd);
3139 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device and short names.
 *
 * Fast paths: if both names are unchanged, or the controller is not
 * powered, respond with cmd_complete immediately (and emit a
 * MGMT_EV_LOCAL_NAME_CHANGED event in the unpowered case) without any
 * HCI traffic. Otherwise a pending command is queued and an HCI request
 * is built (BR/EDR name write and, when LE-capable, scan-response data
 * update) completing via set_name_complete().
 */
3142 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3145 struct mgmt_cp_set_local_name *cp = data;
3146 struct pending_cmd *cmd;
3147 struct hci_request req;
3154 /* If the old values are the same as the new ones just return a
3155 * direct command complete event.
3157 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3158 !memcmp(hdev->short_name, cp->short_name,
3159 sizeof(hdev->short_name))) {
3160 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3165 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3167 if (!hdev_is_powered(hdev)) {
3168 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3170 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3175 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3181 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3187 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3189 hci_req_init(&req, hdev);
3191 if (lmp_bredr_capable(hdev)) {
3196 /* The name is stored in the scan response data and so
3197 * no need to update the advertising data here.
3199 if (lmp_le_capable(hdev))
3200 update_scan_rsp_data(&req);
3202 err = hci_req_run(&req, set_name_complete);
3204 mgmt_pending_remove(cmd);
3207 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for local OOB
 * pairing data. Rejects the request if the adapter is not powered, not
 * SSP-capable, or a previous read is still pending. Uses the extended
 * HCI read when Secure Connections is enabled.
 */
3211 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3212 void *data, u16 data_len)
3214 struct pending_cmd *cmd;
3217 BT_DBG("%s", hdev->name);
3221 if (!hdev_is_powered(hdev)) {
3222 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3223 MGMT_STATUS_NOT_POWERED);
3227 if (!lmp_ssp_capable(hdev)) {
3228 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3229 MGMT_STATUS_NOT_SUPPORTED);
3233 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3234 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3239 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3245 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3246 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3249 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3252 mgmt_pending_remove(cmd);
3255 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store remote OOB pairing data.
 * The command length selects between the legacy (hash + randomizer) and
 * the extended (Secure Connections) payload formats; any other length is
 * rejected with MGMT_STATUS_INVALID_PARAMS.
 */
3259 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3260 void *data, u16 len)
3264 BT_DBG("%s ", hdev->name);
3268 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3269 struct mgmt_cp_add_remote_oob_data *cp = data;
3272 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3273 cp->hash, cp->randomizer);
3275 status = MGMT_STATUS_FAILED;
3277 status = MGMT_STATUS_SUCCESS;
3279 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3280 status, &cp->addr, sizeof(cp->addr));
3281 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3282 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3285 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3291 status = MGMT_STATUS_FAILED;
3293 status = MGMT_STATUS_SUCCESS;
3295 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3296 status, &cp->addr, sizeof(cp->addr));
3298 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3299 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3300 MGMT_STATUS_INVALID_PARAMS);
3303 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored remote OOB data for
 * the given address and report success or INVALID_PARAMS on failure.
 */
3307 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3308 void *data, u16 len)
3310 struct mgmt_cp_remove_remote_oob_data *cp = data;
3314 BT_DBG("%s", hdev->name);
3318 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3320 status = MGMT_STATUS_INVALID_PARAMS;
3322 status = MGMT_STATUS_SUCCESS;
3324 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3325 status, &cp->addr, sizeof(cp->addr));
3327 hci_dev_unlock(hdev);
/* Fail-path for Start Discovery: reset the discovery state machine to
 * STOPPED and, if a Start Discovery mgmt command is pending, complete it
 * with the translated HCI status and remove it.
 */
3331 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3333 struct pending_cmd *cmd;
3337 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3339 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3343 type = hdev->discovery.type;
3345 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3346 &type, sizeof(type));
3347 mgmt_pending_remove(cmd);
/* HCI request completion callback for Start Discovery. On failure it
 * defers to mgmt_start_discovery_failed(); on success it moves the state
 * machine to FINDING and, for LE / interleaved discovery, schedules the
 * delayed le_scan_disable work that ends the LE scan.
 */
3352 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3354 BT_DBG("status %d", status);
3358 mgmt_start_discovery_failed(hdev, status);
3359 hci_dev_unlock(hdev);
3364 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3365 hci_dev_unlock(hdev);
3367 switch (hdev->discovery.type) {
3368 case DISCOV_TYPE_LE:
3369 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3373 case DISCOV_TYPE_INTERLEAVED:
3374 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3375 DISCOV_INTERLEAVED_TIMEOUT);
3378 case DISCOV_TYPE_BREDR:
3382 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* Handle MGMT_OP_START_DISCOVERY: kick off BR/EDR inquiry, LE active
 * scan, or interleaved discovery depending on cp->type.
 *
 * Precondition checks (powered, no periodic inquiry, discovery stopped)
 * are answered with cmd_status. For BR/EDR a GIAC inquiry is queued; for
 * LE/interleaved the background scan is stopped if running, a random
 * (resolvable or unresolvable private) own address is selected, and
 * active scan parameters + scan enable are queued. The request completes
 * via start_discovery_complete() and the state moves to STARTING.
 */
3386 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3387 void *data, u16 len)
3389 struct mgmt_cp_start_discovery *cp = data;
3390 struct pending_cmd *cmd;
3391 struct hci_cp_le_set_scan_param param_cp;
3392 struct hci_cp_le_set_scan_enable enable_cp;
3393 struct hci_cp_inquiry inq_cp;
3394 struct hci_request req;
3395 /* General inquiry access code (GIAC) */
3396 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3397 u8 status, own_addr_type;
3400 BT_DBG("%s", hdev->name);
3404 if (!hdev_is_powered(hdev)) {
3405 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3406 MGMT_STATUS_NOT_POWERED);
3410 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3411 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3416 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3417 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3422 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3428 hdev->discovery.type = cp->type;
3430 hci_req_init(&req, hdev);
3432 switch (hdev->discovery.type) {
3433 case DISCOV_TYPE_BREDR:
3434 status = mgmt_bredr_support(hdev);
3436 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3438 mgmt_pending_remove(cmd);
3442 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3443 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3445 mgmt_pending_remove(cmd);
3449 hci_inquiry_cache_flush(hdev);
3451 memset(&inq_cp, 0, sizeof(inq_cp));
3452 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3453 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3454 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3457 case DISCOV_TYPE_LE:
3458 case DISCOV_TYPE_INTERLEAVED:
3459 status = mgmt_le_support(hdev);
3461 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3463 mgmt_pending_remove(cmd);
3467 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3468 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3469 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3470 MGMT_STATUS_NOT_SUPPORTED);
3471 mgmt_pending_remove(cmd);
3475 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3476 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3477 MGMT_STATUS_REJECTED);
3478 mgmt_pending_remove(cmd);
3482 /* If controller is scanning, it means the background scanning
3483 * is running. Thus, we should temporarily stop it in order to
3484 * set the discovery scanning parameters.
3486 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3487 hci_req_add_le_scan_disable(&req);
3489 memset(¶m_cp, 0, sizeof(param_cp));
/* NOTE(review): the line above is mis-encoded in this extract — "¶m_cp"
 * is almost certainly "&param_cp" garbled via an HTML entity; verify
 * against the upstream source before compiling. */
3491 /* All active scans will be done with either a resolvable
3492 * private address (when privacy feature has been enabled)
3493 * or unresolvable private address.
3495 err = hci_update_random_address(&req, true, &own_addr_type);
3497 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3498 MGMT_STATUS_FAILED);
3499 mgmt_pending_remove(cmd);
3503 param_cp.type = LE_SCAN_ACTIVE;
3504 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3505 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3506 param_cp.own_address_type = own_addr_type;
3507 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3510 memset(&enable_cp, 0, sizeof(enable_cp));
3511 enable_cp.enable = LE_SCAN_ENABLE;
3512 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3513 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3518 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3519 MGMT_STATUS_INVALID_PARAMS);
3520 mgmt_pending_remove(cmd);
3524 err = hci_req_run(&req, start_discovery_complete);
3526 mgmt_pending_remove(cmd);
3528 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3531 hci_dev_unlock(hdev);
/* Fail-path for Stop Discovery: complete any pending Stop Discovery mgmt
 * command with the translated HCI status and remove it.
 */
3535 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3537 struct pending_cmd *cmd;
3540 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3544 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3545 &hdev->discovery.type, sizeof(hdev->discovery.type));
3546 mgmt_pending_remove(cmd);
/* HCI request completion callback for Stop Discovery: report failure via
 * mgmt_stop_discovery_failed(), otherwise mark discovery STOPPED.
 */
3551 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3553 BT_DBG("status %d", status);
3558 mgmt_stop_discovery_failed(hdev, status);
3562 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3565 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: abort an active discovery session.
 *
 * Rejects when discovery is inactive or the requested type does not
 * match the ongoing one. Depending on the current state it queues either
 * an Inquiry Cancel / LE scan disable (FINDING) or a Remote Name Request
 * Cancel for the entry being resolved (RESOLVING). On success the state
 * moves to STOPPING; stop_discovery_complete() finishes the transition.
 */
3568 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3571 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3572 struct pending_cmd *cmd;
3573 struct hci_cp_remote_name_req_cancel cp;
3574 struct inquiry_entry *e;
3575 struct hci_request req;
3578 BT_DBG("%s", hdev->name);
3582 if (!hci_discovery_active(hdev)) {
3583 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3584 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3585 sizeof(mgmt_cp->type));
3589 if (hdev->discovery.type != mgmt_cp->type) {
3590 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3591 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3592 sizeof(mgmt_cp->type));
3596 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3602 hci_req_init(&req, hdev);
3604 switch (hdev->discovery.state) {
3605 case DISCOVERY_FINDING:
3606 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3607 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3609 cancel_delayed_work(&hdev->le_scan_disable);
3611 hci_req_add_le_scan_disable(&req);
3616 case DISCOVERY_RESOLVING:
3617 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3620 mgmt_pending_remove(cmd);
3621 err = cmd_complete(sk, hdev->id,
3622 MGMT_OP_STOP_DISCOVERY, 0,
3624 sizeof(mgmt_cp->type));
3625 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3629 bacpy(&cp.bdaddr, &e->data.bdaddr);
3630 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3636 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3638 mgmt_pending_remove(cmd);
3639 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3640 MGMT_STATUS_FAILED, &mgmt_cp->type,
3641 sizeof(mgmt_cp->type));
3645 err = hci_req_run(&req, stop_discovery_complete);
3647 mgmt_pending_remove(cmd);
3649 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3652 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: user space tells us whether it already
 * knows the remote name for an inquiry-cache entry. Known names are
 * marked NAME_KNOWN; unknown ones become NAME_NEEDED and are queued for
 * resolution via hci_inquiry_cache_update_resolve(). Only valid while
 * discovery is active and the entry exists in the cache.
 */
3656 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3659 struct mgmt_cp_confirm_name *cp = data;
3660 struct inquiry_entry *e;
3663 BT_DBG("%s", hdev->name);
3667 if (!hci_discovery_active(hdev)) {
3668 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3669 MGMT_STATUS_FAILED, &cp->addr,
3674 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3676 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3677 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3682 if (cp->name_known) {
3683 e->name_state = NAME_KNOWN;
3686 e->name_state = NAME_NEEDED;
3687 hci_inquiry_cache_update_resolve(hdev, e);
3690 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3694 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the given address to the controller's
 * blacklist. The address type is validated first; failures from
 * hci_blacklist_add() are reported as MGMT_STATUS_FAILED.
 */
3698 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3701 struct mgmt_cp_block_device *cp = data;
3705 BT_DBG("%s", hdev->name);
3707 if (!bdaddr_type_is_valid(cp->addr.type))
3708 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3709 MGMT_STATUS_INVALID_PARAMS,
3710 &cp->addr, sizeof(cp->addr));
3714 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3716 status = MGMT_STATUS_FAILED;
3718 status = MGMT_STATUS_SUCCESS;
3720 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3721 &cp->addr, sizeof(cp->addr));
3723 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * blacklist. A failed hci_blacklist_del() (entry not present) maps to
 * MGMT_STATUS_INVALID_PARAMS.
 */
3728 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3731 struct mgmt_cp_unblock_device *cp = data;
3735 BT_DBG("%s", hdev->name);
3737 if (!bdaddr_type_is_valid(cp->addr.type))
3738 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3739 MGMT_STATUS_INVALID_PARAMS,
3740 &cp->addr, sizeof(cp->addr));
3744 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3746 status = MGMT_STATUS_INVALID_PARAMS;
3748 status = MGMT_STATUS_SUCCESS;
3750 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3751 &cp->addr, sizeof(cp->addr));
3753 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the DI (Device ID) source, vendor,
 * product and version. Source values above 0x0002 are invalid. An HCI
 * request is then run (fire-and-forget, NULL completion) — presumably to
 * refresh the EIR record; the queued commands are in elided lines.
 */
3758 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3761 struct mgmt_cp_set_device_id *cp = data;
3762 struct hci_request req;
3766 BT_DBG("%s", hdev->name);
3768 source = __le16_to_cpu(cp->source);
3770 if (source > 0x0002)
3771 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3772 MGMT_STATUS_INVALID_PARAMS);
3776 hdev->devid_source = source;
3777 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3778 hdev->devid_product = __le16_to_cpu(cp->product);
3779 hdev->devid_version = __le16_to_cpu(cp->version);
3781 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3783 hci_req_init(&req, hdev);
3785 hci_req_run(&req, NULL);
3787 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Advertising: on error, answer
 * every pending SET_ADVERTISING command with the translated status; on
 * success, send settings responses and broadcast New Settings.
 */
3792 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3794 struct cmd_lookup match = { NULL, hdev };
3797 u8 mgmt_err = mgmt_status(status);
3799 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3800 cmd_status_rsp, &mgmt_err);
3804 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3807 new_settings(hdev, match.sk);
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising.
 *
 * Requires LE support and a 0x00/0x01 value. When the adapter is off,
 * the value is unchanged, or an LE connection exists, only the
 * HCI_ADVERTISING flag is toggled and a direct settings response is
 * sent. Otherwise an HCI request enabling/disabling advertising runs,
 * completing via set_advertising_complete(). A concurrent pending
 * SET_ADVERTISING or SET_LE command causes a busy-style rejection.
 */
3813 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3816 struct mgmt_mode *cp = data;
3817 struct pending_cmd *cmd;
3818 struct hci_request req;
3819 u8 val, enabled, status;
3822 BT_DBG("request for %s", hdev->name);
3824 status = mgmt_le_support(hdev);
3826 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3829 if (cp->val != 0x00 && cp->val != 0x01)
3830 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3831 MGMT_STATUS_INVALID_PARAMS);
3836 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3838 /* The following conditions are ones which mean that we should
3839 * not do any HCI communication but directly send a mgmt
3840 * response to user space (after toggling the flag if
3843 if (!hdev_is_powered(hdev) || val == enabled ||
3844 hci_conn_num(hdev, LE_LINK) > 0) {
3845 bool changed = false;
3847 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3848 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3852 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3857 err = new_settings(hdev, sk);
3862 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3863 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3864 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3869 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3875 hci_req_init(&req, hdev);
3878 enable_advertising(&req);
3880 disable_advertising(&req);
3882 err = hci_req_run(&req, set_advertising_complete);
3884 mgmt_pending_remove(cmd);
3887 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address. Only allowed on LE-capable, powered-off adapters. A non-ANY
 * address must not be BDADDR_NONE and must have the two most significant
 * bits set, as required for static random addresses.
 */
3891 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3892 void *data, u16 len)
3894 struct mgmt_cp_set_static_address *cp = data;
3897 BT_DBG("%s", hdev->name);
3899 if (!lmp_le_capable(hdev))
3900 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3901 MGMT_STATUS_NOT_SUPPORTED);
3903 if (hdev_is_powered(hdev))
3904 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3905 MGMT_STATUS_REJECTED);
3907 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3908 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3909 return cmd_status(sk, hdev->id,
3910 MGMT_OP_SET_STATIC_ADDRESS,
3911 MGMT_STATUS_INVALID_PARAMS);
3913 /* Two most significant bits shall be set */
3914 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3915 return cmd_status(sk, hdev->id,
3916 MGMT_OP_SET_STATIC_ADDRESS,
3917 MGMT_STATUS_INVALID_PARAMS);
3922 bacpy(&hdev->static_addr, &cp->bdaddr);
3924 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3926 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: set the LE scan interval and window.
 * Both values must lie in 0x0004..0x4000 and the window must not exceed
 * the interval. If the passive background scan is currently running (and
 * discovery is stopped) it is restarted so the new parameters apply.
 */
3931 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3932 void *data, u16 len)
3934 struct mgmt_cp_set_scan_params *cp = data;
3935 __u16 interval, window;
3938 BT_DBG("%s", hdev->name);
3940 if (!lmp_le_capable(hdev))
3941 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3942 MGMT_STATUS_NOT_SUPPORTED);
3944 interval = __le16_to_cpu(cp->interval);
3946 if (interval < 0x0004 || interval > 0x4000)
3947 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3948 MGMT_STATUS_INVALID_PARAMS);
3950 window = __le16_to_cpu(cp->window);
3952 if (window < 0x0004 || window > 0x4000)
3953 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3954 MGMT_STATUS_INVALID_PARAMS);
3956 if (window > interval)
3957 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3958 MGMT_STATUS_INVALID_PARAMS);
3962 hdev->le_scan_interval = interval;
3963 hdev->le_scan_window = window;
3965 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3967 /* If background scan is running, restart it so new parameters are
3970 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3971 hdev->discovery.state == DISCOVERY_STOPPED) {
3972 struct hci_request req;
3974 hci_req_init(&req, hdev);
3976 hci_req_add_le_scan_disable(&req);
3977 hci_req_add_le_passive_scan(&req);
3979 hci_req_run(&req, NULL);
3982 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable: on success,
 * sync the HCI_FAST_CONNECTABLE flag with the requested value and send
 * settings response + New Settings; on failure, return a cmd_status.
 */
3987 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3989 struct pending_cmd *cmd;
3991 BT_DBG("status 0x%02x", status);
3995 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4000 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4001 mgmt_status(status));
4003 struct mgmt_mode *cp = cmd->param;
4006 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4008 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4010 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4011 new_settings(hdev, cmd->sk);
4014 mgmt_pending_remove(cmd);
4017 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle fast-connectable page scan
 * parameters. Requires BR/EDR enabled, controller >= Bluetooth 1.2, a
 * powered adapter, and HCI_CONNECTABLE set. If the flag already matches
 * the request, only a settings response is sent; otherwise
 * write_fast_connectable() is queued and fast_connectable_complete()
 * finishes the command.
 */
4020 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4021 void *data, u16 len)
4023 struct mgmt_mode *cp = data;
4024 struct pending_cmd *cmd;
4025 struct hci_request req;
4028 BT_DBG("%s", hdev->name);
4030 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4031 hdev->hci_ver < BLUETOOTH_VER_1_2)
4032 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4033 MGMT_STATUS_NOT_SUPPORTED);
4035 if (cp->val != 0x00 && cp->val != 0x01)
4036 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4037 MGMT_STATUS_INVALID_PARAMS);
4039 if (!hdev_is_powered(hdev))
4040 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4041 MGMT_STATUS_NOT_POWERED);
4043 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4044 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4045 MGMT_STATUS_REJECTED);
4049 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4050 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4055 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4056 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4061 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4068 hci_req_init(&req, hdev);
4070 write_fast_connectable(&req, cp->val);
4072 err = hci_req_run(&req, fast_connectable_complete);
4074 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4075 MGMT_STATUS_FAILED);
4076 mgmt_pending_remove(cmd);
4080 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable command reflecting the current CONNECTABLE/
 * DISCOVERABLE flags, after first disabling fast connectable so the page
 * scan parameters are back to their defaults.
 */
4085 static void set_bredr_scan(struct hci_request *req)
4087 struct hci_dev *hdev = req->hdev;
4090 /* Ensure that fast connectable is disabled. This function will
4091 * not do anything if the page scan parameters are already what
4094 write_fast_connectable(req, false);
4096 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4098 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4099 scan |= SCAN_INQUIRY;
4102 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion callback for Set BR/EDR: on failure, roll back
 * the optimistically-set HCI_BREDR_ENABLED flag and return a cmd_status;
 * on success, send the settings response and broadcast New Settings.
 */
4105 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4107 struct pending_cmd *cmd;
4109 BT_DBG("status 0x%02x", status);
4113 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4118 u8 mgmt_err = mgmt_status(status);
4120 /* We need to restore the flag if related HCI commands
4123 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4125 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4127 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4128 new_settings(hdev, cmd->sk);
4131 mgmt_pending_remove(cmd);
4134 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE) controller. Requires LE to be enabled; disabling while
 * powered is rejected. When powered off the flags are toggled directly
 * (disabling also clears the dependent DISCOVERABLE/SSP/LINK_SECURITY/
 * FAST_CONNECTABLE/HS flags). When powered on, HCI_BREDR_ENABLED is set
 * up-front so update_adv_data() generates the right AD flags, then scan
 * enable + advertising data updates run via set_bredr_complete().
 */
4137 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4139 struct mgmt_mode *cp = data;
4140 struct pending_cmd *cmd;
4141 struct hci_request req;
4144 BT_DBG("request for %s", hdev->name);
4146 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4147 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4148 MGMT_STATUS_NOT_SUPPORTED);
4150 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4151 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4152 MGMT_STATUS_REJECTED);
4154 if (cp->val != 0x00 && cp->val != 0x01)
4155 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4156 MGMT_STATUS_INVALID_PARAMS);
4160 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4161 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4165 if (!hdev_is_powered(hdev)) {
4167 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4168 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4169 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4170 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4171 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4174 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4176 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4180 err = new_settings(hdev, sk);
4184 /* Reject disabling when powered on */
4186 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4187 MGMT_STATUS_REJECTED);
4191 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4192 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4197 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4203 /* We need to flip the bit already here so that update_adv_data
4204 * generates the correct flags.
4206 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4208 hci_req_init(&req, hdev);
4210 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4211 set_bredr_scan(&req);
4213 /* Since only the advertising data flags will change, there
4214 * is no need to update the scan response data.
4216 update_adv_data(&req);
4218 err = hci_req_run(&req, set_bredr_complete);
4220 mgmt_pending_remove(cmd);
4223 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SECURE_CONN: configure Secure Connections support.
 * val 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode. Requires
 * BR/EDR support and either SC-capable hardware or the HCI_FORCE_SC
 * debug flag. Powered-off adapters toggle HCI_SC_ENABLED/HCI_SC_ONLY
 * directly; powered adapters send HCI Write SC Support and set the
 * SC_ONLY flag according to the requested mode.
 */
4227 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4228 void *data, u16 len)
4230 struct mgmt_mode *cp = data;
4231 struct pending_cmd *cmd;
4235 BT_DBG("request for %s", hdev->name);
4237 status = mgmt_bredr_support(hdev);
4239 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4242 if (!lmp_sc_capable(hdev) &&
4243 !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
4244 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4245 MGMT_STATUS_NOT_SUPPORTED);
4247 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4248 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4249 MGMT_STATUS_INVALID_PARAMS);
4253 if (!hdev_is_powered(hdev)) {
4257 changed = !test_and_set_bit(HCI_SC_ENABLED,
4259 if (cp->val == 0x02)
4260 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4262 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4264 changed = test_and_clear_bit(HCI_SC_ENABLED,
4266 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4269 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4274 err = new_settings(hdev, sk);
4279 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4280 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4287 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4288 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4289 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4293 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4299 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4301 mgmt_pending_remove(cmd);
4305 if (cp->val == 0x02)
4306 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4308 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4311 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEBUG_KEYS: toggle the HCI_DEBUG_KEYS flag (accept
 * and keep debug link keys). No HCI traffic is needed; a settings
 * response is sent and New Settings broadcast when the flag changed.
 */
4315 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4316 void *data, u16 len)
4318 struct mgmt_mode *cp = data;
4322 BT_DBG("request for %s", hdev->name);
4324 if (cp->val != 0x00 && cp->val != 0x01)
4325 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4326 MGMT_STATUS_INVALID_PARAMS);
4331 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4333 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4335 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4340 err = new_settings(hdev, sk);
4343 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PRIVACY: enable/disable LE privacy (RPA use).
 * LE-capable adapters only, and only while powered off. Enabling stores
 * the supplied IRK and marks the RPA expired so a fresh one is
 * generated; disabling wipes the IRK. Supporting this command implies
 * user space handles IRKs, so HCI_RPA_RESOLVING is always set.
 */
4347 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4350 struct mgmt_cp_set_privacy *cp = cp_data;
4354 BT_DBG("request for %s", hdev->name);
4356 if (!lmp_le_capable(hdev))
4357 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4358 MGMT_STATUS_NOT_SUPPORTED);
4360 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4361 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4362 MGMT_STATUS_INVALID_PARAMS);
4364 if (hdev_is_powered(hdev))
4365 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4366 MGMT_STATUS_REJECTED);
4370 /* If user space supports this command it is also expected to
4371 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4373 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4376 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4377 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4378 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4380 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4381 memset(hdev->irk, 0, sizeof(hdev->irk));
4382 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4385 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4390 err = new_settings(hdev, sk);
4393 hci_dev_unlock(hdev);
/* Validate one IRK entry from a Load IRKs command: the address must be
 * LE public, or LE random with a static-address prefix (top two bits
 * of the MSB set).
 */
4397 static bool irk_is_valid(struct mgmt_irk_info *irk)
4399 switch (irk->addr.type) {
4400 case BDADDR_LE_PUBLIC:
4403 case BDADDR_LE_RANDOM:
4404 /* Two most significant bits shall be set */
4405 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_IRKS: replace the kernel's identity-resolving-key
 * store. The declared irk_count must match the payload length and every
 * entry must pass irk_is_valid() before the existing store is cleared
 * and the new keys are added. Also sets HCI_RPA_RESOLVING, since user
 * space issuing this command is taken to support RPA resolution.
 */
4413 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4416 struct mgmt_cp_load_irks *cp = cp_data;
4417 u16 irk_count, expected_len;
4420 BT_DBG("request for %s", hdev->name);
4422 if (!lmp_le_capable(hdev))
4423 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4424 MGMT_STATUS_NOT_SUPPORTED);
4426 irk_count = __le16_to_cpu(cp->irk_count);
4428 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4429 if (expected_len != len) {
4430 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4432 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4433 MGMT_STATUS_INVALID_PARAMS);
4436 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4438 for (i = 0; i < irk_count; i++) {
4439 struct mgmt_irk_info *key = &cp->irks[i];
4441 if (!irk_is_valid(key))
4442 return cmd_status(sk, hdev->id,
4444 MGMT_STATUS_INVALID_PARAMS);
4449 hci_smp_irks_clear(hdev);
4451 for (i = 0; i < irk_count; i++) {
4452 struct mgmt_irk_info *irk = &cp->irks[i];
4455 if (irk->addr.type == BDADDR_LE_PUBLIC)
4456 addr_type = ADDR_LE_DEV_PUBLIC;
4458 addr_type = ADDR_LE_DEV_RANDOM;
4460 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4464 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4466 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4468 hci_dev_unlock(hdev);
/* Validate one LTK entry from a Load Long Term Keys command: master must
 * be 0x00/0x01 and the address must be LE public, or LE random with the
 * static-address prefix (top two bits of the MSB set).
 */
4473 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4475 if (key->master != 0x00 && key->master != 0x01)
4478 switch (key->addr.type) {
4479 case BDADDR_LE_PUBLIC:
4482 case BDADDR_LE_RANDOM:
4483 /* Two most significant bits shall be set */
4484 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the kernel's SMP LTK
 * store. The declared key_count must match the payload length and every
 * entry must pass ltk_is_valid() before the store is cleared and the new
 * keys are added via hci_add_ltk() (slave keys use HCI_SMP_LTK_SLAVE).
 */
4492 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4493 void *cp_data, u16 len)
4495 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4496 u16 key_count, expected_len;
4499 BT_DBG("request for %s", hdev->name);
4501 if (!lmp_le_capable(hdev))
4502 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4503 MGMT_STATUS_NOT_SUPPORTED);
4505 key_count = __le16_to_cpu(cp->key_count);
4507 expected_len = sizeof(*cp) + key_count *
4508 sizeof(struct mgmt_ltk_info);
4509 if (expected_len != len) {
4510 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4512 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4513 MGMT_STATUS_INVALID_PARAMS);
4516 BT_DBG("%s key_count %u", hdev->name, key_count);
4518 for (i = 0; i < key_count; i++) {
4519 struct mgmt_ltk_info *key = &cp->keys[i];
4521 if (!ltk_is_valid(key))
4522 return cmd_status(sk, hdev->id,
4523 MGMT_OP_LOAD_LONG_TERM_KEYS,
4524 MGMT_STATUS_INVALID_PARAMS);
4529 hci_smp_ltks_clear(hdev);
4531 for (i = 0; i < key_count; i++) {
4532 struct mgmt_ltk_info *key = &cp->keys[i];
4535 if (key->addr.type == BDADDR_LE_PUBLIC)
4536 addr_type = ADDR_LE_DEV_PUBLIC;
4538 addr_type = ADDR_LE_DEV_RANDOM;
4543 type = HCI_SMP_LTK_SLAVE;
4545 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4546 key->type, key->val, key->enc_size, key->ediv,
4550 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4553 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode (entry 0 unused).
 * Each entry gives the handler, whether the command has a variable-length
 * payload (true = min size, false = exact size), and the expected size.
 * The order must match the MGMT_OP_* opcode numbering in mgmt.h.
 */
4558 static const struct mgmt_handler {
4559 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4563 } mgmt_handlers[] = {
4564 { NULL }, /* 0x0000 (no command) */
4565 { read_version, false, MGMT_READ_VERSION_SIZE },
4566 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4567 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4568 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4569 { set_powered, false, MGMT_SETTING_SIZE },
4570 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4571 { set_connectable, false, MGMT_SETTING_SIZE },
4572 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4573 { set_pairable, false, MGMT_SETTING_SIZE },
4574 { set_link_security, false, MGMT_SETTING_SIZE },
4575 { set_ssp, false, MGMT_SETTING_SIZE },
4576 { set_hs, false, MGMT_SETTING_SIZE },
4577 { set_le, false, MGMT_SETTING_SIZE },
4578 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4579 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4580 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4581 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4582 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4583 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4584 { disconnect, false, MGMT_DISCONNECT_SIZE },
4585 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4586 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4587 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4588 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4589 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4590 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4591 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4592 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4593 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4594 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4595 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4596 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4597 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4598 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4599 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4600 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4601 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4602 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4603 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4604 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4605 { set_advertising, false, MGMT_SETTING_SIZE },
4606 { set_bredr, false, MGMT_SETTING_SIZE },
4607 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4608 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4609 { set_secure_conn, false, MGMT_SETTING_SIZE },
4610 { set_debug_keys, false, MGMT_SETTING_SIZE },
4611 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4612 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
/* Entry point for management commands arriving on an HCI control socket.
 * Copies the message from user space, validates the header, index and
 * payload length, then dispatches to the matching mgmt_handlers[] entry.
 * NOTE(review): this dump elides the error labels, kfree/hci_dev_put
 * cleanup and several braces - verify against the full source.
 */
4616 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4620 struct mgmt_hdr *hdr;
4621 u16 opcode, index, len;
4622 struct hci_dev *hdev = NULL;
4623 const struct mgmt_handler *handler;
4626 BT_DBG("got %zu bytes", msglen);
/* Message must at least contain a complete management header */
4628 if (msglen < sizeof(*hdr))
4631 buf = kmalloc(msglen, GFP_KERNEL)
4635 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields are little-endian on the wire */
4641 opcode = __le16_to_cpu(hdr->opcode);
4642 index = __le16_to_cpu(hdr->index);
4643 len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received */
4645 if (len != msglen - sizeof(*hdr)) {
4650 if (index != MGMT_INDEX_NONE) {
4651 hdev = hci_dev_get(index);
4653 err = cmd_status(sk, index, opcode,
4654 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup, or claimed by a user channel, are
 * not addressable through the management interface.
 */
4658 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4659 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4660 err = cmd_status(sk, index, opcode,
4661 MGMT_STATUS_INVALID_INDEX);
4666 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4667 mgmt_handlers[opcode].func == NULL) {
4668 BT_DBG("Unknown op %u", opcode);
4669 err = cmd_status(sk, index, opcode,
4670 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below READ_INFO are global and must not carry an index;
 * all others require one.
 */
4674 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4675 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4676 err = cmd_status(sk, index, opcode,
4677 MGMT_STATUS_INVALID_INDEX);
4681 handler = &mgmt_handlers[opcode];
/* Variable-length commands may exceed the fixed size; fixed-length
 * commands must match it exactly.
 */
4683 if ((handler->var_len && len < handler->data_len) ||
4684 (!handler->var_len && len != handler->data_len)) {
4685 err = cmd_status(sk, index, opcode,
4686 MGMT_STATUS_INVALID_PARAMS);
4691 mgmt_init_hdev(sk, hdev);
/* Payload starts right after the management header */
4693 cp = buf + sizeof(*hdr);
4695 err = handler->func(sk, hdev, cp, len);
4709 void mgmt_index_added(struct hci_dev *hdev)
4711 if (hdev->dev_type != HCI_BREDR)
4714 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4717 void mgmt_index_removed(struct hci_dev *hdev)
4719 u8 status = MGMT_STATUS_INVALID_INDEX;
4721 if (hdev->dev_type != HCI_BREDR)
4724 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4726 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4729 /* This function requires the caller holds hdev->lock */
4730 static void restart_le_auto_conns(struct hci_dev *hdev)
4732 struct hci_conn_params *p;
4734 list_for_each_entry(p, &hdev->le_conn_params, list) {
4735 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4736 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
/* HCI request callback run once the power-up command sequence built by
 * powered_update_hci() has finished: restarts LE auto-connections,
 * answers pending Set Powered commands and emits New Settings.
 * NOTE(review): the matching hci_dev_lock() and the sock_put() tail
 * appear elided from this dump - verify against the full source.
 */
4740 static void powered_complete(struct hci_dev *hdev, u8 status)
4742 struct cmd_lookup match = { NULL, hdev };
4744 BT_DBG("status 0x%02x", status);
4748 restart_le_auto_conns(hdev);
/* Complete all pending Set Powered commands with current settings */
4750 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4752 new_settings(hdev, match.sk);
4754 hci_dev_unlock(hdev);
/* Build and submit the HCI request that brings the controller's state
 * in line with the mgmt settings after power-on: SSP mode, LE host
 * support, advertising data, authentication and scan mode.
 * Returns the result of hci_req_run(); powered_complete() runs on
 * completion. NOTE(review): several variable declarations and branch
 * lines are elided from this dump - verify against the full source.
 */
4760 static int powered_update_hci(struct hci_dev *hdev)
4762 struct hci_request req;
4765 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host
 * feature bit is not yet set.
 */
4767 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4768 !lmp_host_ssp_capable(hdev)) {
4771 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4774 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4775 lmp_bredr_capable(hdev)) {
4776 struct hci_cp_write_le_host_supported cp;
4779 cp.simul = lmp_le_br_capable(hdev);
4781 /* Check first if we already have the right
4782 * host state (host features set)
4784 if (cp.le != lmp_host_le_capable(hdev) ||
4785 cp.simul != lmp_host_le_br_capable(hdev))
4786 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4790 if (lmp_le_capable(hdev)) {
4791 /* Make sure the controller has a good default for
4792 * advertising data. This also applies to the case
4793 * where BR/EDR was toggled during the AUTO_OFF phase.
4795 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4796 update_adv_data(&req);
4797 update_scan_rsp_data(&req);
4800 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4801 enable_advertising(&req);
/* Only touch auth enable if it differs from the current HCI state */
4804 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4805 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4806 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4807 sizeof(link_sec), &link_sec);
4809 if (lmp_bredr_capable(hdev)) {
4810 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4811 set_bredr_scan(&req);
4817 return hci_req_run(&req, powered_complete);
/* Called when the controller's power state changes. On power-on it
 * kicks off powered_update_hci(); on power-off it completes pending
 * Set Powered commands, fails all other pending commands with Not
 * Powered, signals a zeroed class of device and emits New Settings.
 * NOTE(review): the power-on/power-off branch structure is partly
 * elided from this dump - verify against the full source.
 */
4820 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4822 struct cmd_lookup match = { NULL, hdev };
4823 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4824 u8 zero_cod[] = { 0, 0, 0 };
/* Nothing to report if mgmt has never been used on this controller */
4827 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* If the HCI update request was queued, powered_complete() will
 * send the responses later.
 */
4831 if (powered_update_hci(hdev) == 0)
4834 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4839 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
/* Opcode 0: fail every remaining pending command */
4840 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4842 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4843 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4844 zero_cod, sizeof(zero_cod), NULL);
4847 err = new_settings(hdev, match.sk);
4855 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4857 struct pending_cmd *cmd;
4860 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4864 if (err == -ERFKILL)
4865 status = MGMT_STATUS_RFKILLED;
4867 status = MGMT_STATUS_FAILED;
4869 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4871 mgmt_pending_remove(cmd);
/* Timer callback: the discoverable period expired. Clears both
 * discoverable flags, restores page-only scan on BR/EDR, refreshes
 * advertising data and announces the new settings.
 * NOTE(review): the opening hci_dev_lock() appears elided from this
 * dump - verify against the full source.
 */
4874 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4876 struct hci_request req;
4880 /* When discoverable timeout triggers, then just make sure
4881 * the limited discoverable flag is cleared. Even in the case
4882 * of a timeout triggered from general discoverable, it is
4883 * safe to unconditionally clear the flag.
4885 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4886 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4888 hci_req_init(&req, hdev);
4889 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Keep page scan enabled but drop inquiry scan */
4890 u8 scan = SCAN_PAGE;
4891 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4892 sizeof(scan), &scan);
/* Advertising flags may encode discoverable mode - refresh them */
4895 update_adv_data(&req);
4896 hci_req_run(&req, NULL);
4898 hdev->discov_timeout = 0;
4900 new_settings(hdev, NULL);
4902 hci_dev_unlock(hdev);
/* Sync the HCI_DISCOVERABLE flag with the scan mode reported by the
 * controller and, if it changed, refresh advertising data and emit
 * New Settings. NOTE(review): the if/else around the flag update and
 * the 'changed' declaration are elided from this dump - verify
 * against the full source.
 */
4905 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4909 /* Nothing needed here if there's a pending command since that
4910 * commands request completion callback takes care of everything
4913 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4916 /* Powering off may clear the scan mode - don't let that interfere */
4917 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4921 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
/* Limited discoverable cannot outlive general discoverable */
4923 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4924 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4928 struct hci_request req;
4930 /* In case this change in discoverable was triggered by
4931 * a disabling of connectable there could be a need to
4932 * update the advertising flags.
4934 hci_req_init(&req, hdev);
4935 update_adv_data(&req);
4936 hci_req_run(&req, NULL);
4938 new_settings(hdev, NULL);
/* Sync the HCI_CONNECTABLE flag with the scan mode reported by the
 * controller; emit New Settings when the flag actually changed.
 * NOTE(review): the if/else branch structure is elided from this
 * dump - verify against the full source.
 */
4942 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4946 /* Nothing needed here if there's a pending command since that
4947 * commands request completion callback takes care of everything
4950 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4953 /* Powering off may clear the scan mode - don't let that interfere */
4954 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4958 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4960 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4963 new_settings(hdev, NULL);
4966 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
4968 /* Powering off may stop advertising - don't let that interfere */
4969 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4973 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4975 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4978 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4980 u8 mgmt_err = mgmt_status(status);
4982 if (scan & SCAN_PAGE)
4983 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4984 cmd_status_rsp, &mgmt_err);
4986 if (scan & SCAN_INQUIRY)
4987 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4988 cmd_status_rsp, &mgmt_err);
4991 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4994 struct mgmt_ev_new_link_key ev;
4996 memset(&ev, 0, sizeof(ev));
4998 ev.store_hint = persistent;
4999 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5000 ev.key.addr.type = BDADDR_BREDR;
5001 ev.key.type = key->type;
5002 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5003 ev.key.pin_len = key->pin_len;
5005 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event, advising user space whether the key
 * is worth persisting. Keys tied to resolvable/non-resolvable random
 * addresses (no identity) are never stored.
 * NOTE(review): the master-flag assignment after the HCI_SMP_LTK check
 * is elided from this dump - verify against the full source.
 */
5008 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5010 struct mgmt_ev_new_long_term_key ev;
5012 memset(&ev, 0, sizeof(ev));
5014 /* Devices using resolvable or non-resolvable random addresses
5015 * without providing an indentity resolving key don't require
5016 * to store long term keys. Their addresses will change the
5019 * Only when a remote device provides an identity address
5020 * make sure the long term key is stored. If the remote
5021 * identity is known, the long term keys are internally
5022 * mapped to the identity address. So allow static random
5023 * and public addresses here.
/* Top two address bits 11 mark a static random (identity) address */
5025 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5026 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5027 ev.store_hint = 0x00;
5029 ev.store_hint = persistent;
5031 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5032 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5033 ev.key.type = key->authenticated;
5034 ev.key.enc_size = key->enc_size;
5035 ev.key.ediv = key->ediv;
5036 ev.key.rand = key->rand;
5038 if (key->type == HCI_SMP_LTK)
5041 memcpy(ev.key.val, key->val, sizeof(key->val));
5043 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
5046 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5048 struct mgmt_ev_new_irk ev;
5050 memset(&ev, 0, sizeof(ev));
5052 /* For identity resolving keys from devices that are already
5053 * using a public address or static random address, do not
5054 * ask for storing this key. The identity resolving key really
5055 * is only mandatory for devices using resovlable random
5058 * Storing all identity resolving keys has the downside that
5059 * they will be also loaded on next boot of they system. More
5060 * identity resolving keys, means more time during scanning is
5061 * needed to actually resolve these addresses.
5063 if (bacmp(&irk->rpa, BDADDR_ANY))
5064 ev.store_hint = 0x01;
5066 ev.store_hint = 0x00;
5068 bacpy(&ev.rpa, &irk->rpa);
5069 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5070 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5071 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5073 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5076 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5079 struct mgmt_ev_new_csrk ev;
5081 memset(&ev, 0, sizeof(ev));
5083 /* Devices using resolvable or non-resolvable random addresses
5084 * without providing an indentity resolving key don't require
5085 * to store signature resolving keys. Their addresses will change
5086 * the next time around.
5088 * Only when a remote device provides an identity address
5089 * make sure the signature resolving key is stored. So allow
5090 * static random and public addresses here.
5092 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5093 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5094 ev.store_hint = 0x00;
5096 ev.store_hint = persistent;
5098 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5099 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5100 ev.key.master = csrk->master;
5101 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5103 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5106 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5109 eir[eir_len++] = sizeof(type) + data_len;
5110 eir[eir_len++] = type;
5111 memcpy(&eir[eir_len], data, data_len);
5112 eir_len += data_len;
/* Emit a Device Connected event, embedding the remote name and class
 * of device as EIR fields when available.
 * NOTE(review): the buf declaration and the if (name) guard appear
 * elided from this dump - verify against the full source.
 */
5117 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5118 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5122 struct mgmt_ev_device_connected *ev = (void *) buf;
5125 bacpy(&ev->addr.bdaddr, bdaddr);
5126 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5128 ev->flags = __cpu_to_le32(flags);
5131 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Append class of device only when it is non-zero */
5134 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5135 eir_len = eir_append_data(ev->eir, eir_len,
5136 EIR_CLASS_OF_DEV, dev_class, 3);
5138 ev->eir_len = cpu_to_le16(eir_len);
5140 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5141 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending Disconnect command
 * and pass its socket back to the caller via @data so the subsequent
 * Device Disconnected event can skip that socket.
 * NOTE(review): the *sk assignment / sock_hold() lines appear elided
 * from this dump - verify against the full source.
 */
5144 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5146 struct mgmt_cp_disconnect *cp = cmd->param;
5147 struct sock **sk = data;
5148 struct mgmt_rp_disconnect rp;
5150 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5151 rp.addr.type = cp->addr.type;
5153 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5159 mgmt_pending_remove(cmd);
5162 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5164 struct hci_dev *hdev = data;
5165 struct mgmt_cp_unpair_device *cp = cmd->param;
5166 struct mgmt_rp_unpair_device rp;
5168 memset(&rp, 0, sizeof(rp));
5169 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5170 rp.addr.type = cp->addr.type;
5172 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5174 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5176 mgmt_pending_remove(cmd);
/* Emit a Device Disconnected event. If a power-off is pending and this
 * was the last connection, expedite the queued power-off work. Also
 * completes pending Disconnect and Unpair Device commands.
 * NOTE(review): the ev.reason assignment and intermediate braces are
 * elided from this dump - verify against the full source.
 */
5179 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
5180 u8 link_type, u8 addr_type, u8 reason,
5181 bool mgmt_connected)
5183 struct mgmt_ev_device_disconnected ev;
5184 struct pending_cmd *power_off;
5185 struct sock *sk = NULL;
5187 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5189 struct mgmt_mode *cp = power_off->param;
5191 /* The connection is still in hci_conn_hash so test for 1
5192 * instead of 0 to know if this is the last one.
5194 if (!cp->val && hci_conn_count(hdev) == 1) {
5195 cancel_delayed_work(&hdev->power_off);
5196 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Connections user space never saw don't get a disconnect event */
5200 if (!mgmt_connected)
5203 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Collect the issuing socket so it is excluded from the event */
5206 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
5208 bacpy(&ev.addr.bdaddr, bdaddr);
5209 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5212 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
5217 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A Disconnect command failed at the HCI level: complete pending
 * Unpair Device commands and, if the pending Disconnect matches this
 * address, answer it with the mapped error status.
 * NOTE(review): the cp assignment and !cmd early return appear elided
 * from this dump - verify against the full source.
 */
5221 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5222 u8 link_type, u8 addr_type, u8 status)
5224 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5225 struct mgmt_cp_disconnect *cp;
5226 struct mgmt_rp_disconnect rp;
5227 struct pending_cmd *cmd;
5229 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5232 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only respond if the pending command targets this exact address */
5238 if (bacmp(bdaddr, &cp->addr.bdaddr))
5241 if (cp->addr.type != bdaddr_type)
5244 bacpy(&rp.addr.bdaddr, bdaddr);
5245 rp.addr.type = bdaddr_type;
5247 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5248 mgmt_status(status), &rp, sizeof(rp));
5250 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the mapped status. If a power-off
 * is pending and this was the last tracked connection, expedite the
 * queued power-off work (same pattern as mgmt_device_disconnected).
 */
5253 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5254 u8 addr_type, u8 status)
5256 struct mgmt_ev_connect_failed ev;
5257 struct pending_cmd *power_off;
5259 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5261 struct mgmt_mode *cp = power_off->param;
5263 /* The connection is still in hci_conn_hash so test for 1
5264 * instead of 0 to know if this is the last one.
5266 if (!cp->val && hci_conn_count(hdev) == 1) {
5267 cancel_delayed_work(&hdev->power_off);
5268 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5272 bacpy(&ev.addr.bdaddr, bdaddr);
5273 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5274 ev.status = mgmt_status(status);
5276 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5279 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5281 struct mgmt_ev_pin_code_request ev;
5283 bacpy(&ev.addr.bdaddr, bdaddr);
5284 ev.addr.type = BDADDR_BREDR;
5287 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5290 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5293 struct pending_cmd *cmd;
5294 struct mgmt_rp_pin_code_reply rp;
5296 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5300 bacpy(&rp.addr.bdaddr, bdaddr);
5301 rp.addr.type = BDADDR_BREDR;
5303 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5304 mgmt_status(status), &rp, sizeof(rp));
5306 mgmt_pending_remove(cmd);
5309 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5312 struct pending_cmd *cmd;
5313 struct mgmt_rp_pin_code_reply rp;
5315 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5319 bacpy(&rp.addr.bdaddr, bdaddr);
5320 rp.addr.type = BDADDR_BREDR;
5322 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5323 mgmt_status(status), &rp, sizeof(rp));
5325 mgmt_pending_remove(cmd);
5328 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5329 u8 link_type, u8 addr_type, u32 value,
5332 struct mgmt_ev_user_confirm_request ev;
5334 BT_DBG("%s", hdev->name);
5336 bacpy(&ev.addr.bdaddr, bdaddr);
5337 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5338 ev.confirm_hint = confirm_hint;
5339 ev.value = cpu_to_le32(value);
5341 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5345 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5346 u8 link_type, u8 addr_type)
5348 struct mgmt_ev_user_passkey_request ev;
5350 BT_DBG("%s", hdev->name);
5352 bacpy(&ev.addr.bdaddr, bdaddr);
5353 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5355 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5359 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5360 u8 link_type, u8 addr_type, u8 status,
5363 struct pending_cmd *cmd;
5364 struct mgmt_rp_user_confirm_reply rp;
5367 cmd = mgmt_pending_find(opcode, hdev);
5371 bacpy(&rp.addr.bdaddr, bdaddr);
5372 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5373 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5376 mgmt_pending_remove(cmd);
/* Completion for a pending User Confirm Reply command. */
5381 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5382 u8 link_type, u8 addr_type, u8 status)
5384 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5385 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion for a pending User Confirm Negative Reply command.
 * NOTE(review): the status argument line appears elided from this
 * dump - verify against the full source.
 */
5388 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5389 u8 link_type, u8 addr_type, u8 status)
5391 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5393 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion for a pending User Passkey Reply command. */
5396 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5397 u8 link_type, u8 addr_type, u8 status)
5399 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5400 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion for a pending User Passkey Negative Reply command.
 * NOTE(review): the status argument line appears elided from this
 * dump - verify against the full source.
 */
5403 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5404 u8 link_type, u8 addr_type, u8 status)
5406 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5408 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5411 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5412 u8 link_type, u8 addr_type, u32 passkey,
5415 struct mgmt_ev_passkey_notify ev;
5417 BT_DBG("%s", hdev->name);
5419 bacpy(&ev.addr.bdaddr, bdaddr);
5420 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5421 ev.passkey = __cpu_to_le32(passkey);
5422 ev.entered = entered;
5424 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5427 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5428 u8 addr_type, u8 status)
5430 struct mgmt_ev_auth_failed ev;
5432 bacpy(&ev.addr.bdaddr, bdaddr);
5433 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5434 ev.status = mgmt_status(status);
5436 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Completion for Write Auth Enable: on failure, propagate the error to
 * pending Set Link Security commands; on success, sync the
 * HCI_LINK_SECURITY flag with the HCI_AUTH state and emit New Settings
 * if it changed. NOTE(review): the status check, 'changed' handling
 * and sock_put tail are elided from this dump - verify against the
 * full source.
 */
5439 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5441 struct cmd_lookup match = { NULL, hdev };
5445 u8 mgmt_err = mgmt_status(status);
5446 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5447 cmd_status_rsp, &mgmt_err);
5451 if (test_bit(HCI_AUTH, &hdev->flags))
5452 changed = !test_and_set_bit(HCI_LINK_SECURITY,
5455 changed = test_and_clear_bit(HCI_LINK_SECURITY,
5458 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5462 new_settings(hdev, match.sk);
5468 static void clear_eir(struct hci_request *req)
5470 struct hci_dev *hdev = req->hdev;
5471 struct hci_cp_write_eir cp;
5473 if (!lmp_ext_inq_capable(hdev))
5476 memset(hdev->eir, 0, sizeof(hdev->eir));
5478 memset(&cp, 0, sizeof(cp));
5480 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion for Write SSP Mode: on failure roll back the SSP (and
 * dependent HS) flags and report the error; on success sync the flags,
 * answer pending Set SSP commands, emit New Settings if needed, and
 * rewrite or clear the EIR to match the new SSP state.
 * NOTE(review): several branch and else lines are elided from this
 * dump - verify against the full source.
 */
5483 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5485 struct cmd_lookup match = { NULL, hdev };
5486 struct hci_request req;
5487 bool changed = false;
5490 u8 mgmt_err = mgmt_status(status);
/* Failure while enabling: undo the optimistically-set flag */
5492 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5493 &hdev->dev_flags)) {
5494 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5495 new_settings(hdev, NULL);
5498 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5504 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5506 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High Speed depends on SSP and must be cleared with it */
5508 changed = test_and_clear_bit(HCI_HS_ENABLED,
5511 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5514 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5517 new_settings(hdev, match.sk);
5522 hci_req_init(&req, hdev);
/* EIR is only meaningful with SSP enabled; otherwise clear it */
5524 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5529 hci_req_run(&req, NULL);
/* Completion for the Secure Connections enable sequence: on failure
 * roll back HCI_SC_ENABLED/HCI_SC_ONLY and report the error; on
 * success sync the flags, answer pending Set Secure Connections
 * commands and emit New Settings if anything changed.
 * NOTE(review): several branch lines and the sock_put tail are elided
 * from this dump - verify against the full source.
 */
5532 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5534 struct cmd_lookup match = { NULL, hdev };
5535 bool changed = false;
5538 u8 mgmt_err = mgmt_status(status);
/* Failure while enabling: undo the optimistically-set flags */
5541 if (test_and_clear_bit(HCI_SC_ENABLED,
5543 new_settings(hdev, NULL);
5544 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5547 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5548 cmd_status_rsp, &mgmt_err);
5553 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5555 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* SC-only mode cannot remain set once SC itself is disabled */
5556 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5559 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5560 settings_rsp, &match);
5563 new_settings(hdev, match.sk);
5569 static void sk_lookup(struct pending_cmd *cmd, void *data)
5571 struct cmd_lookup *match = data;
5573 if (match->sk == NULL) {
5574 match->sk = cmd->sk;
5575 sock_hold(match->sk);
/* Completion for a class-of-device update: answer the pending command
 * (Set Dev Class / Add UUID / Remove UUID) and, on success, broadcast
 * the new class to other sockets. NOTE(review): the status parameter
 * line, the !status check and the sock_put tail appear elided from
 * this dump - verify against the full source.
 */
5579 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5582 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
/* Any of these three commands can have triggered the class update */
5584 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5585 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5586 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5589 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion for a local name change: update the cached name and emit
 * Local Name Changed unless the write was part of a power-on sequence.
 * NOTE(review): the status handling around the cmd check is elided
 * from this dump - verify against the full source.
 */
5596 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5598 struct mgmt_cp_set_local_name ev;
5599 struct pending_cmd *cmd;
5604 memset(&ev, 0, sizeof(ev));
5605 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5606 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5608 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5610 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5612 /* If this is a HCI command related to powering on the
5613 * HCI dev don't send any mgmt signals.
5615 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the issuing socket - it gets the command response instead */
5619 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5620 cmd ? cmd->sk : NULL);
/* Completion for Read Local OOB Data: answer the pending command with
 * either the extended (SC: P-192 + P-256) or the legacy (P-192 only)
 * response format, or with an error status on failure.
 * NOTE(review): the !cmd early return and the if/else around the
 * status check are elided from this dump - verify against the full
 * source.
 */
5623 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5624 u8 *randomizer192, u8 *hash256,
5625 u8 *randomizer256, u8 status)
5627 struct pending_cmd *cmd;
5629 BT_DBG("%s status %u", hdev->name, status);
5631 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5636 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5637 mgmt_status(status));
/* With Secure Connections the response also carries P-256 values */
5639 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5640 hash256 && randomizer256) {
5641 struct mgmt_rp_read_local_oob_ext_data rp;
5643 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5644 memcpy(rp.randomizer192, randomizer192,
5645 sizeof(rp.randomizer192));
5647 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5648 memcpy(rp.randomizer256, randomizer256,
5649 sizeof(rp.randomizer256));
5651 cmd_complete(cmd->sk, hdev->id,
5652 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5655 struct mgmt_rp_read_local_oob_data rp;
5657 memcpy(rp.hash, hash192, sizeof(rp.hash));
5658 memcpy(rp.randomizer, randomizer192,
5659 sizeof(rp.randomizer));
5661 cmd_complete(cmd->sk, hdev->id,
5662 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5667 mgmt_pending_remove(cmd);
/* Emit a Device Found event for a discovery result, resolving RPAs via
 * stored IRKs and appending class of device to the EIR data when it is
 * not already present. Events are dropped when no discovery is active
 * or the EIR data would overflow the event buffer.
 * NOTE(review): the buf declaration, rssi/flags assignments and some
 * braces are elided from this dump - verify against the full source.
 */
5670 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5671 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
5672 ssp, u8 *eir, u16 eir_len)
5675 struct mgmt_ev_device_found *ev = (void *) buf;
5676 struct smp_irk *irk;
/* Results are only forwarded while discovery is in progress */
5679 if (!hci_discovery_active(hdev))
5682 /* Leave 5 bytes for a potential CoD field */
5683 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
5686 memset(buf, 0, sizeof(buf));
/* Report the identity address when the RPA resolves via an IRK */
5688 irk = hci_get_irk(hdev, bdaddr, addr_type);
5690 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5691 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5693 bacpy(&ev->addr.bdaddr, bdaddr);
5694 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5699 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5701 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5704 memcpy(ev->eir, eir, eir_len);
5706 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5707 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5710 ev->eir_len = cpu_to_le16(eir_len);
5711 ev_size = sizeof(*ev) + eir_len;
5713 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the resolved remote name
 * (as an EIR complete-name field) after a remote name request.
 * NOTE(review): the eir_len declaration and rssi assignment appear
 * elided from this dump - verify against the full source.
 */
5716 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5717 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5719 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR length and type bytes for the name field */
5720 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5723 ev = (struct mgmt_ev_device_found *) buf;
5725 memset(buf, 0, sizeof(buf));
5727 bacpy(&ev->addr.bdaddr, bdaddr);
5728 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5731 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5734 ev->eir_len = cpu_to_le16(eir_len);
5736 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Discovery state changed: complete whichever Start/Stop Discovery
 * command is pending (responding with the discovery type) and emit a
 * Discovering event. NOTE(review): the branch selecting start vs stop
 * lookup is partly elided from this dump - verify against the full
 * source.
 */
5739 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5741 struct mgmt_ev_discovering ev;
5742 struct pending_cmd *cmd;
5744 BT_DBG("%s discovering %u", hdev->name, discovering);
5747 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5749 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5752 u8 type = hdev->discovery.type;
5754 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5756 mgmt_pending_remove(cmd);
5759 memset(&ev, 0, sizeof(ev));
5760 ev.type = hdev->discovery.type;
5761 ev.discovering = discovering;
5763 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5766 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5768 struct pending_cmd *cmd;
5769 struct mgmt_ev_device_blocked ev;
5771 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5773 bacpy(&ev.addr.bdaddr, bdaddr);
5774 ev.addr.type = type;
5776 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5777 cmd ? cmd->sk : NULL);
5780 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5782 struct pending_cmd *cmd;
5783 struct mgmt_ev_device_unblocked ev;
5785 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5787 bacpy(&ev.addr.bdaddr, bdaddr);
5788 ev.addr.type = type;
5790 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5791 cmd ? cmd->sk : NULL);
5794 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5796 BT_DBG("%s status %u", hdev->name, status);
5798 /* Clear the advertising mgmt setting if we failed to re-enable it */
5800 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5801 new_settings(hdev, NULL);
5805 void mgmt_reenable_advertising(struct hci_dev *hdev)
5807 struct hci_request req;
5809 if (hci_conn_num(hdev, LE_LINK) > 0)
5812 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5815 hci_req_init(&req, hdev);
5816 enable_advertising(&req);
5818 /* If this fails we have no option but to let user space know
5819 * that we've disabled advertising.
5821 if (hci_req_run(&req, adv_enable_complete) < 0) {
5822 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5823 new_settings(hdev, NULL);