2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Table of mgmt command opcodes implemented by this interface; reported
 * to user space by read_commands().
 * NOTE(review): this extract has gaps in the embedded numbering — several
 * entries and the closing "};" are not visible here; code left untouched.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Table of mgmt event opcodes this interface can emit; reported to user
 * space by read_commands() alongside mgmt_commands[].
 * NOTE(review): entries and the closing "};" are missing from this
 * extract (gaps in the embedded numbering).
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; consumed by mgmt_status().
 * NOTE(review): entry for HCI status 0x00 (success) and the closing "};"
 * are not visible in this extract — confirm against the full source.
 */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
/* Queue an MGMT_EV_CMD_STATUS reply (for command @cmd on controller
 * @index, with mgmt status @status) to the mgmt socket @sk.
 * NOTE(review): this extract is missing interior lines (skb NULL check,
 * ev->status assignment, error path and final return) — left untouched.
 */
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE reply for command @cmd on controller
 * @index, carrying @rp_len bytes of response payload from @rp.
 * NOTE(review): interior lines (skb NULL check, ev->status, rp NULL
 * guard, error path) are missing from this extract.
 */
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: replies with the interface version and
 * revision (MGMT_VERSION/MGMT_REVISION).  @hdev/@data are unused here.
 */
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: replies with the supported command and
 * event opcode lists (each opcode serialized as little-endian u16 via
 * put_unaligned_le16).  Response buffer is heap-allocated and sized from
 * the two tables.  NOTE(review): the kmalloc NULL check, kfree and
 * return are among the lines missing from this extract.
 */
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: walks hci_dev_list under
 * hci_dev_list_lock and reports the ids of controllers that pass
 * mgmt_valid_hdev() and are not still in HCI_SETUP.  The list is
 * counted first, then the reply is built in a second pass.
 * NOTE(review): the first-pass count++ and the kfree/return tail are
 * missing from this extract.
 */
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the ids; count is recomputed because entries in
 * HCI_SETUP are skipped here. */
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the bitmask of settings this controller could support, based on
 * its LMP feature bits: POWERED/PAIRABLE always, SSP if ssp-capable,
 * the BR/EDR group if bredr-capable, LE if le-capable.
 * NOTE(review): the local "settings" declaration/initialization, the
 * condition guarding MGMT_SETTING_HS and the final return are missing
 * from this extract.
 */
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
/* Build the bitmask of settings currently active on @hdev from its
 * dev_flags and power state; mirrors get_supported_settings() bit for
 * bit.  NOTE(review): local declaration and final return are missing
 * from this extract.
 */
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
418 if (lmp_bredr_capable(hdev))
419 settings |= MGMT_SETTING_BREDR;
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
436 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR "UUID16 list" field for hdev->uuids to @data (@len
 * bytes available); returns a pointer just past what was written.  The
 * field type is downgraded from EIR_UUID16_ALL to EIR_UUID16_SOME when
 * space runs out.  PnP Information service class is skipped.
 * NOTE(review): field-header setup lines and loop braces are missing
 * from this extract.
 */
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
/* Emit the 16-bit UUID little-endian, one byte at a time. */
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
/* Append an EIR "UUID32 list" field for hdev->uuids to @data; same
 * contract as create_uuid16_list() but for 32-bit UUIDs (copied raw
 * from bytes 12..15 of the stored 128-bit form).
 * NOTE(review): header setup and pointer advance lines are missing from
 * this extract.
 */
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
/* Append an EIR "UUID128 list" field for hdev->uuids to @data; same
 * contract as the 16/32-bit variants, copying the full 16-byte UUID.
 * NOTE(review): header setup and pointer advance lines are missing from
 * this extract.
 */
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
/* Build the Extended Inquiry Response payload for @hdev into @data:
 * local name (short or complete), TX power if valid, device ID if a
 * source is set, then the three UUID lists.  Each field is length-
 * prefixed EIR TLV.  NOTE(review): the "ptr" initialization, the name
 * truncation branch and several field-length lines are missing from
 * this extract.
 */
546 static void create_eir(struct hci_dev *hdev, u8 *data)
551 name_len = strlen(hdev->dev_name);
557 ptr[1] = EIR_NAME_SHORT;
559 ptr[1] = EIR_NAME_COMPLETE;
561 /* EIR Data length */
562 ptr[0] = name_len + 1;
564 memcpy(ptr + 2, hdev->dev_name, name_len);
566 ptr += (name_len + 2);
569 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
571 ptr[1] = EIR_TX_POWER;
572 ptr[2] = (u8) hdev->inq_tx_power;
577 if (hdev->devid_source > 0) {
579 ptr[1] = EIR_DEVICE_ID;
581 put_unaligned_le16(hdev->devid_source, ptr + 2);
582 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
583 put_unaligned_le16(hdev->devid_product, ptr + 6);
584 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Remaining space is handed to each UUID-list builder in turn. */
589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue HCI_OP_WRITE_EIR on @req when the freshly built EIR differs
 * from the cached hdev->eir.  Bails out early when the device is off,
 * lacks extended inquiry, has SSP disabled, or the service cache is
 * active.  NOTE(review): the early "return" statements after each guard
 * are missing from this extract.
 */
594 static void update_eir(struct hci_request *req)
596 struct hci_dev *hdev = req->hdev;
597 struct hci_cp_write_eir cp;
599 if (!hdev_is_powered(hdev))
602 if (!lmp_ext_inq_capable(hdev))
605 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
608 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
611 memset(&cp, 0, sizeof(cp));
613 create_eir(hdev, cp.data);
/* Skip the HCI command when nothing changed. */
615 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
618 memcpy(hdev->eir, cp.data, sizeof(cp.data));
620 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
623 static u8 get_service_classes(struct hci_dev *hdev)
625 struct bt_uuid *uuid;
628 list_for_each_entry(uuid, &hdev->uuids, list)
629 val |= uuid->svc_hint;
/* Queue HCI_OP_WRITE_CLASS_OF_DEV on @req when the computed Class of
 * Device (minor, major, service classes) differs from hdev->dev_class,
 * and mark the change pending via HCI_PENDING_CLASS.
 * NOTE(review): the "u8 cod[3]" declaration and the early returns after
 * the guards are missing from this extract.
 */
634 static void update_class(struct hci_request *req)
636 struct hci_dev *hdev = req->hdev;
639 BT_DBG("%s", hdev->name);
641 if (!hdev_is_powered(hdev))
644 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
647 cod[0] = hdev->minor_class;
648 cod[1] = hdev->major_class;
649 cod[2] = get_service_classes(hdev);
651 if (memcmp(cod, hdev->dev_class, 3) == 0)
654 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
656 set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and runs an HCI request (presumably update_class/
 * update_eir — the hci_req_add calls are missing from this extract, as
 * is the hci_dev_lock pairing with the unlock below; confirm against
 * the full source).
 */
659 static void service_cache_off(struct work_struct *work)
661 struct hci_dev *hdev = container_of(work, struct hci_dev,
663 struct hci_request req;
665 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
668 hci_req_init(&req, hdev);
675 hci_dev_unlock(hdev);
677 hci_req_run(&req, NULL);
/* One-time mgmt initialization for @hdev: sets HCI_MGMT (returning if
 * it was already set), wires up the service-cache delayed work and
 * clears HCI_PAIRABLE so user space must opt in explicitly.
 */
680 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
682 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
685 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)
687 /* Non-mgmt controlled devices get this bit set
688 * implicitly so that pairing works for them, however
689 * for mgmt we require user-space to explicitly enable
692 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: fills mgmt_rp_read_info with the
 * controller address, HCI version/manufacturer, supported and current
 * settings, device class and names, then replies with cmd_complete().
 * NOTE(review): the hci_dev_lock() matching the unlock and the final
 * sizeof(rp) argument are missing from this extract.
 */
695 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
696 void *data, u16 data_len)
698 struct mgmt_rp_read_info rp;
700 BT_DBG("sock %p %s", sk, hdev->name);
704 memset(&rp, 0, sizeof(rp));
706 bacpy(&rp.bdaddr, &hdev->bdaddr);
708 rp.version = hdev->hci_ver;
709 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
711 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
712 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
714 memcpy(rp.dev_class, hdev->dev_class, 3);
716 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
717 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
719 hci_dev_unlock(hdev);
721 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending mgmt command.
 * NOTE(review): the body (socket put, param/cmd kfree) is entirely
 * missing from this extract — only the signature is visible.
 */
725 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and register a pending mgmt command for @opcode on @hdev,
 * copying @len bytes of @data into cmd->param and adding it to
 * hdev->mgmt_pending.  NOTE(review): the NULL checks after both
 * kmallocs, the socket reference handling and the final return are
 * missing from this extract.
 */
732 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
733 struct hci_dev *hdev, void *data,
736 struct pending_cmd *cmd;
738 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
742 cmd->opcode = opcode;
743 cmd->index = hdev->id;
745 cmd->param = kmalloc(len, GFP_KERNEL);
752 memcpy(cmd->param, data, len);
757 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb for every pending command on @hdev whose opcode matches
 * @opcode (0 matches all).  Uses the _safe iterator so @cb may remove
 * the entry.  NOTE(review): the cb(cmd, data) invocation line is
 * missing from this extract.
 */
762 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
763 void (*cb)(struct pending_cmd *cmd,
767 struct pending_cmd *cmd, *tmp;
769 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
770 if (opcode > 0 && cmd->opcode != opcode)
/* Return the first pending command on @hdev with matching @opcode.
 * NOTE(review): the "return cmd;" and trailing "return NULL;" lines are
 * missing from this extract.
 */
777 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
779 struct pending_cmd *cmd;
781 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
782 if (cmd->opcode == opcode)
/* Unlink a pending command from its device list and free it. */
789 static void mgmt_pending_remove(struct pending_cmd *cmd)
791 list_del(&cmd->list);
792 mgmt_pending_free(cmd);
/* Reply to @opcode with the current settings bitmask (little-endian
 * u32) as the cmd_complete payload.
 */
795 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
797 __le32 settings = cpu_to_le32(get_current_settings(hdev));
799 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler: validates cp->val (0/1), short-circuits
 * a pending auto-off, replies immediately if already in the requested
 * state, rejects when another SET_POWERED is pending (BUSY path —
 * status constant not visible here), otherwise queues power_on or
 * power_off work.  NOTE(review): several interior lines (hci_dev_lock,
 * goto labels, cmd NULL check, return) are missing from this extract.
 */
803 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
806 struct mgmt_mode *cp = data;
807 struct pending_cmd *cmd;
810 BT_DBG("request for %s", hdev->name);
812 if (cp->val != 0x00 && cp->val != 0x01)
813 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
814 MGMT_STATUS_INVALID_PARAMS);
818 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
819 cancel_delayed_work(&hdev->power_off);
822 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
824 err = mgmt_powered(hdev, 1);
829 if (!!cp->val == hdev_is_powered(hdev)) {
830 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
834 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
835 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
840 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
847 queue_work(hdev->req_workqueue, &hdev->power_on);
849 queue_work(hdev->req_workqueue, &hdev->power_off.work);
854 hci_dev_unlock(hdev);
/* Broadcast mgmt event @event with @data_len bytes of payload to all
 * mgmt control sockets except @skip_sk.  hdr->index is the controller
 * id, or MGMT_INDEX_NONE when @hdev is NULL (the NULL-test line between
 * the two index assignments is missing from this extract).
 */
858 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
859 struct sock *skip_sk)
862 struct mgmt_hdr *hdr;
864 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
868 hdr = (void *) skb_put(skb, sizeof(*hdr));
869 hdr->opcode = cpu_to_le16(event);
871 hdr->index = cpu_to_le16(hdev->id);
873 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
874 hdr->len = cpu_to_le16(data_len);
877 memcpy(skb_put(skb, data_len), data, data_len);
879 /* Time stamp */
880 __net_timestamp(skb);
882 hci_send_to_control(skb, skip_sk);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * every mgmt socket except @skip.
 */
888 static int new_settings(struct hci_dev *hdev, struct sock *skip)
892 ev = cpu_to_le32(get_current_settings(hdev));
894 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler.  Validates parameters (val must be
 * 0/1; a timeout is only allowed with val=1 and a powered adapter),
 * requires BR/EDR support and HCI_CONNECTABLE, rejects concurrent
 * discoverable/connectable changes, handles the powered-off case purely
 * in dev_flags, short-circuits no-op requests, and otherwise sends
 * HCI_OP_WRITE_SCAN_ENABLE with a pending command tracking the reply.
 * NOTE(review): many interior lines (locals, goto labels/returns, scan
 * base value, hci_dev_lock) are missing from this extract.
 */
897 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
900 struct mgmt_cp_set_discoverable *cp = data;
901 struct pending_cmd *cmd;
906 BT_DBG("request for %s", hdev->name);
908 if (!lmp_bredr_capable(hdev))
909 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
910 MGMT_STATUS_NOT_SUPPORTED);
912 if (cp->val != 0x00 && cp->val != 0x01)
913 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
914 MGMT_STATUS_INVALID_PARAMS);
916 timeout = __le16_to_cpu(cp->timeout);
917 if (!cp->val && timeout > 0)
918 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
919 MGMT_STATUS_INVALID_PARAMS);
923 if (!hdev_is_powered(hdev) && timeout > 0) {
924 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
925 MGMT_STATUS_NOT_POWERED);
929 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
930 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
931 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
936 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
937 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
938 MGMT_STATUS_REJECTED);
942 if (!hdev_is_powered(hdev)) {
943 bool changed = false;
945 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
946 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
950 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
955 err = new_settings(hdev, sk);
/* Already in the requested state: just (re)arm or cancel the
 * discoverable timeout and reply. */
960 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
961 if (hdev->discov_timeout > 0) {
962 cancel_delayed_work(&hdev->discov_off);
963 hdev->discov_timeout = 0;
966 if (cp->val && timeout > 0) {
967 hdev->discov_timeout = timeout;
968 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
969 msecs_to_jiffies(hdev->discov_timeout * 1000));
972 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
976 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
985 scan |= SCAN_INQUIRY;
987 cancel_delayed_work(&hdev->discov_off);
989 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
991 mgmt_pending_remove(cmd);
994 hdev->discov_timeout = timeout;
997 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler.  Requires BR/EDR and val in {0,1};
 * when powered off only the dev_flags bits are toggled (disabling
 * connectable also clears discoverable), otherwise a scan-enable HCI
 * command is issued with a pending command tracking it; an active
 * discoverable timeout is cancelled when page scan goes away.
 * NOTE(review): interior lines (locals, goto labels, scan value
 * computation, hci_dev_lock, return) are missing from this extract.
 */
1001 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1004 struct mgmt_mode *cp = data;
1005 struct pending_cmd *cmd;
1009 BT_DBG("request for %s", hdev->name);
1011 if (!lmp_bredr_capable(hdev))
1012 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1013 MGMT_STATUS_NOT_SUPPORTED);
1015 if (cp->val != 0x00 && cp->val != 0x01)
1016 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1017 MGMT_STATUS_INVALID_PARAMS);
1021 if (!hdev_is_powered(hdev)) {
1022 bool changed = false;
1024 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1028 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1030 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1031 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1034 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1039 err = new_settings(hdev, sk);
1044 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1045 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1046 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1051 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1052 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1056 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1067 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1068 hdev->discov_timeout > 0)
1069 cancel_delayed_work(&hdev->discov_off);
1072 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1074 mgmt_pending_remove(cmd);
1077 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: validates val (0/1), toggles
 * HCI_PAIRABLE, replies with the settings, and broadcasts NEW_SETTINGS.
 * NOTE(review): the hci_dev_lock, the if/else around set/clear_bit and
 * the error-path goto are missing from this extract.
 */
1081 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1084 struct mgmt_mode *cp = data;
1087 BT_DBG("request for %s", hdev->name);
1089 if (cp->val != 0x00 && cp->val != 0x01)
1090 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1091 MGMT_STATUS_INVALID_PARAMS);
1096 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1098 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1100 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1104 err = new_settings(hdev, sk);
1107 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler.  Requires BR/EDR and val in
 * {0,1}; powered-off case toggles HCI_LINK_SECURITY in dev_flags only,
 * no-op case replies immediately when HCI_AUTH already matches,
 * otherwise sends HCI_OP_WRITE_AUTH_ENABLE with a pending command.
 * NOTE(review): interior lines (hci_dev_lock, val assignment, goto
 * labels, cmd NULL check, return) are missing from this extract.
 */
1111 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1114 struct mgmt_mode *cp = data;
1115 struct pending_cmd *cmd;
1119 BT_DBG("request for %s", hdev->name);
1121 if (!lmp_bredr_capable(hdev))
1122 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1123 MGMT_STATUS_NOT_SUPPORTED);
1125 if (cp->val != 0x00 && cp->val != 0x01)
1126 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1127 MGMT_STATUS_INVALID_PARAMS);
1131 if (!hdev_is_powered(hdev)) {
1132 bool changed = false;
1134 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1135 &hdev->dev_flags)) {
1136 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1140 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1145 err = new_settings(hdev, sk);
1150 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1151 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1158 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1159 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1163 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1169 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1171 mgmt_pending_remove(cmd);
1176 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler.  Requires SSP capability and val in {0,1};
 * powered-off case toggles HCI_SSP_ENABLED in dev_flags only, no-op
 * case replies immediately, otherwise sends HCI_OP_WRITE_SSP_MODE with
 * a pending command.  NOTE(review): interior lines (hci_dev_lock, val
 * assignment, goto labels, cmd NULL check, return) are missing from
 * this extract.
 */
1180 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1182 struct mgmt_mode *cp = data;
1183 struct pending_cmd *cmd;
1187 BT_DBG("request for %s", hdev->name);
1189 if (!lmp_ssp_capable(hdev))
1190 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1191 MGMT_STATUS_NOT_SUPPORTED);
1193 if (cp->val != 0x00 && cp->val != 0x01)
1194 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1195 MGMT_STATUS_INVALID_PARAMS);
1201 if (!hdev_is_powered(hdev)) {
1202 bool changed = false;
1204 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1205 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1209 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1214 err = new_settings(hdev, sk);
1219 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1220 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1225 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1226 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1236 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1238 mgmt_pending_remove(cmd);
1243 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: validates val (0/1), toggles
 * HCI_HS_ENABLED and replies with the settings — no HCI traffic needed.
 * NOTE(review): the NOT_SUPPORTED guard's condition (line 1253, likely
 * an enable_hs / HS-capability check) and the if/else around the bit
 * toggling are missing from this extract.
 */
1247 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1249 struct mgmt_mode *cp = data;
1251 BT_DBG("request for %s", hdev->name);
1254 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1255 MGMT_STATUS_NOT_SUPPORTED);
1257 if (cp->val != 0x00 && cp->val != 0x01)
1258 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1259 MGMT_STATUS_INVALID_PARAMS);
1262 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1264 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1266 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler.  Requires LE capability and val in {0,1};
 * when powered off or already in the requested host-LE state only
 * HCI_LE_ENABLED is toggled, otherwise HCI_OP_WRITE_LE_HOST_SUPPORTED
 * is sent (with simul = lmp_le_br_capable when enabling) under a
 * pending command.  NOTE(review): interior lines (hci_dev_lock, val
 * assignment, hci_cp.le setup, goto labels, return) are missing from
 * this extract.
 */
1269 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1271 struct mgmt_mode *cp = data;
1272 struct hci_cp_write_le_host_supported hci_cp;
1273 struct pending_cmd *cmd;
1277 BT_DBG("request for %s", hdev->name);
1279 if (!lmp_le_capable(hdev))
1280 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1281 MGMT_STATUS_NOT_SUPPORTED);
1283 if (cp->val != 0x00 && cp->val != 0x01)
1284 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1285 MGMT_STATUS_INVALID_PARAMS);
1290 enabled = lmp_host_le_capable(hdev);
1292 if (!hdev_is_powered(hdev) || val == enabled) {
1293 bool changed = false;
1295 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1296 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1300 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1305 err = new_settings(hdev, sk);
1310 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1311 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1316 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1322 memset(&hci_cp, 0, sizeof(hci_cp));
1326 hci_cp.simul = lmp_le_br_capable(hdev);
1329 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1332 mgmt_pending_remove(cmd);
1335 hci_dev_unlock(hdev);
1339 /* This is a helper function to test for pending mgmt commands that can
1340 * cause CoD or EIR HCI commands. We can only allow one such pending
1341 * mgmt command at a time since otherwise we cannot easily track what
1342 * the current values are, will be, and based on that calculate if a new
1343 * HCI command needs to be sent and if yes with what value.
/* NOTE(review): the "return true" inside the switch and the trailing
 * "return false" are missing from this extract. */
1345 static bool pending_eir_or_class(struct hci_dev *hdev)
1347 struct pending_cmd *cmd;
1349 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1350 switch (cmd->opcode) {
1351 case MGMT_OP_ADD_UUID:
1352 case MGMT_OP_REMOVE_UUID:
1353 case MGMT_OP_SET_DEV_CLASS:
1354 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID, stored in reversed (little-endian) byte order;
 * used by get_uuid_size() to detect 16/32-bit shortened UUIDs.
 */
1362 static const u8 bluetooth_base_uuid[] = {
1363 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1364 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 16, 32 or 128 bits wide: anything not
 * built on the Bluetooth Base UUID is a full 128-bit UUID; otherwise
 * the top 32 bits decide.  NOTE(review): the returns (128; 16 vs 32
 * based on "val") are missing from this extract.
 */
1367 static u8 get_uuid_size(const u8 *uuid)
1371 if (memcmp(uuid, bluetooth_base_uuid, 12))
1374 val = get_unaligned_le32(&uuid[12]);
/* MGMT_OP_ADD_UUID handler: rejects when another EIR/CoD-affecting
 * command is pending (pending_eir_or_class), records the new UUID on
 * hdev->uuids and runs an HCI request to refresh class/EIR; replies
 * immediately unless an HCI class write is in flight, in which case a
 * pending command waits for it.  NOTE(review): the BUSY status, the
 * kmalloc NULL check, update_class/update_eir additions and goto
 * labels are missing from this extract.
 */
1381 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1383 struct mgmt_cp_add_uuid *cp = data;
1384 struct pending_cmd *cmd;
1385 struct hci_request req;
1386 struct bt_uuid *uuid;
1389 BT_DBG("request for %s", hdev->name);
1393 if (pending_eir_or_class(hdev)) {
1394 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1399 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1405 memcpy(uuid->uuid, cp->uuid, 16);
1406 uuid->svc_hint = cp->svc_hint;
1407 uuid->size = get_uuid_size(cp->uuid);
1409 list_add_tail(&uuid->list, &hdev->uuids);
1411 hci_req_init(&req, hdev);
1416 hci_req_run(&req, NULL);
1418 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1419 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1420 hdev->dev_class, 3);
1424 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1433 hci_dev_unlock(hdev);
/* Arm the service cache on a powered device: sets HCI_SERVICE_CACHE and
 * schedules its expiry work, returning whether the cache was (re)armed.
 * NOTE(review): the return statements and the queue delay constant are
 * missing from this extract.
 */
1437 static bool enable_service_cache(struct hci_dev *hdev)
1439 if (!hdev_is_powered(hdev))
1442 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1443 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* MGMT_OP_REMOVE_UUID handler: the all-zero UUID clears every entry
 * (deferring the class/EIR refresh via enable_service_cache when
 * possible); otherwise matching entries are unlinked and an HCI request
 * refreshes class/EIR, with INVALID_PARAMS when nothing matched.
 * Reply handling mirrors add_uuid().  NOTE(review): the BUSY status,
 * "found" counter lines, kfree of removed entries, update_class/
 * update_eir additions and goto labels are missing from this extract.
 */
1451 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1454 struct mgmt_cp_remove_uuid *cp = data;
1455 struct pending_cmd *cmd;
1456 struct bt_uuid *match, *tmp;
1457 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1458 struct hci_request req;
1461 BT_DBG("request for %s", hdev->name);
1465 if (pending_eir_or_class(hdev)) {
1466 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1471 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1472 err = hci_uuids_clear(hdev);
1474 if (enable_service_cache(hdev)) {
1475 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1476 0, hdev->dev_class, 3);
1485 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1486 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1489 list_del(&match->list);
1495 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1496 MGMT_STATUS_INVALID_PARAMS);
1501 hci_req_init(&req, hdev);
1506 hci_req_run(&req, NULL);
1508 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1509 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1510 hdev->dev_class, 3);
1514 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1523 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEV_CLASS handler: requires BR/EDR, rejects concurrent
 * EIR/CoD commands, validates the class bits (low 2 minor bits and top
 * 3 major bits must be zero), stores major/minor, and — on a powered
 * device — flushes any pending service cache before refreshing class/
 * EIR via an HCI request; reply handling mirrors add_uuid().
 * NOTE(review): the BUSY status, the hci_dev_lock re-acquisition after
 * cancel_delayed_work_sync, update_eir/update_class additions and goto
 * labels are missing from this extract.
 */
1527 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1530 struct mgmt_cp_set_dev_class *cp = data;
1531 struct pending_cmd *cmd;
1532 struct hci_request req;
1535 BT_DBG("request for %s", hdev->name);
1537 if (!lmp_bredr_capable(hdev))
1538 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1539 MGMT_STATUS_NOT_SUPPORTED);
1543 if (pending_eir_or_class(hdev)) {
1544 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1549 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1550 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1551 MGMT_STATUS_INVALID_PARAMS);
1555 hdev->major_class = cp->major;
1556 hdev->minor_class = cp->minor;
1558 if (!hdev_is_powered(hdev)) {
1559 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1560 hdev->dev_class, 3);
1564 hci_req_init(&req, hdev);
1566 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1567 hci_dev_unlock(hdev);
1568 cancel_delayed_work_sync(&hdev->service_cache);
1575 hci_req_run(&req, NULL);
1577 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1578 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1579 hdev->dev_class, 3);
1583 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1592 hci_dev_unlock(hdev);
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the stored BR/EDR link keys with
 * the list supplied by userspace and update the debug-keys policy flag.
 * NOTE(review): expected_len is computed as key_count * struct size into a
 * u16 — presumably bounded by the earlier msglen check in mgmt_control, but
 * a 16-bit multiply overflow here is worth confirming upstream.
 */
1596 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1599 struct mgmt_cp_load_link_keys *cp = data;
1600 u16 key_count, expected_len;
1603 key_count = __le16_to_cpu(cp->key_count);
/* The variable-length payload must match the advertised key count. */
1605 expected_len = sizeof(*cp) + key_count *
1606 sizeof(struct mgmt_link_key_info);
1607 if (expected_len != len) {
1608 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1610 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1611 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a strict boolean. */
1614 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1615 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1616 MGMT_STATUS_INVALID_PARAMS);
1618 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating any state. */
1621 for (i = 0; i < key_count; i++) {
1622 struct mgmt_link_key_info *key = &cp->keys[i];
/* Link keys are a BR/EDR concept; LE addresses are invalid here. */
1624 if (key->addr.type != BDADDR_BREDR)
1625 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1626 MGMT_STATUS_INVALID_PARAMS);
/* All entries valid: wipe old keys and load the new set. */
1631 hci_link_keys_clear(hdev);
1633 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1636 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1638 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1640 for (i = 0; i < key_count; i++) {
1641 struct mgmt_link_key_info *key = &cp->keys[i];
1643 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1644 key->type, key->pin_len);
1647 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1649 hci_dev_unlock(hdev);
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given peer address,
 * skipping delivery to skip_sk (the socket that requested the unpair,
 * which gets a command reply instead).
 */
1654 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1655 u8 addr_type, struct sock *skip_sk)
1657 struct mgmt_ev_device_unpaired ev;
1659 bacpy(&ev.addr.bdaddr, bdaddr);
1660 ev.addr.type = addr_type;
1662 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handle MGMT_OP_UNPAIR_DEVICE: delete stored keys for a peer (link key
 * for BR/EDR, LTK for LE) and optionally disconnect an active link.
 * When a disconnect is issued the reply is deferred via a pending cmd
 * until the disconnection completes.
 */
1666 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1669 struct mgmt_cp_unpair_device *cp = data;
1670 struct mgmt_rp_unpair_device rp;
1671 struct hci_cp_disconnect dc;
1672 struct pending_cmd *cmd;
1673 struct hci_conn *conn;
/* Reply payload echoes the target address in every outcome. */
1676 memset(&rp, 0, sizeof(rp));
1677 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1678 rp.addr.type = cp->addr.type;
1680 if (!bdaddr_type_is_valid(cp->addr.type))
1681 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1682 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a strict boolean. */
1685 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1686 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1687 MGMT_STATUS_INVALID_PARAMS,
1692 if (!hdev_is_powered(hdev)) {
1693 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1694 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Key storage differs per transport: link key vs long term key. */
1698 if (cp->addr.type == BDADDR_BREDR)
1699 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1701 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1704 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1705 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1709 if (cp->disconnect) {
1710 if (cp->addr.type == BDADDR_BREDR)
1711 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1714 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection (or no disconnect requested): finish now. */
1721 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1723 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Defer the reply until HCI_OP_DISCONNECT completes. */
1727 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1734 dc.handle = cpu_to_le16(conn->handle);
1735 dc.reason = 0x13; /* Remote User Terminated Connection */
1736 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1738 mgmt_pending_remove(cmd);
1741 hci_dev_unlock(hdev);
/* Handle MGMT_OP_DISCONNECT: tear down the ACL or LE link to the given
 * peer.  The reply is deferred via a pending cmd until the controller
 * reports disconnection.
 */
1745 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1748 struct mgmt_cp_disconnect *cp = data;
1749 struct mgmt_rp_disconnect rp;
1750 struct hci_cp_disconnect dc;
1751 struct pending_cmd *cmd;
1752 struct hci_conn *conn;
/* Reply payload echoes the target address in every outcome. */
1757 memset(&rp, 0, sizeof(rp));
1758 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1759 rp.addr.type = cp->addr.type;
1761 if (!bdaddr_type_is_valid(cp->addr.type))
1762 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1763 MGMT_STATUS_INVALID_PARAMS,
1768 if (!test_bit(HCI_UP, &hdev->flags)) {
1769 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1770 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be outstanding per controller. */
1774 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1775 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1776 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1780 if (cp->addr.type == BDADDR_BREDR)
1781 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1784 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED means no usable link to disconnect. */
1786 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1787 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1788 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1792 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1798 dc.handle = cpu_to_le16(conn->handle);
1799 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1801 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1803 mgmt_pending_remove(cmd);
1806 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type to the mgmt BDADDR_* address
 * type used in management-API replies and events.
 */
1810 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1812 switch (link_type) {
1814 switch (addr_type) {
1815 case ADDR_LE_DEV_PUBLIC:
1816 return BDADDR_LE_PUBLIC;
1819 /* Fallback to LE Random address type */
1820 return BDADDR_LE_RANDOM;
1824 /* Fallback to BR/EDR type */
1825 return BDADDR_BREDR;
/* Handle MGMT_OP_GET_CONNECTIONS: build a reply listing the addresses of
 * all mgmt-visible connections.  SCO/eSCO links are filtered out in the
 * fill loop, so the length is recomputed afterwards.
 */
1829 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1832 struct mgmt_rp_get_connections *rp;
1842 if (!hdev_is_powered(hdev)) {
1843 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1844 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply buffer. */
1849 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1850 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1854 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1855 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
1862 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1863 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1865 bacpy(&rp->addr[i].bdaddr, &c->dst);
1866 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1867 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1872 rp->conn_count = cpu_to_le16(i);
1874 /* Recalculate length in case of filtered SCO connections, etc */
1875 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1877 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1883 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the corresponding
 * HCI negative reply; the pending entry is removed again if the HCI send
 * fails.
 */
1887 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1888 struct mgmt_cp_pin_code_neg_reply *cp)
1890 struct pending_cmd *cmd;
1893 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1898 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1899 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1901 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to the
 * controller.  If the link demands high security, only a full 16-byte PIN
 * is acceptable; shorter PINs are converted into a negative reply.
 */
1906 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1909 struct hci_conn *conn;
1910 struct mgmt_cp_pin_code_reply *cp = data;
1911 struct hci_cp_pin_code_reply reply;
1912 struct pending_cmd *cmd;
1919 if (!hdev_is_powered(hdev)) {
1920 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1921 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only happens on a BR/EDR ACL link. */
1925 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1927 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1928 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a 16-digit PIN; otherwise reject the
 * pairing on the peer's behalf with a negative reply. */
1932 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1933 struct mgmt_cp_pin_code_neg_reply ncp;
1935 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1937 BT_ERR("PIN code is not 16 bytes long");
1939 err = send_pin_code_neg_reply(sk, hdev, &ncp);
1941 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1942 MGMT_STATUS_INVALID_PARAMS);
1947 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
1953 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
1954 reply.pin_len = cp->pin_len;
1955 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
1957 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
1959 mgmt_pending_remove(cmd);
1962 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: record the IO capability used for
 * subsequent pairing attempts.  Pure bookkeeping — nothing is sent to the
 * controller.
 */
1966 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1969 struct mgmt_cp_set_io_capability *cp = data;
1975 hdev->io_capability = cp->io_capability;
1977 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1978 hdev->io_capability);
1980 hci_dev_unlock(hdev);
1982 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is this
 * connection, if any.
 */
1986 static struct pending_cmd *find_pairing(struct hci_conn *conn)
1988 struct hci_dev *hdev = conn->hdev;
1989 struct pending_cmd *cmd;
1991 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1992 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1995 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the reply with the given mgmt status,
 * detach all pairing callbacks from the connection so no further
 * notifications arrive, and drop the pending command.
 */
2004 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2006 struct mgmt_rp_pair_device rp;
2007 struct hci_conn *conn = cmd->user_data;
2009 bacpy(&rp.addr.bdaddr, &conn->dst);
2010 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2012 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2015 /* So we don't get further callbacks for this connection */
2016 conn->connect_cfm_cb = NULL;
2017 conn->security_cfm_cb = NULL;
2018 conn->disconn_cfm_cb = NULL;
2022 mgmt_pending_remove(cmd);
/* Connection/security callback for BR/EDR pairing: translate the HCI
 * status and complete the matching PAIR_DEVICE command.
 */
2025 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2027 struct pending_cmd *cmd;
2029 BT_DBG("status %u", status);
2031 cmd = find_pairing(conn);
2033 BT_DBG("Unable to find a pending command");
2035 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing: completes the pending
 * PAIR_DEVICE command.  Used instead of pairing_complete_cb because for
 * LE a successful connection alone does not mean pairing finished.
 */
2038 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2040 struct pending_cmd *cmd;
2042 BT_DBG("status %u", status);
2047 cmd = find_pairing(conn);
2049 BT_DBG("Unable to find a pending command");
2051 pairing_complete(cmd, mgmt_status(status));
/* Handle MGMT_OP_PAIR_DEVICE: initiate an ACL or LE connection with
 * dedicated-bonding authentication, wire up pairing callbacks on the
 * connection, and defer the reply until pairing resolves.
 */
2054 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2057 struct mgmt_cp_pair_device *cp = data;
2058 struct mgmt_rp_pair_device rp;
2059 struct pending_cmd *cmd;
2060 u8 sec_level, auth_type;
2061 struct hci_conn *conn;
/* Reply payload echoes the target address in every outcome. */
2066 memset(&rp, 0, sizeof(rp));
2067 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2068 rp.addr.type = cp->addr.type;
2070 if (!bdaddr_type_is_valid(cp->addr.type))
2071 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2072 MGMT_STATUS_INVALID_PARAMS,
2077 if (!hdev_is_powered(hdev)) {
2078 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2079 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection. */
2083 sec_level = BT_SECURITY_MEDIUM;
2084 if (cp->io_cap == 0x03)
2085 auth_type = HCI_AT_DEDICATED_BONDING;
2087 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2089 if (cp->addr.type == BDADDR_BREDR)
2090 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2091 cp->addr.type, sec_level, auth_type);
2093 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2094 cp->addr.type, sec_level, auth_type);
2099 if (PTR_ERR(conn) == -EBUSY)
2100 status = MGMT_STATUS_BUSY;
2102 status = MGMT_STATUS_CONNECT_FAILED;
2104 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing is in flight. */
2110 if (conn->connect_cfm_cb) {
2112 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2113 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2117 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2124 /* For LE, just connecting isn't a proof that the pairing finished */
2125 if (cp->addr.type == BDADDR_BREDR)
2126 conn->connect_cfm_cb = pairing_complete_cb;
2128 conn->connect_cfm_cb = le_connect_complete_cb;
2130 conn->security_cfm_cb = pairing_complete_cb;
2131 conn->disconn_cfm_cb = pairing_complete_cb;
2132 conn->io_capability = cp->io_cap;
2133 cmd->user_data = conn;
/* Already connected and secure enough: pairing is done immediately. */
2135 if (conn->state == BT_CONNECTED &&
2136 hci_conn_security(conn, sec_level, auth_type))
2137 pairing_complete(cmd, 0);
2142 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the in-flight PAIR_DEVICE
 * command for the given address, completing it with CANCELLED status.
 */
2146 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2149 struct mgmt_addr_info *addr = data;
2150 struct pending_cmd *cmd;
2151 struct hci_conn *conn;
2158 if (!hdev_is_powered(hdev)) {
2159 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2160 MGMT_STATUS_NOT_POWERED);
2164 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2166 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2167 MGMT_STATUS_INVALID_PARAMS);
2171 conn = cmd->user_data;
/* The cancel must name the same peer the pairing targets. */
2173 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2174 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2175 MGMT_STATUS_INVALID_PARAMS);
2179 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2181 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2182 addr, sizeof(*addr));
2184 hci_dev_unlock(hdev);
/* Common backend for all user confirm/passkey (neg-)reply commands:
 * route LE responses through SMP, BR/EDR responses through the given
 * HCI opcode, with a pending cmd covering the HCI round-trip.
 */
2188 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2189 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2190 u16 hci_op, __le32 passkey)
2192 struct pending_cmd *cmd;
2193 struct hci_conn *conn;
2198 if (!hdev_is_powered(hdev)) {
2199 err = cmd_status(sk, hdev->id, mgmt_op,
2200 MGMT_STATUS_NOT_POWERED);
2204 if (type == BDADDR_BREDR)
2205 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2207 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2210 err = cmd_status(sk, hdev->id, mgmt_op,
2211 MGMT_STATUS_NOT_CONNECTED);
/* LE pairing is handled entirely by SMP; reply synchronously. */
2215 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2216 /* Continue with pairing via SMP */
2217 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2220 err = cmd_status(sk, hdev->id, mgmt_op,
2221 MGMT_STATUS_SUCCESS);
2223 err = cmd_status(sk, hdev->id, mgmt_op,
2224 MGMT_STATUS_FAILED);
2229 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2235 /* Continue with pairing via HCI */
/* Passkey replies carry a payload; all other ops just send the bdaddr. */
2236 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2237 struct hci_cp_user_passkey_reply cp;
2239 bacpy(&cp.bdaddr, bdaddr);
2240 cp.passkey = passkey;
2241 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2243 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2246 mgmt_pending_remove(cmd);
2249 hci_dev_unlock(hdev);
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper over
 * user_pairing_resp() with no passkey payload.
 */
2253 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2254 void *data, u16 len)
2256 struct mgmt_cp_pin_code_neg_reply *cp = data;
2260 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2261 MGMT_OP_PIN_CODE_NEG_REPLY,
2262 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size payload and
 * delegate to user_pairing_resp().
 */
2265 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2268 struct mgmt_cp_user_confirm_reply *cp = data;
2272 if (len != sizeof(*cp))
2273 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2274 MGMT_STATUS_INVALID_PARAMS);
2276 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2277 MGMT_OP_USER_CONFIRM_REPLY,
2278 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper over
 * user_pairing_resp() with no passkey payload.
 */
2281 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2282 void *data, u16 len)
2284 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2288 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2289 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2290 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_PASSKEY_REPLY: forwards the user-entered passkey
 * via user_pairing_resp().
 */
2293 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2296 struct mgmt_cp_user_passkey_reply *cp = data;
2300 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2301 MGMT_OP_USER_PASSKEY_REPLY,
2302 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper over
 * user_pairing_resp() with no passkey payload.
 */
2305 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2306 void *data, u16 len)
2308 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2312 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2313 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2314 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying the given name into the
 * request.  The full cp.name buffer is copied, so callers must pass a
 * buffer at least sizeof(cp.name) bytes long (hdev->dev_name qualifies).
 */
2317 static void update_name(struct hci_request *req, const char *name)
2319 struct hci_cp_write_local_name cp;
2321 memcpy(cp.name, name, sizeof(cp.name));
2323 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Handle MGMT_OP_SET_LOCAL_NAME: store the short name, and either update
 * the cached name and notify listeners (unpowered) or push the new name
 * to the controller via an hci_request (powered).
 */
2326 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2329 struct mgmt_cp_set_local_name *cp = data;
2330 struct pending_cmd *cmd;
2331 struct hci_request req;
/* The short name is host-side only; always stored. */
2338 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2340 if (!hdev_is_powered(hdev)) {
2341 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2343 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Broadcast the change to other mgmt sockets. */
2348 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2354 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2360 hci_req_init(&req, hdev);
2361 update_name(&req, cp->name);
2362 err = hci_req_run(&req, NULL);
2364 mgmt_pending_remove(cmd);
2367 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its Simple
 * Pairing hash/randomizer.  Requires power and SSP support; only one
 * request may be outstanding at a time.
 */
2371 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2372 void *data, u16 data_len)
2374 struct pending_cmd *cmd;
2377 BT_DBG("%s", hdev->name);
2381 if (!hdev_is_powered(hdev)) {
2382 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2383 MGMT_STATUS_NOT_POWERED);
2387 if (!lmp_ssp_capable(hdev)) {
2388 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2389 MGMT_STATUS_NOT_SUPPORTED);
2393 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2394 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2399 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2405 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2407 mgmt_pending_remove(cmd);
2410 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store a peer's out-of-band pairing
 * hash/randomizer and report success or failure synchronously.
 */
2414 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2415 void *data, u16 len)
2417 struct mgmt_cp_add_remote_oob_data *cp = data;
2421 BT_DBG("%s ", hdev->name);
2425 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2428 status = MGMT_STATUS_FAILED;
2430 status = MGMT_STATUS_SUCCESS;
2432 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2433 &cp->addr, sizeof(cp->addr));
2435 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored out-of-band data
 * for a peer.  A lookup failure is reported as INVALID_PARAMS.
 */
2439 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2440 void *data, u16 len)
2442 struct mgmt_cp_remove_remote_oob_data *cp = data;
2446 BT_DBG("%s", hdev->name);
2450 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2452 status = MGMT_STATUS_INVALID_PARAMS;
2454 status = MGMT_STATUS_SUCCESS;
2456 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2457 status, &cp->addr, sizeof(cp->addr));
2459 hci_dev_unlock(hdev);
/* Continue an interleaved (BR/EDR + LE) discovery with the BR/EDR inquiry
 * phase; on failure the discovery state machine is reset to STOPPED.
 */
2463 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2467 BT_DBG("%s", hdev->name);
2471 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2473 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2475 hci_dev_unlock(hdev);
/* Handle MGMT_OP_START_DISCOVERY: kick off BR/EDR inquiry, LE scan, or
 * both (interleaved) depending on the requested type, after checking
 * power, periodic-inquiry and current discovery state.
 */
2480 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2481 void *data, u16 len)
2483 struct mgmt_cp_start_discovery *cp = data;
2484 struct pending_cmd *cmd;
2487 BT_DBG("%s", hdev->name);
2491 if (!hdev_is_powered(hdev)) {
2492 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2493 MGMT_STATUS_NOT_POWERED);
/* Discovery cannot coexist with periodic inquiry mode. */
2497 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2498 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2503 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2504 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2509 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2515 hdev->discovery.type = cp->type;
/* Each discovery type needs matching controller capabilities. */
2517 switch (hdev->discovery.type) {
2518 case DISCOV_TYPE_BREDR:
2519 if (!lmp_bredr_capable(hdev)) {
2520 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2521 MGMT_STATUS_NOT_SUPPORTED);
2522 mgmt_pending_remove(cmd);
2526 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2529 case DISCOV_TYPE_LE:
2530 if (!lmp_host_le_capable(hdev)) {
2531 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2532 MGMT_STATUS_NOT_SUPPORTED);
2533 mgmt_pending_remove(cmd);
2537 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2538 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2541 case DISCOV_TYPE_INTERLEAVED:
2542 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2543 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2544 MGMT_STATUS_NOT_SUPPORTED);
2545 mgmt_pending_remove(cmd);
/* Interleaved starts with the LE scan phase; the BR/EDR phase
 * follows via mgmt_interleaved_discovery(). */
2549 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2550 LE_SCAN_TIMEOUT_BREDR_LE);
2554 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2555 MGMT_STATUS_INVALID_PARAMS);
2556 mgmt_pending_remove(cmd);
2561 mgmt_pending_remove(cmd);
2563 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2566 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: cancel whatever phase discovery is in —
 * inquiry / LE scan while FINDING, or a remote name request while
 * RESOLVING.  The requested type must match the active discovery type.
 */
2570 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2573 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2574 struct pending_cmd *cmd;
2575 struct hci_cp_remote_name_req_cancel cp;
2576 struct inquiry_entry *e;
2579 BT_DBG("%s", hdev->name);
2583 if (!hci_discovery_active(hdev)) {
2584 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2585 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2586 sizeof(mgmt_cp->type));
2590 if (hdev->discovery.type != mgmt_cp->type) {
2591 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2592 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2593 sizeof(mgmt_cp->type));
2597 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2603 switch (hdev->discovery.state) {
2604 case DISCOVERY_FINDING:
/* HCI_INQUIRY flag distinguishes inquiry from an LE scan. */
2605 if (test_bit(HCI_INQUIRY, &hdev->flags))
2606 err = hci_cancel_inquiry(hdev);
2608 err = hci_cancel_le_scan(hdev);
2612 case DISCOVERY_RESOLVING:
2613 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* No name resolution pending — nothing to cancel, stop now. */
2616 mgmt_pending_remove(cmd);
2617 err = cmd_complete(sk, hdev->id,
2618 MGMT_OP_STOP_DISCOVERY, 0,
2620 sizeof(mgmt_cp->type));
2621 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2625 bacpy(&cp.bdaddr, &e->data.bdaddr);
2626 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2632 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2637 mgmt_pending_remove(cmd);
2639 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2642 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: userspace tells us whether the name of a
 * discovered device is already known, which decides whether name
 * resolution is still needed for that inquiry-cache entry.
 */
2646 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2649 struct mgmt_cp_confirm_name *cp = data;
2650 struct inquiry_entry *e;
2653 BT_DBG("%s", hdev->name);
2657 if (!hci_discovery_active(hdev)) {
2658 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2659 MGMT_STATUS_FAILED);
2663 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2665 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2666 MGMT_STATUS_INVALID_PARAMS);
2670 if (cp->name_known) {
2671 e->name_state = NAME_KNOWN;
/* Name unknown: mark for resolution during this discovery. */
2674 e->name_state = NAME_NEEDED;
2675 hci_inquiry_cache_update_resolve(hdev, e);
2678 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2682 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the given address to the controller's
 * blacklist; duplicate entries fail with MGMT_STATUS_FAILED.
 */
2686 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2689 struct mgmt_cp_block_device *cp = data;
2693 BT_DBG("%s", hdev->name);
2695 if (!bdaddr_type_is_valid(cp->addr.type))
2696 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2697 MGMT_STATUS_INVALID_PARAMS,
2698 &cp->addr, sizeof(cp->addr));
2702 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2704 status = MGMT_STATUS_FAILED;
2706 status = MGMT_STATUS_SUCCESS;
2708 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2709 &cp->addr, sizeof(cp->addr));
2711 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * blacklist; a missing entry is reported as INVALID_PARAMS.
 */
2716 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2719 struct mgmt_cp_unblock_device *cp = data;
2723 BT_DBG("%s", hdev->name);
2725 if (!bdaddr_type_is_valid(cp->addr.type))
2726 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2727 MGMT_STATUS_INVALID_PARAMS,
2728 &cp->addr, sizeof(cp->addr));
2732 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2734 status = MGMT_STATUS_INVALID_PARAMS;
2736 status = MGMT_STATUS_SUCCESS;
2738 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2739 &cp->addr, sizeof(cp->addr));
2741 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record (source,
 * vendor, product, version) and refresh controller-side data via an
 * hci_request.  Source 0x0000 disables, 0x0001 = Bluetooth SIG,
 * 0x0002 = USB-IF; anything higher is invalid.
 */
2746 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2749 struct mgmt_cp_set_device_id *cp = data;
2750 struct hci_request req;
2754 BT_DBG("%s", hdev->name);
2756 source = __le16_to_cpu(cp->source);
2758 if (source > 0x0002)
2759 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2760 MGMT_STATUS_INVALID_PARAMS);
2764 hdev->devid_source = source;
2765 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2766 hdev->devid_product = __le16_to_cpu(cp->product);
2767 hdev->devid_version = __le16_to_cpu(cp->version);
2769 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2771 hci_req_init(&req, hdev);
2773 hci_req_run(&req, NULL);
2775 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle interlaced page scanning
 * with a shorter page-scan interval so the device can be connected to
 * faster, at the cost of extra radio duty cycle.  BR/EDR only, requires
 * power and the connectable setting.
 */
2780 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2781 void *data, u16 len)
2783 struct mgmt_mode *cp = data;
2784 struct hci_cp_write_page_scan_activity acp;
2788 BT_DBG("%s", hdev->name);
2790 if (!lmp_bredr_capable(hdev))
2791 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2792 MGMT_STATUS_NOT_SUPPORTED);
2794 if (cp->val != 0x00 && cp->val != 0x01)
2795 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2796 MGMT_STATUS_INVALID_PARAMS);
2798 if (!hdev_is_powered(hdev))
2799 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2800 MGMT_STATUS_NOT_POWERED);
2802 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2803 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2804 MGMT_STATUS_REJECTED);
2809 type = PAGE_SCAN_TYPE_INTERLACED;
2811 /* 160 msec page scan interval */
2812 acp.interval = __constant_cpu_to_le16(0x0100);
2814 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2816 /* default 1.28 sec page scan */
2817 acp.interval = __constant_cpu_to_le16(0x0800);
2820 /* default 11.25 msec page scan window */
2821 acp.window = __constant_cpu_to_le16(0x0012);
/* Two HCI writes back to back: activity first, then scan type. */
2823 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2826 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2827 MGMT_STATUS_FAILED);
2831 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2833 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2834 MGMT_STATUS_FAILED);
2838 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2841 hci_dev_unlock(hdev);
/* Validate one mgmt_ltk_info entry: authenticated and master must be
 * strict booleans and the address type must be an LE type.
 */
2845 static bool ltk_is_valid(struct mgmt_ltk_info *key)
2847 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2849 if (key->master != 0x00 && key->master != 0x01)
2851 if (!bdaddr_type_is_le(key->addr.type))
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace all stored SMP long term
 * keys with the validated list supplied by userspace.
 * NOTE(review): as in load_link_keys, expected_len is a u16 product of a
 * user-controlled key_count — overflow behavior worth confirming upstream.
 */
2856 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2857 void *cp_data, u16 len)
2859 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2860 u16 key_count, expected_len;
2863 key_count = __le16_to_cpu(cp->key_count);
/* The variable-length payload must match the advertised key count. */
2865 expected_len = sizeof(*cp) + key_count *
2866 sizeof(struct mgmt_ltk_info);
2867 if (expected_len != len) {
2868 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2870 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2871 MGMT_STATUS_INVALID_PARAMS);
2874 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before mutating any state. */
2876 for (i = 0; i < key_count; i++) {
2877 struct mgmt_ltk_info *key = &cp->keys[i];
2879 if (!ltk_is_valid(key))
2880 return cmd_status(sk, hdev->id,
2881 MGMT_OP_LOAD_LONG_TERM_KEYS,
2882 MGMT_STATUS_INVALID_PARAMS);
2887 hci_smp_ltks_clear(hdev);
2889 for (i = 0; i < key_count; i++) {
2890 struct mgmt_ltk_info *key = &cp->keys[i];
2896 type = HCI_SMP_LTK_SLAVE;
2898 hci_add_ltk(hdev, &key->addr.bdaddr,
2899 bdaddr_to_le(key->addr.type),
2900 type, 0, key->authenticated, key->val,
2901 key->enc_size, key->ediv, key->rand);
2904 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
2907 hci_dev_unlock(hdev);
/* Dispatch table indexed by mgmt opcode.  Each entry carries the handler,
 * whether the command payload is variable-length (var_len: true means the
 * declared size is a minimum), and the expected/minimum parameter size.
 * Entry order must match the MGMT_OP_* opcode numbering.
 */
2912 static const struct mgmt_handler {
2913 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2917 } mgmt_handlers[] = {
2918 { NULL }, /* 0x0000 (no command) */
2919 { read_version, false, MGMT_READ_VERSION_SIZE },
2920 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
2921 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
2922 { read_controller_info, false, MGMT_READ_INFO_SIZE },
2923 { set_powered, false, MGMT_SETTING_SIZE },
2924 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
2925 { set_connectable, false, MGMT_SETTING_SIZE },
2926 { set_fast_connectable, false, MGMT_SETTING_SIZE },
2927 { set_pairable, false, MGMT_SETTING_SIZE },
2928 { set_link_security, false, MGMT_SETTING_SIZE },
2929 { set_ssp, false, MGMT_SETTING_SIZE },
2930 { set_hs, false, MGMT_SETTING_SIZE },
2931 { set_le, false, MGMT_SETTING_SIZE },
2932 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
2933 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
2934 { add_uuid, false, MGMT_ADD_UUID_SIZE },
2935 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
2936 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
2937 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
2938 { disconnect, false, MGMT_DISCONNECT_SIZE },
2939 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
2940 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
2941 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
2942 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
2943 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
2944 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
2945 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
2946 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
2947 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
2948 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
2949 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
2950 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
2951 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
2952 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
2953 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
2954 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
2955 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
2956 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
2957 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
2958 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for management commands arriving on an HCI control socket:
 * copy the message from userspace, validate header/opcode/index/length,
 * and dispatch to the matching mgmt_handlers[] entry.
 */
2962 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2966 struct mgmt_hdr *hdr;
2967 u16 opcode, index, len;
2968 struct hci_dev *hdev = NULL;
2969 const struct mgmt_handler *handler;
2972 BT_DBG("got %zu bytes", msglen);
2974 if (msglen < sizeof(*hdr))
2977 buf = kmalloc(msglen, GFP_KERNEL);
2981 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
2987 opcode = __le16_to_cpu(hdr->opcode);
2988 index = __le16_to_cpu(hdr->index);
2989 len = __le16_to_cpu(hdr->len);
/* The header's declared length must match the actual payload. */
2991 if (len != msglen - sizeof(*hdr)) {
2996 if (index != MGMT_INDEX_NONE) {
2997 hdev = hci_dev_get(index);
2999 err = cmd_status(sk, index, opcode,
3000 MGMT_STATUS_INVALID_INDEX);
3005 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3006 mgmt_handlers[opcode].func == NULL) {
3007 BT_DBG("Unknown op %u", opcode);
3008 err = cmd_status(sk, index, opcode,
3009 MGMT_STATUS_UNKNOWN_COMMAND);
/* Commands below READ_INFO are index-less; the rest need an index. */
3013 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3014 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3015 err = cmd_status(sk, index, opcode,
3016 MGMT_STATUS_INVALID_INDEX);
3020 handler = &mgmt_handlers[opcode];
/* Fixed-size commands must match exactly; variable ones need at
 * least the declared minimum. */
3022 if ((handler->var_len && len < handler->data_len) ||
3023 (!handler->var_len && len != handler->data_len)) {
3024 err = cmd_status(sk, index, opcode,
3025 MGMT_STATUS_INVALID_PARAMS);
3030 mgmt_init_hdev(sk, hdev);
3032 cp = buf + sizeof(*hdr);
3034 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach() callback: answer a pending command with the
 * status passed via data, then drop it.
 */
3048 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3052 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3053 mgmt_pending_remove(cmd);
/* Notify mgmt listeners that a new (mgmt-visible) controller appeared. */
3056 int mgmt_index_added(struct hci_dev *hdev)
3058 if (!mgmt_valid_hdev(hdev))
3061 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Notify mgmt listeners that a controller went away; all pending commands
 * for it are failed with INVALID_INDEX first.
 */
3064 int mgmt_index_removed(struct hci_dev *hdev)
3066 u8 status = MGMT_STATUS_INVALID_INDEX;
3068 if (!mgmt_valid_hdev(hdev))
3071 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3073 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3078 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending settings command with
 * the current settings and free it.  The first responder's socket is
 * stashed (with a hold) in the cmd_lookup so the caller can skip it when
 * broadcasting the settings-changed event.
 */
3082 static void settings_rsp(struct pending_cmd *cmd, void *data)
3084 struct cmd_lookup *match = data;
3086 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3088 list_del(&cmd->list);
3090 if (match->sk == NULL) {
3091 match->sk = cmd->sk;
3092 sock_hold(match->sk);
3095 mgmt_pending_free(cmd);
/* Queue a Write Scan Enable command reflecting the current connectable /
 * discoverable settings (page scan and/or inquiry scan).
 */
3098 static void set_bredr_scan(struct hci_request *req)
3100 struct hci_dev *hdev = req->hdev;
3103 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3105 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3106 scan |= SCAN_INQUIRY;
3109 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Completion callback for the power-up hci_request: answer all pending
 * SET_POWERED commands and broadcast the new settings.
 */
3112 static void powered_complete(struct hci_dev *hdev, u8 status)
3114 struct cmd_lookup match = { NULL, hdev };
3116 BT_DBG("status 0x%02x", status);
3120 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3122 new_settings(hdev, match.sk);
3124 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after power-on: SSP mode, LE host
 * support, authentication (link security), and — for BR/EDR capable
 * controllers — scan mode and local name. Returns the hci_req_run()
 * result; powered_complete() fires when the request finishes.
 */
3130 static int powered_update_hci(struct hci_dev *hdev)
3132 struct hci_request req;
3135 hci_req_init(&req, hdev);
3137 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3138 !lmp_host_ssp_capable(hdev)) {
3141 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3144 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3145 struct hci_cp_write_le_host_supported cp;
3148 cp.simul = lmp_le_br_capable(hdev);
3150 /* Check first if we already have the right
3151 * host state (host features set)
3153 if (cp.le != lmp_host_le_capable(hdev) ||
3154 cp.simul != lmp_host_le_br_capable(hdev))
3155 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Only toggle authentication if the controller flag disagrees with the
 * mgmt-level link-security setting. */
3159 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3160 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3161 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3162 sizeof(link_sec), &link_sec);
3164 if (lmp_bredr_capable(hdev)) {
3165 set_bredr_scan(&req);
3167 update_name(&req, hdev->dev_name);
3171 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a controller power state change. On power-on, kick
 * off powered_update_hci(); if that queued work (returned 0), the
 * responses are deferred to powered_complete(). On power-off, answer
 * pending SET_POWERED commands, fail all other pending commands with
 * NOT_POWERED, and emit a class-of-device change to zero if the stored
 * class was non-zero. Finally broadcast the new settings.
 */
3174 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3176 struct cmd_lookup match = { NULL, hdev };
3177 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3178 u8 zero_cod[] = { 0, 0, 0 };
3181 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3185 if (powered_update_hci(hdev) == 0)
3188 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3193 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3194 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3196 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3197 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3198 zero_cod, sizeof(zero_cod), NULL);
3201 err = new_settings(hdev, match.sk);
/* Sync the HCI_DISCOVERABLE flag with the controller-reported state,
 * answer any pending SET_DISCOVERABLE commands, and broadcast new
 * settings if the flag actually changed (the test on `changed` before
 * new_settings() is elided from this view).
 */
3209 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3211 struct cmd_lookup match = { NULL, hdev };
3212 bool changed = false;
3216 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3219 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3223 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3227 err = new_settings(hdev, match.sk);
/* Sync the HCI_CONNECTABLE flag with the controller-reported state,
 * answer pending SET_CONNECTABLE commands, and broadcast new settings
 * on an actual change. Mirrors mgmt_discoverable() above.
 */
3235 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3237 struct cmd_lookup match = { NULL, hdev };
3238 bool changed = false;
3242 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3245 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3249 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
3253 err = new_settings(hdev, match.sk);
/* A Write Scan Enable command failed: convert the HCI status to a mgmt
 * status and fail the pending SET_CONNECTABLE and/or SET_DISCOVERABLE
 * commands corresponding to the scan bits that were being set.
 */
3261 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3263 u8 mgmt_err = mgmt_status(status);
3265 if (scan & SCAN_PAGE)
3266 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3267 cmd_status_rsp, &mgmt_err);
3269 if (scan & SCAN_INQUIRY)
3270 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3271 cmd_status_rsp, &mgmt_err);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key.
 * @persistent becomes the event's store_hint, telling userspace whether
 * it should persist the key.
 */
3276 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3279 struct mgmt_ev_new_link_key ev;
3281 memset(&ev, 0, sizeof(ev));
3283 ev.store_hint = persistent;
3284 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3285 ev.key.addr.type = BDADDR_BREDR;
3286 ev.key.type = key->type;
3287 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3288 ev.key.pin_len = key->pin_len;
3290 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new SMP long-term key.
 * NOTE(review): the body of the HCI_SMP_LTK branch is elided in this
 * view — presumably it sets the event's master flag; confirm against
 * the full source.
 */
3293 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3295 struct mgmt_ev_new_long_term_key ev;
3297 memset(&ev, 0, sizeof(ev));
3299 ev.store_hint = persistent;
3300 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3301 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3302 ev.key.authenticated = key->authenticated;
3303 ev.key.enc_size = key->enc_size;
3304 ev.key.ediv = key->ediv;
3306 if (key->type == HCI_SMP_LTK)
3309 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3310 memcpy(ev.key.val, key->val, sizeof(key->val));
3312 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_DEVICE_CONNECTED with a variable-length EIR blob built
 * from the remote name (when present) and class of device (when
 * non-zero). The backing buffer declaration and the name_len guard are
 * elided from this view.
 */
3316 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3317 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3321 struct mgmt_ev_device_connected *ev = (void *) buf;
3324 bacpy(&ev->addr.bdaddr, bdaddr);
3325 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3327 ev->flags = __cpu_to_le32(flags);
3330 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append the CoD field if it is non-zero. */
3333 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3334 eir_len = eir_append_data(ev->eir, eir_len,
3335 EIR_CLASS_OF_DEV, dev_class, 3);
3337 ev->eir_len = cpu_to_le16(eir_len);
3339 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3340 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with success and the address echoed from its parameters.
 * @data points to a struct sock * used to pass the replying socket
 * back to the caller (the assignment to *sk is elided in this view).
 */
3343 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3345 struct mgmt_cp_disconnect *cp = cmd->param;
3346 struct sock **sk = data;
3347 struct mgmt_rp_disconnect rp;
3349 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3350 rp.addr.type = cp->addr.type;
3352 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3358 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit a device-unpaired event for the
 * address in the pending UNPAIR_DEVICE command, complete the command
 * with success, and drop it.
 */
3361 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3363 struct hci_dev *hdev = data;
3364 struct mgmt_cp_unpair_device *cp = cmd->param;
3365 struct mgmt_rp_unpair_device rp;
3367 memset(&rp, 0, sizeof(rp));
3368 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3369 rp.addr.type = cp->addr.type;
3371 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3373 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3375 mgmt_pending_remove(cmd);
/* Handle a remote disconnection: first complete any pending DISCONNECT
 * commands (capturing the replying socket in sk so it is skipped when
 * broadcasting), then emit MGMT_EV_DEVICE_DISCONNECTED, and finally
 * complete any pending UNPAIR_DEVICE commands for this controller.
 */
3378 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3379 u8 link_type, u8 addr_type, u8 reason)
3381 struct mgmt_ev_device_disconnected ev;
3382 struct sock *sk = NULL;
3385 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3387 bacpy(&ev.addr.bdaddr, bdaddr);
3388 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3391 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3397 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: complete pending UNPAIR_DEVICE commands,
 * then — if a DISCONNECT command is pending — complete it with the
 * translated HCI status and the remote address, and drop it.
 */
3403 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3404 u8 link_type, u8 addr_type, u8 status)
3406 struct mgmt_rp_disconnect rp;
3407 struct pending_cmd *cmd;
3410 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3413 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3417 bacpy(&rp.addr.bdaddr, bdaddr);
3418 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3420 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3421 mgmt_status(status), &rp, sizeof(rp));
3423 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the remote address and the HCI
 * status translated to a mgmt status.
 */
3428 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3429 u8 addr_type, u8 status)
3431 struct mgmt_ev_connect_failed ev;
3433 bacpy(&ev.addr.bdaddr, bdaddr);
3434 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3435 ev.status = mgmt_status(status);
3437 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask userspace for a PIN code via MGMT_EV_PIN_CODE_REQUEST. The line
 * assigning @secure into the event is elided from this view.
 */
3440 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3442 struct mgmt_ev_pin_code_request ev;
3444 bacpy(&ev.addr.bdaddr, bdaddr);
3445 ev.addr.type = BDADDR_BREDR;
3448 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* The controller acknowledged a PIN code reply: complete the pending
 * PIN_CODE_REPLY mgmt command with the translated status. If no such
 * command is pending, the (elided) early-return path is taken.
 */
3452 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3455 struct pending_cmd *cmd;
3456 struct mgmt_rp_pin_code_reply rp;
3459 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3463 bacpy(&rp.addr.bdaddr, bdaddr);
3464 rp.addr.type = BDADDR_BREDR;
3466 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3467 mgmt_status(status), &rp, sizeof(rp));
3469 mgmt_pending_remove(cmd);
/* Same as mgmt_pin_code_reply_complete() but for the negative reply:
 * complete a pending PIN_CODE_NEG_REPLY with the translated status.
 */
3474 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3477 struct pending_cmd *cmd;
3478 struct mgmt_rp_pin_code_reply rp;
3481 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3485 bacpy(&rp.addr.bdaddr, bdaddr);
3486 rp.addr.type = BDADDR_BREDR;
3488 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3489 mgmt_status(status), &rp, sizeof(rp));
3491 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a numeric comparison value during pairing
 * via MGMT_EV_USER_CONFIRM_REQUEST. The assignment of @value into the
 * event is elided from this view.
 */
3496 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3497 u8 link_type, u8 addr_type, __le32 value,
3500 struct mgmt_ev_user_confirm_request ev;
3502 BT_DBG("%s", hdev->name);
3504 bacpy(&ev.addr.bdaddr, bdaddr);
3505 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3506 ev.confirm_hint = confirm_hint;
3509 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey during pairing via
 * MGMT_EV_USER_PASSKEY_REQUEST.
 */
3513 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3514 u8 link_type, u8 addr_type)
3516 struct mgmt_ev_user_passkey_request ev;
3518 BT_DBG("%s", hdev->name);
3520 bacpy(&ev.addr.bdaddr, bdaddr);
3521 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3523 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey reply
 * opcodes: find the matching pending command, complete it with the
 * translated status and the remote address, and drop it.
 */
3527 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3528 u8 link_type, u8 addr_type, u8 status,
3531 struct pending_cmd *cmd;
3532 struct mgmt_rp_user_confirm_reply rp;
3535 cmd = mgmt_pending_find(opcode, hdev);
3539 bacpy(&rp.addr.bdaddr, bdaddr);
3540 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3541 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3544 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
3549 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3550 u8 link_type, u8 addr_type, u8 status)
3552 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3553 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
3556 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3557 u8 link_type, u8 addr_type, u8 status)
3559 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3561 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
3564 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3565 u8 link_type, u8 addr_type, u8 status)
3567 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3568 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
3571 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3572 u8 link_type, u8 addr_type, u8 status)
3574 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3576 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey the
 * remote side must enter; @entered counts keypresses reported so far.
 */
3579 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3580 u8 link_type, u8 addr_type, u32 passkey,
3583 struct mgmt_ev_passkey_notify ev;
3585 BT_DBG("%s", hdev->name);
3587 bacpy(&ev.addr.bdaddr, bdaddr);
3588 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3589 ev.passkey = __cpu_to_le32(passkey);
3590 ev.entered = entered;
3592 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_AUTH_FAILED with the remote address and the HCI status
 * translated to a mgmt status.
 */
3595 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3596 u8 addr_type, u8 status)
3598 struct mgmt_ev_auth_failed ev;
3600 bacpy(&ev.addr.bdaddr, bdaddr);
3601 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3602 ev.status = mgmt_status(status);
3604 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Write Authentication Enable finished. On failure, fail all pending
 * SET_LINK_SECURITY commands with the translated status. On success,
 * sync HCI_LINK_SECURITY with the controller's HCI_AUTH flag, answer
 * pending SET_LINK_SECURITY commands, and broadcast new settings if
 * the flag changed (the `changed` test is elided from this view).
 */
3607 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3609 struct cmd_lookup match = { NULL, hdev };
3610 bool changed = false;
3614 u8 mgmt_err = mgmt_status(status);
3615 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3616 cmd_status_rsp, &mgmt_err);
3620 if (test_bit(HCI_AUTH, &hdev->flags)) {
3621 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3624 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3628 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3632 err = new_settings(hdev, match.sk);
/* Zero the cached EIR data and queue a Write Extended Inquiry Response
 * command with an all-zero payload. No-op on controllers without
 * extended-inquiry support.
 */
3640 static void clear_eir(struct hci_request *req)
3642 struct hci_dev *hdev = req->hdev;
3643 struct hci_cp_write_eir cp;
3645 if (!lmp_ext_inq_capable(hdev))
3648 memset(hdev->eir, 0, sizeof(hdev->eir));
3650 memset(&cp, 0, sizeof(cp));
3652 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write Simple Pairing Mode finished. On failure, roll back the
 * HCI_SSP_ENABLED flag if it had been optimistically set and fail the
 * pending SET_SSP commands. On success, sync the flag with @enable,
 * answer pending SET_SSP commands, broadcast new settings if changed,
 * and update or clear the EIR to match the new SSP state (the
 * update_eir() call on the enabled path is elided from this view).
 */
3655 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3657 struct cmd_lookup match = { NULL, hdev };
3658 struct hci_request req;
3659 bool changed = false;
3663 u8 mgmt_err = mgmt_status(status);
3665 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3667 err = new_settings(hdev, NULL);
3669 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3676 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3679 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3683 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3686 err = new_settings(hdev, match.sk);
3691 hci_req_init(&req, hdev);
3693 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3698 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback for class-of-device related commands:
 * complete the pending command with the stored status and the current
 * 3-byte device class, remember the first replying socket in match->sk
 * (held), and free the command. Parallels settings_rsp() above.
 */
3703 static void class_rsp(struct pending_cmd *cmd, void *data)
3705 struct cmd_lookup *match = data;
3707 cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
3708 match->hdev->dev_class, 3);
3710 list_del(&cmd->list);
3712 if (match->sk == NULL) {
3713 match->sk = cmd->sk;
3714 sock_hold(match->sk);
3717 mgmt_pending_free(cmd);
/* A class-of-device update finished: clear the pending-class flag,
 * answer all commands that were waiting on it (SET_DEV_CLASS, ADD_UUID,
 * REMOVE_UUID), and — on success (guard elided from this view) —
 * broadcast MGMT_EV_CLASS_OF_DEV_CHANGED with the new class.
 */
3720 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3723 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3726 clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
3728 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match);
3729 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
3730 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
3733 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Write Local Name finished. Update the cached name if it differs,
 * then either fail (non-zero status) or complete a pending
 * SET_LOCAL_NAME command; with no pending command, broadcast
 * MGMT_EV_LOCAL_NAME_CHANGED only if the name actually changed (that
 * guard is elided from this view). Outside init, also queue an EIR
 * update so the over-the-air name stays in sync.
 */
3742 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3744 struct pending_cmd *cmd;
3745 struct mgmt_cp_set_local_name ev;
3746 bool changed = false;
3749 if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
3750 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3754 memset(&ev, 0, sizeof(ev));
3755 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3756 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3758 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3762 /* Always assume that either the short or the complete name has
3763 * changed if there was a pending mgmt command */
3767 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3768 mgmt_status(status));
3772 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
3779 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3780 sizeof(ev), cmd ? cmd->sk : NULL);
3782 /* EIR is taken care of separately when powering on the
3783 * adapter so only update them here if this is a name change
3784 * unrelated to power on.
3786 if (!test_bit(HCI_INIT, &hdev->flags)) {
3787 struct hci_request req;
3788 hci_req_init(&req, hdev);
3790 hci_req_run(&req, NULL);
3795 mgmt_pending_remove(cmd);
/* Read Local OOB Data finished: complete the pending
 * READ_LOCAL_OOB_DATA command — with a translated error status on
 * failure, or with the hash/randomizer pair on success.
 */
3799 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3800 u8 *randomizer, u8 status)
3802 struct pending_cmd *cmd;
3805 BT_DBG("%s status %u", hdev->name, status);
3807 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3812 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3813 mgmt_status(status));
3815 struct mgmt_rp_read_local_oob_data rp;
3817 memcpy(rp.hash, hash, sizeof(rp.hash));
3818 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3820 err = cmd_complete(cmd->sk, hdev->id,
3821 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3825 mgmt_pending_remove(cmd);
/* Write LE Host Supported finished. On failure, roll back an
 * optimistically set HCI_LE_ENABLED flag and fail the pending SET_LE
 * commands. On success, sync the flag with @enable, answer pending
 * SET_LE commands, and broadcast new settings if the flag changed.
 * Structurally parallel to mgmt_ssp_enable_complete() above.
 */
3830 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3832 struct cmd_lookup match = { NULL, hdev };
3833 bool changed = false;
3837 u8 mgmt_err = mgmt_status(status);
3839 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3841 err = new_settings(hdev, NULL);
3843 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3850 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3853 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3857 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3860 err = new_settings(hdev, match.sk);
/* Report a device discovered during inquiry/scanning via
 * MGMT_EV_DEVICE_FOUND. The event carries the raw EIR data plus, when
 * the EIR lacks a CoD field and one was supplied, an appended
 * class-of-device field (hence the 5-byte reserve). Oversized EIR is
 * rejected (the error return on that path is elided from this view).
 */
3868 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3869 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3870 ssp, u8 *eir, u16 eir_len)
3873 struct mgmt_ev_device_found *ev = (void *) buf;
3876 /* Leave 5 bytes for a potential CoD field */
3877 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3880 memset(buf, 0, sizeof(buf));
3882 bacpy(&ev->addr.bdaddr, bdaddr);
3883 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Flag whether userspace should confirm the name and whether the
 * remote only supports legacy pairing (guards elided in this view). */
3886 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3888 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3891 memcpy(ev->eir, eir, eir_len);
3893 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
3894 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3897 ev->eir_len = cpu_to_le16(eir_len);
3898 ev_size = sizeof(*ev) + eir_len;
3900 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a DEVICE_FOUND event whose EIR
 * contains only an EIR_NAME_COMPLETE field. The stack buffer is sized
 * for the maximum name plus the 2-byte EIR field header.
 */
3903 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3904 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
3906 struct mgmt_ev_device_found *ev;
3907 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3910 ev = (struct mgmt_ev_device_found *) buf;
3912 memset(buf, 0, sizeof(buf));
3914 bacpy(&ev->addr.bdaddr, bdaddr);
3915 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3918 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3921 ev->eir_len = cpu_to_le16(eir_len);
3923 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3924 sizeof(*ev) + eir_len, NULL);
/* Starting discovery failed: reset discovery state to STOPPED and,
 * if a START_DISCOVERY command is pending, complete it with the
 * translated status and the discovery type that was attempted.
 */
3927 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3929 struct pending_cmd *cmd;
3933 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3935 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3939 type = hdev->discovery.type;
3941 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3942 &type, sizeof(type));
3943 mgmt_pending_remove(cmd);
/* Stopping discovery failed: complete a pending STOP_DISCOVERY command
 * with the translated status and the current discovery type.
 */
3948 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3950 struct pending_cmd *cmd;
3953 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3957 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3958 &hdev->discovery.type, sizeof(hdev->discovery.type));
3959 mgmt_pending_remove(cmd);
/* Notify mgmt that discovery started or stopped: complete whichever of
 * START_DISCOVERY/STOP_DISCOVERY is pending (looked up based on the
 * new state — the selection condition is elided from this view), then
 * broadcast MGMT_EV_DISCOVERING with the type and new state.
 */
3964 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
3966 struct mgmt_ev_discovering ev;
3967 struct pending_cmd *cmd;
3969 BT_DBG("%s discovering %u", hdev->name, discovering);
3972 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3974 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3977 u8 type = hdev->discovery.type;
3979 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
3981 mgmt_pending_remove(cmd);
3984 memset(&ev, 0, sizeof(ev));
3985 ev.type = hdev->discovery.type;
3986 ev.discovering = discovering;
3988 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_BLOCKED; if a BLOCK_DEVICE command is pending,
 * its socket is skipped (it gets the command reply instead).
 */
3991 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3993 struct pending_cmd *cmd;
3994 struct mgmt_ev_device_blocked ev;
3996 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
3998 bacpy(&ev.addr.bdaddr, bdaddr);
3999 ev.addr.type = type;
4001 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4002 cmd ? cmd->sk : NULL);
/* Emit MGMT_EV_DEVICE_UNBLOCKED; mirrors mgmt_device_blocked(), with
 * the pending UNBLOCK_DEVICE command's socket skipped.
 */
4005 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4007 struct pending_cmd *cmd;
4008 struct mgmt_ev_device_unblocked ev;
4010 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4012 bacpy(&ev.addr.bdaddr, bdaddr);
4013 ev.addr.type = type;
4015 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4016 cmd ? cmd->sk : NULL);
/* Module parameter exposing the High Speed (AMP) toggle to userspace
 * via /sys/module (the enable_hs variable itself is declared earlier
 * in the file, outside this view).
 */
4019 module_param(enable_hs, bool, 0644);
4020 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");