2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Table of mgmt command opcodes supported by this kernel; reported back
 * to user space via MGMT_OP_READ_COMMANDS.
 * NOTE(review): gaps in the extracted numbering indicate several entries
 * and the closing "};" are missing from this view of the file.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Table of mgmt event opcodes this kernel can emit; reported back to
 * user space via MGMT_OP_READ_COMMANDS alongside mgmt_commands[].
 * NOTE(review): numbering gaps suggest some entries and the closing
 * "};" are missing from this extract.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte (see mgmt_status() below);
 * each entry maps one HCI core-spec error to the closest mgmt status.
 * NOTE(review): the first entry (index 0, "Success") and the closing
 * "};" appear to be missing from this extract.
 */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
/* Queue an MGMT_EV_CMD_STATUS event on @sk reporting @status for
 * command @cmd on controller @index. Returns the sock_queue_rcv_skb()
 * result (0 on success, negative errno otherwise).
 * NOTE(review): the skb declaration, allocation-failure check and the
 * ev->status assignment appear to be missing from this extract.
 */
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE event on @sk for command @cmd on
 * controller @index, carrying @rp_len bytes of response payload @rp.
 * NOTE(review): the skb declaration, NULL checks, the ev->status
 * assignment and the rp NULL guard around the memcpy appear to be
 * missing from this extract.
 */
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: replies with MGMT_VERSION/MGMT_REVISION.
 * @hdev and @data are unused for this global (index-less) command.
 * NOTE(review): the closing "sizeof(rp));" argument of cmd_complete()
 * appears to be cut off in this extract.
 */
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: allocates a variable-length reply
 * listing all supported command and event opcodes (little-endian u16s,
 * commands first) and sends it via cmd_complete().
 * NOTE(review): local declarations (i, opcode, err, rp_size), the
 * kmalloc NULL check and the trailing kfree(rp)/return appear to be
 * missing from this extract.
 */
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: walks hci_dev_list under the read
 * lock, counting valid BR/EDR controllers (first pass), then fills the
 * reply with the ids of those not still in HCI_SETUP (second pass).
 * Allocation is GFP_ATOMIC because the list read-lock is held.
 * NOTE(review): the counting loop body, kmalloc NULL-check branch and
 * trailing kfree(rp)/return appear to be missing from this extract.
 */
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT_SETTING_* bitmask of settings this controller could
 * support, based on its LMP feature bits: POWERED/PAIRABLE always; SSP
 * when SSP-capable; the BR/EDR group when BR/EDR-capable; LE when
 * LE-capable.
 * NOTE(review): the "u32 settings = 0;" declaration, the HS condition
 * and the final "return settings;" appear to be missing from this
 * extract.
 */
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
/* Build the MGMT_SETTING_* bitmask reflecting the controller's CURRENT
 * state, derived from hdev->flags / hdev->dev_flags bits (compare with
 * get_supported_settings() above, which reports capability only).
 * NOTE(review): the "u32 settings = 0;" declaration and the final
 * "return settings;" appear to be missing from this extract.
 */
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
418 if (lmp_bredr_capable(hdev))
419 settings |= MGMT_SETTING_BREDR;
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
436 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR UUID16 list field to @data (at most @len bytes) from
 * hdev->uuids entries of size 16 bits; the 16-bit UUID is taken from
 * bytes 12-13 of the 128-bit form. PnP Information (0x1200) is skipped.
 * Starts as EIR_UUID16_ALL and is downgraded to EIR_UUID16_SOME if the
 * buffer fills up. Returns the advanced write pointer.
 * NOTE(review): the lazy header-initialization branch (uuids_start
 * setup, length byte) and the final "return ptr;" appear to be missing
 * from this extract.
 */
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
/* Append an EIR UUID32 list field to @data (at most @len bytes) from
 * hdev->uuids entries of size 32 bits; the 32-bit value is copied from
 * bytes 12-15 of the stored UUID. Same ALL/SOME downgrade scheme as
 * create_uuid16_list(). Returns the advanced write pointer.
 * NOTE(review): the header-initialization branch, the ptr advance after
 * the memcpy and the final "return ptr;" appear to be missing from this
 * extract.
 */
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
/* Append an EIR UUID128 list field to @data (at most @len bytes) from
 * hdev->uuids entries of size 128 bits, copying the full 16-byte UUID.
 * Same ALL/SOME downgrade scheme as the 16/32-bit variants above.
 * Returns the advanced write pointer.
 * NOTE(review): the header-initialization branch, the ptr advance and
 * the final "return ptr;" appear to be missing from this extract.
 */
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
/* Assemble the Extended Inquiry Response payload into @data: local name
 * (short or complete), inquiry TX power (if valid), Device ID record
 * (if a devid source is set), then the 16/32/128-bit service UUID
 * lists, each bounded by the remaining HCI_MAX_EIR_LENGTH space.
 * NOTE(review): the ptr declaration, the name-length truncation branch
 * and several ptr-advance lines appear to be missing from this extract.
 */
546 static void create_eir(struct hci_dev *hdev, u8 *data)
551 name_len = strlen(hdev->dev_name);
557 ptr[1] = EIR_NAME_SHORT;
559 ptr[1] = EIR_NAME_COMPLETE;
561 /* EIR Data length */
562 ptr[0] = name_len + 1;
564 memcpy(ptr + 2, hdev->dev_name, name_len);
566 ptr += (name_len + 2);
569 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
571 ptr[1] = EIR_TX_POWER;
572 ptr[2] = (u8) hdev->inq_tx_power;
577 if (hdev->devid_source > 0) {
579 ptr[1] = EIR_DEVICE_ID;
581 put_unaligned_le16(hdev->devid_source, ptr + 2);
582 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
583 put_unaligned_le16(hdev->devid_product, ptr + 6);
584 put_unaligned_le16(hdev->devid_version, ptr + 8);
589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command on @req if needed. Bails out when the
 * controller is down, lacks extended-inquiry support, has SSP disabled,
 * or the service cache is active; also skips the write when the freshly
 * built EIR equals the cached hdev->eir.
 * NOTE(review): the early "return;" statements after each guard appear
 * to be missing from this extract.
 */
594 static void update_eir(struct hci_request *req)
596 struct hci_dev *hdev = req->hdev;
597 struct hci_cp_write_eir cp;
599 if (!hdev_is_powered(hdev))
602 if (!lmp_ext_inq_capable(hdev))
605 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
608 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
611 memset(&cp, 0, sizeof(cp));
613 create_eir(hdev, cp.data);
615 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
618 memcpy(hdev->eir, cp.data, sizeof(cp.data));
620 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs; used as
 * the top byte of the Class of Device.
 * NOTE(review): the "u8 val = 0;" declaration and "return val;" appear
 * to be missing from this extract.
 */
623 static u8 get_service_classes(struct hci_dev *hdev)
625 struct bt_uuid *uuid;
628 list_for_each_entry(uuid, &hdev->uuids, list)
629 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command on @req: cod is
 * {minor, major, service classes}, skipped when powered off, when the
 * service cache is active, or when unchanged from hdev->dev_class.
 * Sets HCI_PENDING_CLASS so the completion path knows a write is in
 * flight.
 * NOTE(review): the "u8 cod[3];" declaration and early returns appear
 * to be missing from this extract.
 */
634 static void update_class(struct hci_request *req)
636 struct hci_dev *hdev = req->hdev;
639 BT_DBG("%s", hdev->name);
641 if (!hdev_is_powered(hdev))
644 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
647 cod[0] = hdev->minor_class;
648 cod[1] = hdev->major_class;
649 cod[2] = get_service_classes(hdev);
651 if (memcmp(cod, hdev->dev_class, 3) == 0)
654 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
656 set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and (re)runs class + EIR updates via an hci_request.
 * NOTE(review): the hci_dev_lock() call and the update_class()/
 * update_eir() invocations between init and unlock appear to be missing
 * from this extract.
 */
659 static void service_cache_off(struct work_struct *work)
661 struct hci_dev *hdev = container_of(work, struct hci_dev,
663 struct hci_request req;
665 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
668 hci_req_init(&req, hdev);
675 hci_dev_unlock(hdev);
677 hci_req_run(&req, NULL);
/* One-time mgmt takeover of a controller: set HCI_MGMT, wire up the
 * service-cache delayed work, and clear HCI_PAIRABLE so user space
 * must opt in explicitly (non-mgmt devices get it implicitly).
 */
680 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
682 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
685 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
687 /* Non-mgmt controlled devices get this bit set
688 * implicitly so that pairing works for them, however
689 * for mgmt we require user-space to explicitly enable
692 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: fills mgmt_rp_read_info with address, HCI
 * version/manufacturer, supported+current settings, class and names,
 * under hci_dev_lock, then replies via cmd_complete().
 * NOTE(review): the hci_dev_lock() call and the trailing "sizeof(rp));"
 * argument appear to be missing from this extract.
 */
695 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
696 void *data, u16 data_len)
698 struct mgmt_rp_read_info rp;
700 BT_DBG("sock %p %s", sk, hdev->name);
704 memset(&rp, 0, sizeof(rp));
706 bacpy(&rp.bdaddr, &hdev->bdaddr);
708 rp.version = hdev->hci_ver;
709 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
711 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
712 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
714 memcpy(rp.dev_class, hdev->dev_class, 3);
716 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
717 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
719 hci_dev_unlock(hdev);
721 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Free a pending mgmt command (body not visible in this extract;
 * presumably releases cmd->param, the socket reference and cmd itself
 * — TODO confirm against the full source).
 */
725 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd tracking an in-flight mgmt command, copy the
 * request parameters, and add it to hdev->mgmt_pending.
 * NOTE(review): the kmalloc NULL checks, the sock reference handling
 * and the final "return cmd;" appear to be missing from this extract.
 */
732 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
733 struct hci_dev *hdev, void *data,
736 struct pending_cmd *cmd;
738 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
742 cmd->opcode = opcode;
743 cmd->index = hdev->id;
745 cmd->param = kmalloc(len, GFP_KERNEL);
752 memcpy(cmd->param, data, len);
757 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command matching @opcode (0 = all);
 * _safe iteration allows the callback to remove entries.
 */
762 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
763 void (*cb)(struct pending_cmd *cmd,
767 struct pending_cmd *cmd, *tmp;
769 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
770 if (opcode > 0 && cmd->opcode != opcode)
/* Find the first pending command with the given opcode, or NULL.
 * NOTE(review): the "return cmd;" / "return NULL;" lines appear to be
 * missing from this extract.
 */
777 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
779 struct pending_cmd *cmd;
781 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
782 if (cmd->opcode == opcode)
/* Unlink a pending command from hdev->mgmt_pending and free it. */
789 static void mgmt_pending_remove(struct pending_cmd *cmd)
791 list_del(&cmd->list);
792 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the current settings
 * bitmask as a little-endian u32 cmd_complete payload.
 */
795 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
797 __le32 settings = cpu_to_le32(get_current_settings(hdev));
799 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler. Validates val is 0/1; if HCI_AUTO_OFF
 * was pending it cancels the auto power-off and flips power via
 * mgmt_powered() directly; if the requested state already matches it
 * just replies; a duplicate in-flight SET_POWERED is rejected BUSY;
 * otherwise it records a pending command and queues power_on/power_off
 * work on hdev->req_workqueue.
 * NOTE(review): hci_dev_lock(), several goto/return-failure lines and
 * the final "return err;" appear to be missing from this extract.
 */
803 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
806 struct mgmt_mode *cp = data;
807 struct pending_cmd *cmd;
810 BT_DBG("request for %s", hdev->name);
812 if (cp->val != 0x00 && cp->val != 0x01)
813 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
814 MGMT_STATUS_INVALID_PARAMS);
818 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
819 cancel_delayed_work(&hdev->power_off);
822 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
824 err = mgmt_powered(hdev, 1);
829 if (!!cp->val == hdev_is_powered(hdev)) {
830 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
834 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
835 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
840 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
847 queue_work(hdev->req_workqueue, &hdev->power_on);
849 queue_work(hdev->req_workqueue, &hdev->power_off.work);
854 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except @skip_sk. The
 * index field is hdev->id when @hdev is given, MGMT_INDEX_NONE
 * otherwise; the skb is timestamped before delivery.
 * NOTE(review): the skb declaration/NULL check, the hdev NULL branch
 * around the index assignment, and the kfree_skb/return tail appear to
 * be missing from this extract.
 */
858 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
859 struct sock *skip_sk)
862 struct mgmt_hdr *hdr;
864 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
868 hdr = (void *) skb_put(skb, sizeof(*hdr));
869 hdr->opcode = cpu_to_le16(event);
871 hdr->index = cpu_to_le16(hdev->id);
873 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
874 hdr->len = cpu_to_le16(data_len);
877 memcpy(skb_put(skb, data_len), data, data_len);
880 __net_timestamp(skb);
882 hci_send_to_control(skb, skip_sk);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * every control socket except @skip.
 */
888 static int new_settings(struct hci_dev *hdev, struct sock *skip)
892 ev = cpu_to_le32(get_current_settings(hdev));
894 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler. Validation: requires BR/EDR, val in
 * {0,1}, timeout only meaningful when enabling, and a powered adapter
 * for timed discoverability. Rejects when a SET_DISCOVERABLE or
 * SET_CONNECTABLE command is already pending, or when the adapter is
 * not connectable. When powered off it just toggles HCI_DISCOVERABLE
 * and replies; when the state already matches it (re)arms or cancels
 * the discov_off timer; otherwise it issues HCI Write Scan Enable with
 * SCAN_INQUIRY added and records a pending command.
 * NOTE(review): hci_dev_lock(), several goto-failed/remove lines, the
 * scan-variable setup and the final "return err;" appear to be missing
 * from this extract.
 */
897 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
900 struct mgmt_cp_set_discoverable *cp = data;
901 struct pending_cmd *cmd;
906 BT_DBG("request for %s", hdev->name);
908 if (!lmp_bredr_capable(hdev))
909 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
910 MGMT_STATUS_NOT_SUPPORTED);
912 if (cp->val != 0x00 && cp->val != 0x01)
913 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
914 MGMT_STATUS_INVALID_PARAMS);
916 timeout = __le16_to_cpu(cp->timeout);
917 if (!cp->val && timeout > 0)
918 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
919 MGMT_STATUS_INVALID_PARAMS);
923 if (!hdev_is_powered(hdev) && timeout > 0) {
924 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
925 MGMT_STATUS_NOT_POWERED);
929 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
930 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
931 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
936 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
937 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
938 MGMT_STATUS_REJECTED);
942 if (!hdev_is_powered(hdev)) {
943 bool changed = false;
945 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
946 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
950 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
955 err = new_settings(hdev, sk);
960 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
961 if (hdev->discov_timeout > 0) {
962 cancel_delayed_work(&hdev->discov_off);
963 hdev->discov_timeout = 0;
966 if (cp->val && timeout > 0) {
967 hdev->discov_timeout = timeout;
968 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
969 msecs_to_jiffies(hdev->discov_timeout * 1000));
972 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
976 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
985 scan |= SCAN_INQUIRY;
987 cancel_delayed_work(&hdev->discov_off);
989 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
991 mgmt_pending_remove(cmd);
994 hdev->discov_timeout = timeout;
997 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler. Requires BR/EDR and val in {0,1}.
 * When powered off it toggles HCI_CONNECTABLE (disabling also clears
 * HCI_DISCOVERABLE) and replies; rejects BUSY when a SET_DISCOVERABLE
 * or SET_CONNECTABLE is pending; no-ops when page scan already matches;
 * otherwise records a pending command, cancels the discoverable timer
 * when disabling with one armed, and issues HCI Write Scan Enable.
 * NOTE(review): hci_dev_lock(), the scan-value setup, goto-failed lines
 * and the final "return err;" appear to be missing from this extract.
 */
1001 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1004 struct mgmt_mode *cp = data;
1005 struct pending_cmd *cmd;
1009 BT_DBG("request for %s", hdev->name);
1011 if (!lmp_bredr_capable(hdev))
1012 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1013 MGMT_STATUS_NOT_SUPPORTED);
1015 if (cp->val != 0x00 && cp->val != 0x01)
1016 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1017 MGMT_STATUS_INVALID_PARAMS);
1021 if (!hdev_is_powered(hdev)) {
1022 bool changed = false;
1024 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1028 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1030 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1031 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1034 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1039 err = new_settings(hdev, sk);
1044 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1045 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1046 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1051 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1052 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1056 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1067 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1068 hdev->discov_timeout > 0)
1069 cancel_delayed_work(&hdev->discov_off);
1072 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1074 mgmt_pending_remove(cmd);
1077 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: pure host-side flag, no HCI traffic.
 * Validates val in {0,1}, toggles HCI_PAIRABLE, replies with the new
 * settings and broadcasts new_settings() if anything changed.
 * NOTE(review): hci_dev_lock(), the changed-tracking lines and the
 * final "return err;" appear to be missing from this extract.
 */
1081 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1084 struct mgmt_mode *cp = data;
1087 BT_DBG("request for %s", hdev->name);
1089 if (cp->val != 0x00 && cp->val != 0x01)
1090 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1091 MGMT_STATUS_INVALID_PARAMS);
1096 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1098 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1100 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1104 err = new_settings(hdev, sk);
1107 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler. Requires BR/EDR and val in {0,1}.
 * Powered off: toggle HCI_LINK_SECURITY, reply, broadcast on change.
 * Powered on: reject BUSY if already pending, no-op if HCI_AUTH already
 * matches, otherwise record a pending command and send HCI Write Auth
 * Enable.
 * NOTE(review): hci_dev_lock(), the "val = !!cp->val;" assignment,
 * goto-failed lines and the final "return err;" appear to be missing
 * from this extract.
 */
1111 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1114 struct mgmt_mode *cp = data;
1115 struct pending_cmd *cmd;
1119 BT_DBG("request for %s", hdev->name);
1121 if (!lmp_bredr_capable(hdev))
1122 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1123 MGMT_STATUS_NOT_SUPPORTED);
1125 if (cp->val != 0x00 && cp->val != 0x01)
1126 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1127 MGMT_STATUS_INVALID_PARAMS);
1131 if (!hdev_is_powered(hdev)) {
1132 bool changed = false;
1134 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1135 &hdev->dev_flags)) {
1136 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1140 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1145 err = new_settings(hdev, sk);
1150 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1151 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1158 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1159 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1163 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1169 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1171 mgmt_pending_remove(cmd);
1176 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler. Requires SSP capability and val in {0,1}.
 * Powered off: toggle HCI_SSP_ENABLED, reply, broadcast on change.
 * Powered on: reject BUSY if pending, no-op if the flag already
 * matches, otherwise record a pending command and send HCI Write
 * Simple Pairing Mode.
 * NOTE(review): hci_dev_lock(), the "val = !!cp->val;" assignment,
 * goto-failed lines and the final "return err;" appear to be missing
 * from this extract.
 */
1180 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1182 struct mgmt_mode *cp = data;
1183 struct pending_cmd *cmd;
1187 BT_DBG("request for %s", hdev->name);
1189 if (!lmp_ssp_capable(hdev))
1190 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1191 MGMT_STATUS_NOT_SUPPORTED);
1193 if (cp->val != 0x00 && cp->val != 0x01)
1194 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1195 MGMT_STATUS_INVALID_PARAMS);
1201 if (!hdev_is_powered(hdev)) {
1202 bool changed = false;
1204 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1205 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1209 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1214 err = new_settings(hdev, sk);
1219 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1220 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1225 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1226 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1236 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1238 mgmt_pending_remove(cmd);
1243 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: host-side flag only, toggles
 * HCI_HS_ENABLED and replies with the current settings.
 * NOTE(review): the capability guard condition before the first
 * cmd_status (presumably an HS/SSP support check — TODO confirm) and
 * the val branch line appear to be missing from this extract.
 */
1247 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1249 struct mgmt_mode *cp = data;
1251 BT_DBG("request for %s", hdev->name);
1254 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1255 MGMT_STATUS_NOT_SUPPORTED);
1257 if (cp->val != 0x00 && cp->val != 0x01)
1258 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1259 MGMT_STATUS_INVALID_PARAMS);
1262 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1264 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1266 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler. Requires LE capability and val in {0,1}.
 * When powered off or the host-LE state already matches, only the
 * HCI_LE_ENABLED flag is toggled and replies sent; rejects BUSY when a
 * SET_LE is pending; otherwise records a pending command and issues HCI
 * Write LE Host Supported (simul set from lmp_le_br_capable()).
 * NOTE(review): hci_dev_lock(), the "val = !!cp->val;" assignment, the
 * hci_cp.le assignment, goto lines and the final "return err;" appear
 * to be missing from this extract.
 */
1269 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1271 struct mgmt_mode *cp = data;
1272 struct hci_cp_write_le_host_supported hci_cp;
1273 struct pending_cmd *cmd;
1277 BT_DBG("request for %s", hdev->name);
1279 if (!lmp_le_capable(hdev))
1280 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1281 MGMT_STATUS_NOT_SUPPORTED);
1283 if (cp->val != 0x00 && cp->val != 0x01)
1284 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1285 MGMT_STATUS_INVALID_PARAMS);
1290 enabled = lmp_host_le_capable(hdev);
1292 if (!hdev_is_powered(hdev) || val == enabled) {
1293 bool changed = false;
1295 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1296 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1300 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1305 err = new_settings(hdev, sk);
1310 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1311 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1316 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1322 memset(&hci_cp, 0, sizeof(hci_cp));
1326 hci_cp.simul = lmp_le_br_capable(hdev);
1329 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1332 mgmt_pending_remove(cmd);
1335 hci_dev_unlock(hdev);
1339 /* This is a helper function to test for pending mgmt commands that can
1340 * cause CoD or EIR HCI commands. We can only allow one such pending
1341 * mgmt command at a time since otherwise we cannot easily track what
1342 * the current values are, will be, and based on that calculate if a new
1343 * HCI command needs to be sent and if yes with what value.
/* Returns true when any ADD/REMOVE_UUID, SET_DEV_CLASS or SET_POWERED
 * command is pending on @hdev.
 * NOTE(review): the "return true;" / "return false;" lines appear to be
 * missing from this extract.
 */
1345 static bool pending_eir_or_class(struct hci_dev *hdev)
1347 struct pending_cmd *cmd;
1349 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1350 switch (cmd->opcode) {
1351 case MGMT_OP_ADD_UUID:
1352 case MGMT_OP_REMOVE_UUID:
1353 case MGMT_OP_SET_DEV_CLASS:
1354 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs are aliases of this base.
 */
1362 static const u8 bluetooth_base_uuid[] = {
1363 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1364 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 16, 32 or 128 bits wide: anything not
 * built on the Bluetooth base UUID is 128-bit; otherwise the width
 * depends on the value at bytes 12-15.
 * NOTE(review): the "return 128;"/"return 16;"/"return 32;" branch
 * lines appear to be missing from this extract.
 */
1367 static u8 get_uuid_size(const u8 *uuid)
1371 if (memcmp(uuid, bluetooth_base_uuid, 12))
1374 val = get_unaligned_le32(&uuid[12]);
/* Common completion path for the class/EIR-affecting commands: find the
 * pending command for @mgmt_op, reply with the (possibly updated)
 * device class, and remove the pending entry. Runs under hci_dev_lock.
 * NOTE(review): the hci_dev_lock() call and the NULL-cmd goto appear to
 * be missing from this extract.
 */
1381 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1383 struct pending_cmd *cmd;
1387 cmd = mgmt_pending_find(mgmt_op, hdev);
1391 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1392 hdev->dev_class, 3);
1394 mgmt_pending_remove(cmd);
1397 hci_dev_unlock(hdev);
/* hci_request completion callback for add_uuid(). */
1400 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1402 BT_DBG("status 0x%02x", status);
1404 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: rejects BUSY while another class/EIR
 * command is pending, stores the new UUID (with its svc_hint and
 * computed size) on hdev->uuids, then runs an hci_request; -ENODATA
 * (nothing to send) is treated as immediate success, otherwise a
 * pending command is recorded for add_uuid_complete().
 * NOTE(review): hci_dev_lock(), the kmalloc NULL check, the
 * update_class()/update_eir() calls inside the request, goto lines and
 * the final "return err;" appear to be missing from this extract.
 */
1407 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1409 struct mgmt_cp_add_uuid *cp = data;
1410 struct pending_cmd *cmd;
1411 struct hci_request req;
1412 struct bt_uuid *uuid;
1415 BT_DBG("request for %s", hdev->name);
1419 if (pending_eir_or_class(hdev)) {
1420 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1425 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1431 memcpy(uuid->uuid, cp->uuid, 16);
1432 uuid->svc_hint = cp->svc_hint;
1433 uuid->size = get_uuid_size(cp->uuid);
1435 list_add_tail(&uuid->list, &hdev->uuids);
1437 hci_req_init(&req, hdev);
1442 err = hci_req_run(&req, add_uuid_complete);
1444 if (err != -ENODATA)
1447 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1448 hdev->dev_class, 3);
1452 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1461 hci_dev_unlock(hdev);
/* Arm the service-cache timer on a powered controller so UUID removal
 * can be batched; returns whether the cache is (now) active.
 * NOTE(review): the "return false;"/"return true;" lines and the
 * CACHE_TIMEOUT argument appear to be missing from this extract.
 */
1465 static bool enable_service_cache(struct hci_dev *hdev)
1467 if (!hdev_is_powered(hdev))
1470 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1471 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_request completion callback for remove_uuid(). */
1479 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1481 BT_DBG("status 0x%02x", status);
1483 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: rejects BUSY while another class/EIR
 * command is pending. An all-zero UUID clears the whole list (replying
 * immediately if the service cache absorbs the update); otherwise every
 * matching entry is unlinked, INVALID_PARAMS if none matched. Then runs
 * an hci_request, treating -ENODATA as immediate success, else records
 * a pending command for remove_uuid_complete().
 * NOTE(review): hci_dev_lock(), the found-counter lines, kfree of
 * removed entries, update_class()/update_eir() calls, goto lines and
 * the final "return err;" appear to be missing from this extract.
 */
1486 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1489 struct mgmt_cp_remove_uuid *cp = data;
1490 struct pending_cmd *cmd;
1491 struct bt_uuid *match, *tmp;
1492 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1493 struct hci_request req;
1496 BT_DBG("request for %s", hdev->name);
1500 if (pending_eir_or_class(hdev)) {
1501 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1506 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1507 err = hci_uuids_clear(hdev);
1509 if (enable_service_cache(hdev)) {
1510 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1511 0, hdev->dev_class, 3);
1520 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1521 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1524 list_del(&match->list);
1530 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1531 MGMT_STATUS_INVALID_PARAMS);
1536 hci_req_init(&req, hdev);
1541 err = hci_req_run(&req, remove_uuid_complete);
1543 if (err != -ENODATA)
1546 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1547 hdev->dev_class, 3);
1551 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1560 hci_dev_unlock(hdev);
/* hci_request completion callback for set_dev_class(). */
1564 static void set_class_complete(struct hci_dev *hdev, u8 status)
1566 BT_DBG("status 0x%02x", status);
1568 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler. Requires BR/EDR; rejects BUSY while
 * another class/EIR command is pending; validates that the reserved
 * bits of minor (low 2) and major (high 3) are zero. Stores the new
 * class, replies immediately when powered off, flushes the service
 * cache synchronously if active, then runs an hci_request — -ENODATA
 * means nothing needed sending (immediate success reply), otherwise a
 * pending command is recorded for set_class_complete().
 * NOTE(review): hci_dev_lock(), the re-lock after
 * cancel_delayed_work_sync(), update_class()/update_eir() calls, goto
 * lines and the final "return err;" appear to be missing from this
 * extract.
 */
1571 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1574 struct mgmt_cp_set_dev_class *cp = data;
1575 struct pending_cmd *cmd;
1576 struct hci_request req;
1579 BT_DBG("request for %s", hdev->name);
1581 if (!lmp_bredr_capable(hdev))
1582 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1583 MGMT_STATUS_NOT_SUPPORTED);
1587 if (pending_eir_or_class(hdev)) {
1588 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1593 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1594 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1595 MGMT_STATUS_INVALID_PARAMS);
1599 hdev->major_class = cp->major;
1600 hdev->minor_class = cp->minor;
1602 if (!hdev_is_powered(hdev)) {
1603 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1604 hdev->dev_class, 3);
1608 hci_req_init(&req, hdev);
1610 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1611 hci_dev_unlock(hdev);
1612 cancel_delayed_work_sync(&hdev->service_cache);
1619 err = hci_req_run(&req, set_class_complete);
1621 if (err != -ENODATA)
1624 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1625 hdev->dev_class, 3);
1629 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1638 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_LOAD_LINK_KEYS (variable-length): validate and install a
 * batch of BR/EDR link keys supplied by userspace, replacing any stored
 * keys. */
1642 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1645 struct mgmt_cp_load_link_keys *cp = data;
1646 u16 key_count, expected_len;
1649 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16, so for very large key_count this
 * multiply can wrap and a crafted length could pass the check below —
 * consider bounding key_count against a maximum first; confirm whether an
 * elided check already does this. */
1651 expected_len = sizeof(*cp) + key_count *
1652 sizeof(struct mgmt_link_key_info);
/* The declared key_count must exactly match the payload length. */
1653 if (expected_len != len) {
1654 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1656 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1657 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a strict boolean flag. */
1660 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1661 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1662 MGMT_STATUS_INVALID_PARAMS);
1664 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: every key must be a BR/EDR address before anything is
 * mutated, so a bad entry rejects the whole batch atomically. */
1667 for (i = 0; i < key_count; i++) {
1668 struct mgmt_link_key_info *key = &cp->keys[i];
1670 if (key->addr.type != BDADDR_BREDR)
1671 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1672 MGMT_STATUS_INVALID_PARAMS);
/* Replace semantics: drop all previously stored link keys. */
1677 hci_link_keys_clear(hdev);
1679 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
/* Record whether debug keys are accepted for this controller. */
1682 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1684 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: install the validated keys. */
1686 for (i = 0; i < key_count; i++) {
1687 struct mgmt_link_key_info *key = &cp->keys[i];
1689 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1690 key->type, key->pin_len);
1693 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1695 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Emit MGMT_EV_DEVICE_UNPAIRED to all mgmt sockets except skip_sk (the
 * requester already gets a command reply). */
1700 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1701 u8 addr_type, struct sock *skip_sk)
1703 struct mgmt_ev_device_unpaired ev;
1705 bacpy(&ev.addr.bdaddr, bdaddr);
1706 ev.addr.type = addr_type;
1708 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handle MGMT_OP_UNPAIR_DEVICE: delete the stored link key (BR/EDR) or
 * LTK (LE) for the address and optionally disconnect the live link. */
1712 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1715 struct mgmt_cp_unpair_device *cp = data;
1716 struct mgmt_rp_unpair_device rp;
1717 struct hci_cp_disconnect dc;
1718 struct pending_cmd *cmd;
1719 struct hci_conn *conn;
/* Echo the target address back in every reply. */
1722 memset(&rp, 0, sizeof(rp));
1723 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1724 rp.addr.type = cp->addr.type;
1726 if (!bdaddr_type_is_valid(cp->addr.type))
1727 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1728 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a strict boolean flag. */
1731 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1732 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1733 MGMT_STATUS_INVALID_PARAMS,
1738 if (!hdev_is_powered(hdev)) {
1739 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1740 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Key removal is transport-specific: link key for BR/EDR, LTK for LE. */
1744 if (cp->addr.type == BDADDR_BREDR)
1745 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1747 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
/* No key existed — the device was never paired. */
1750 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1751 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
/* Look for a live connection only when the caller asked to drop it. */
1755 if (cp->disconnect) {
1756 if (cp->addr.type == BDADDR_BREDR)
1757 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1760 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No connection (or no disconnect requested): reply now and notify
 * the other mgmt sockets. */
1767 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1769 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Connection must be torn down: defer the reply until it is gone. */
1773 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1780 dc.handle = cpu_to_le16(conn->handle);
1781 dc.reason = 0x13; /* Remote User Terminated Connection */
1782 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Presumably only on send failure — confirm against the elided
 * "if (err < 0)" guard. */
1784 mgmt_pending_remove(cmd);
1787 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_DISCONNECT: terminate the ACL/LE link to the given
 * address with reason "Remote User Terminated Connection". */
1791 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1794 struct mgmt_cp_disconnect *cp = data;
1795 struct mgmt_rp_disconnect rp;
1796 struct hci_cp_disconnect dc;
1797 struct pending_cmd *cmd;
1798 struct hci_conn *conn;
/* Echo the target address back in every reply. */
1803 memset(&rp, 0, sizeof(rp));
1804 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1805 rp.addr.type = cp->addr.type;
1807 if (!bdaddr_type_is_valid(cp->addr.type))
1808 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1809 MGMT_STATUS_INVALID_PARAMS,
1814 if (!test_bit(HCI_UP, &hdev->flags)) {
1815 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1816 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be outstanding at a time. */
1820 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1821 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1822 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Transport-specific connection lookup. */
1826 if (cp->addr.type == BDADDR_BREDR)
1827 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1830 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections have no link to tear down. */
1832 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1833 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1834 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
/* Reply is deferred until the disconnect completes. */
1838 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1844 dc.handle = cpu_to_le16(conn->handle);
1845 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1847 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Presumably only on send failure — confirm against the elided guard. */
1849 mgmt_pending_remove(cmd);
1852 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Map an HCI (link_type, addr_type) pair onto the single mgmt BDADDR_*
 * address-type value used on the management interface. */
1856 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1858 switch (link_type) {
1860 switch (addr_type) {
1861 case ADDR_LE_DEV_PUBLIC:
1862 return BDADDR_LE_PUBLIC;
1865 /* Fallback to LE Random address type */
1866 return BDADDR_LE_RANDOM;
1870 /* Fallback to BR/EDR type */
1871 return BDADDR_BREDR;
/* Handle MGMT_OP_GET_CONNECTIONS: return the addresses of all
 * mgmt-visible (non-SCO) connections. */
1875 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1878 struct mgmt_rp_get_connections *rp;
1888 if (!hdev_is_powered(hdev)) {
1889 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1890 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected to size the
 * reply buffer. */
1895 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1896 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1900 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1901 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the addresses, skipping SCO/eSCO links which are
 * not reported on the mgmt interface. */
1908 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1909 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1911 bacpy(&rp->addr[i].bdaddr, &c->dst);
1912 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1913 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1918 rp->conn_count = cpu_to_le16(i);
1920 /* Recalculate length in case of filtered SCO connections, etc */
1921 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1923 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1929 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Queue a pending MGMT_OP_PIN_CODE_NEG_REPLY and send the HCI negative
 * reply carrying just the peer address. */
1933 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1934 struct mgmt_cp_pin_code_neg_reply *cp)
1936 struct pending_cmd *cmd;
1939 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1944 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1945 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* Presumably only on send failure — confirm against the elided guard. */
1947 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to the
 * controller for an ongoing BR/EDR pairing. */
1952 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1955 struct hci_conn *conn;
1956 struct mgmt_cp_pin_code_reply *cp = data;
1957 struct hci_cp_pin_code_reply reply;
1958 struct pending_cmd *cmd;
1965 if (!hdev_is_powered(hdev)) {
1966 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1967 MGMT_STATUS_NOT_POWERED);
/* A PIN reply only makes sense for an existing ACL connection. */
1971 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1973 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1974 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; anything shorter is turned
 * into a negative reply toward the controller so pairing fails cleanly. */
1978 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1979 struct mgmt_cp_pin_code_neg_reply ncp;
1981 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1983 BT_ERR("PIN code is not 16 bytes long");
1985 err = send_pin_code_neg_reply(sk, hdev, &ncp);
/* Presumably taken when the negative reply itself succeeded — the
 * requester still gets INVALID_PARAMS; confirm against elided branch. */
1987 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1988 MGMT_STATUS_INVALID_PARAMS);
1993 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
/* Build the HCI_OP_PIN_CODE_REPLY payload from the mgmt request. */
1999 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2000 reply.pin_len = cp->pin_len;
2001 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2003 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2005 mgmt_pending_remove(cmd);
2008 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_SET_IO_CAPABILITY: record the IO capability used for
 * subsequent pairings; always succeeds. */
2012 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2015 struct mgmt_cp_set_io_capability *cp = data;
2021 hdev->io_capability = cp->io_capability;
2023 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2024 hdev->io_capability);
2026 hci_dev_unlock(hdev);
2028 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data is this
 * connection, if any. */
2032 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2034 struct hci_dev *hdev = conn->hdev;
2035 struct pending_cmd *cmd;
2037 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2038 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2041 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the reply, detach our callbacks
 * from the connection and drop the pending entry. */
2050 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2052 struct mgmt_rp_pair_device rp;
2053 struct hci_conn *conn = cmd->user_data;
2055 bacpy(&rp.addr.bdaddr, &conn->dst);
2056 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2058 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2061 /* So we don't get further callbacks for this connection */
2062 conn->connect_cfm_cb = NULL;
2063 conn->security_cfm_cb = NULL;
2064 conn->disconn_cfm_cb = NULL;
2068 mgmt_pending_remove(cmd);
/* Connection/security callback for BR/EDR pairing attempts. */
2071 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2073 struct pending_cmd *cmd;
2075 BT_DBG("status %u", status);
2077 cmd = find_pairing(conn);
2079 BT_DBG("Unable to find a pending command");
2081 pairing_complete(cmd, mgmt_status(status));
/* LE connect callback: unlike BR/EDR, a successful LE connection alone
 * does not finish the pairing (see pair_device). */
2084 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2086 struct pending_cmd *cmd;
2088 BT_DBG("status %u", status);
2093 cmd = find_pairing(conn);
2095 BT_DBG("Unable to find a pending command");
2097 pairing_complete(cmd, mgmt_status(status));
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_PAIR_DEVICE: initiate an ACL or LE connection with
 * dedicated bonding and complete the command from the connection's
 * security callbacks. */
2100 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2103 struct mgmt_cp_pair_device *cp = data;
2104 struct mgmt_rp_pair_device rp;
2105 struct pending_cmd *cmd;
2106 u8 sec_level, auth_type;
2107 struct hci_conn *conn;
/* Echo the target address back in every reply. */
2112 memset(&rp, 0, sizeof(rp));
2113 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2114 rp.addr.type = cp->addr.type;
2116 if (!bdaddr_type_is_valid(cp->addr.type))
2117 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2118 MGMT_STATUS_INVALID_PARAMS,
2123 if (!hdev_is_powered(hdev)) {
2124 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2125 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 = NoInputNoOutput: bonding without MITM protection;
 * otherwise require MITM for the dedicated bond. */
2129 sec_level = BT_SECURITY_MEDIUM;
2130 if (cp->io_cap == 0x03)
2131 auth_type = HCI_AT_DEDICATED_BONDING;
2133 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
/* Transport-specific connect. */
2135 if (cp->addr.type == BDADDR_BREDR)
2136 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2137 cp->addr.type, sec_level, auth_type);
2139 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2140 cp->addr.type, sec_level, auth_type);
/* hci_connect() returns ERR_PTR on failure. */
2145 if (PTR_ERR(conn) == -EBUSY)
2146 status = MGMT_STATUS_BUSY;
2148 status = MGMT_STATUS_CONNECT_FAILED;
2150 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already installed means another pairing owns this
 * connection. */
2156 if (conn->connect_cfm_cb) {
2158 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2159 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2163 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2170 /* For LE, just connecting isn't a proof that the pairing finished */
2171 if (cp->addr.type == BDADDR_BREDR)
2172 conn->connect_cfm_cb = pairing_complete_cb;
2174 conn->connect_cfm_cb = le_connect_complete_cb;
2176 conn->security_cfm_cb = pairing_complete_cb;
2177 conn->disconn_cfm_cb = pairing_complete_cb;
2178 conn->io_capability = cp->io_cap;
/* Link the pending command to the connection for find_pairing(). */
2179 cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2181 if (conn->state == BT_CONNECTED &&
2182 hci_conn_security(conn, sec_level, auth_type))
2183 pairing_complete(cmd, 0);
2188 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the in-flight PAIR_DEVICE
 * command that targets the given address. */
2192 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2195 struct mgmt_addr_info *addr = data;
2196 struct pending_cmd *cmd;
2197 struct hci_conn *conn;
2204 if (!hdev_is_powered(hdev)) {
2205 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2206 MGMT_STATUS_NOT_POWERED);
/* No pairing is pending at all → nothing to cancel. */
2210 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2212 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2213 MGMT_STATUS_INVALID_PARAMS);
2217 conn = cmd->user_data;
/* The pending pairing must be for the address the caller named. */
2219 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2220 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2221 MGMT_STATUS_INVALID_PARAMS);
/* Finish the pending PAIR_DEVICE with CANCELLED, then ack the cancel. */
2225 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2227 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2228 addr, sizeof(*addr));
2230 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Common worker for all user confirmation / passkey (neg-)replies:
 * route LE responses through SMP and BR/EDR responses through the given
 * HCI command (hci_op), replying to mgmt_op on the mgmt side. */
2234 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2235 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2236 u16 hci_op, __le32 passkey)
2238 struct pending_cmd *cmd;
2239 struct hci_conn *conn;
2244 if (!hdev_is_powered(hdev)) {
2245 err = cmd_status(sk, hdev->id, mgmt_op,
2246 MGMT_STATUS_NOT_POWERED);
/* Transport-specific connection lookup. */
2250 if (type == BDADDR_BREDR)
2251 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2253 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2256 err = cmd_status(sk, hdev->id, mgmt_op,
2257 MGMT_STATUS_NOT_CONNECTED);
/* LE: the response is consumed by SMP, no HCI command is sent. */
2261 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2262 /* Continue with pairing via SMP */
2263 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2266 err = cmd_status(sk, hdev->id, mgmt_op,
2267 MGMT_STATUS_SUCCESS);
2269 err = cmd_status(sk, hdev->id, mgmt_op,
2270 MGMT_STATUS_FAILED);
/* BR/EDR: keep the command pending until the HCI reply event. */
2275 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2281 /* Continue with pairing via HCI */
/* Passkey replies carry address + passkey; all other ops carry just
 * the address. */
2282 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2283 struct hci_cp_user_passkey_reply cp;
2285 bacpy(&cp.bdaddr, bdaddr);
2286 cp.passkey = passkey;
2287 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2289 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
/* Presumably only on send failure — confirm against the elided guard. */
2292 mgmt_pending_remove(cmd);
2295 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Thin wrappers: each mgmt user-response opcode maps onto
 * user_pairing_resp() with its matching HCI opcode (0 passkey where the
 * operation carries none). */
2299 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2300 void *data, u16 len)
2302 struct mgmt_cp_pin_code_neg_reply *cp = data;
2306 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2307 MGMT_OP_PIN_CODE_NEG_REPLY,
2308 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2311 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2314 struct mgmt_cp_user_confirm_reply *cp = data;
/* Explicit exact-length check (this opcode is not var_len). */
2318 if (len != sizeof(*cp))
2319 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2320 MGMT_STATUS_INVALID_PARAMS);
2322 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2323 MGMT_OP_USER_CONFIRM_REPLY,
2324 HCI_OP_USER_CONFIRM_REPLY, 0);
2327 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2328 void *data, u16 len)
2330 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2334 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2335 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2336 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2339 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2342 struct mgmt_cp_user_passkey_reply *cp = data;
/* Only the positive passkey reply forwards an actual passkey value. */
2346 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2347 MGMT_OP_USER_PASSKEY_REPLY,
2348 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2351 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2352 void *data, u16 len)
2354 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2358 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2359 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2360 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Append an HCI Write Local Name command to the given request. */
2363 static void update_name(struct hci_request *req, const char *name)
2365 struct hci_cp_write_local_name cp;
2367 memcpy(cp.name, name, sizeof(cp.name));
2369 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Handle MGMT_OP_SET_LOCAL_NAME: store the short name always, and push
 * the full name to the controller when powered. */
2372 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2375 struct mgmt_cp_set_local_name *cp = data;
2376 struct pending_cmd *cmd;
2377 struct hci_request req;
/* Short name needs no HCI command; it is host-side state only here. */
2384 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2386 if (!hdev_is_powered(hdev)) {
2387 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2389 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Notify other mgmt sockets of the name change while powered off. */
2394 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2400 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2406 hci_req_init(&req, hdev);
2407 update_name(&req, cp->name);
2408 err = hci_req_run(&req, NULL);
/* Presumably only on hci_req_run() failure — confirm elided guard. */
2410 mgmt_pending_remove(cmd);
2413 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its SSP
 * OOB hash/randomizer; reply arrives via the pending command. */
2417 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2418 void *data, u16 data_len)
2420 struct pending_cmd *cmd;
2423 BT_DBG("%s", hdev->name);
2427 if (!hdev_is_powered(hdev)) {
2428 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2429 MGMT_STATUS_NOT_POWERED);
/* OOB data is an SSP feature. */
2433 if (!lmp_ssp_capable(hdev)) {
2434 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2435 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be in flight. */
2439 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2440 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2445 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2451 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
/* Presumably only on send failure — confirm against the elided guard. */
2453 mgmt_pending_remove(cmd);
2456 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store a peer's OOB hash/randomizer
 * for later SSP pairing. */
2460 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2461 void *data, u16 len)
2463 struct mgmt_cp_add_remote_oob_data *cp = data;
2467 BT_DBG("%s ", hdev->name);
2471 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2474 status = MGMT_STATUS_FAILED;
2476 status = MGMT_STATUS_SUCCESS;
2478 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2479 &cp->addr, sizeof(cp->addr));
2481 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored OOB data for the
 * address; a miss maps to INVALID_PARAMS rather than FAILED. */
2485 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2486 void *data, u16 len)
2488 struct mgmt_cp_remove_remote_oob_data *cp = data;
2492 BT_DBG("%s", hdev->name);
2496 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2498 status = MGMT_STATUS_INVALID_PARAMS;
2500 status = MGMT_STATUS_SUCCESS;
2502 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2503 status, &cp->addr, sizeof(cp->addr));
2505 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Kick off the BR/EDR inquiry phase of an interleaved (BR/EDR + LE)
 * discovery; on failure the discovery state falls back to STOPPED. */
2509 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2513 BT_DBG("%s", hdev->name);
2517 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2519 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2521 hci_dev_unlock(hdev);
/* Handle MGMT_OP_START_DISCOVERY: begin BR/EDR inquiry, LE scan, or the
 * interleaved combination, depending on cp->type. */
2526 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2527 void *data, u16 len)
2529 struct mgmt_cp_start_discovery *cp = data;
2530 struct pending_cmd *cmd;
2533 BT_DBG("%s", hdev->name);
2537 if (!hdev_is_powered(hdev)) {
2538 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2539 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and mgmt discovery are mutually exclusive. */
2543 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2544 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
/* Only one discovery session at a time. */
2549 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2550 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2555 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2561 hdev->discovery.type = cp->type;
/* Each type is gated on the matching controller capability before the
 * corresponding inquiry/scan is started. */
2563 switch (hdev->discovery.type) {
2564 case DISCOV_TYPE_BREDR:
2565 if (!lmp_bredr_capable(hdev)) {
2566 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2567 MGMT_STATUS_NOT_SUPPORTED);
2568 mgmt_pending_remove(cmd);
2572 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2575 case DISCOV_TYPE_LE:
2576 if (!lmp_host_le_capable(hdev)) {
2577 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2578 MGMT_STATUS_NOT_SUPPORTED);
2579 mgmt_pending_remove(cmd);
2583 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2584 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
/* Interleaved needs both transports; LE scan runs first, then the
 * BR/EDR inquiry follows via mgmt_interleaved_discovery(). */
2587 case DISCOV_TYPE_INTERLEAVED:
2588 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2589 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2590 MGMT_STATUS_NOT_SUPPORTED);
2591 mgmt_pending_remove(cmd);
2595 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2596 LE_SCAN_TIMEOUT_BREDR_LE);
/* Unknown discovery type. */
2600 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2601 MGMT_STATUS_INVALID_PARAMS);
2602 mgmt_pending_remove(cmd);
/* Presumably the common error path after the switch — confirm. */
2607 mgmt_pending_remove(cmd);
/* Success: mark discovery as starting; FINDING follows from events. */
2609 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2612 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_STOP_DISCOVERY: cancel whatever phase the current
 * discovery session is in (inquiry, LE scan, or name resolution). */
2616 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2619 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2620 struct pending_cmd *cmd;
2621 struct hci_cp_remote_name_req_cancel cp;
2622 struct inquiry_entry *e;
2625 BT_DBG("%s", hdev->name);
/* Nothing to stop. */
2629 if (!hci_discovery_active(hdev)) {
2630 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2631 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2632 sizeof(mgmt_cp->type));
/* The caller must name the same discovery type that is running. */
2636 if (hdev->discovery.type != mgmt_cp->type) {
2637 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2638 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2639 sizeof(mgmt_cp->type));
2643 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
/* Cancellation depends on which phase discovery is in. */
2649 switch (hdev->discovery.state) {
2650 case DISCOVERY_FINDING:
/* HCI_INQUIRY set → BR/EDR inquiry running, else an LE scan. */
2651 if (test_bit(HCI_INQUIRY, &hdev->flags))
2652 err = hci_cancel_inquiry(hdev);
2654 err = hci_cancel_le_scan(hdev);
2658 case DISCOVERY_RESOLVING:
/* Find the entry whose remote name is being resolved right now. */
2659 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* No resolve in flight: finish immediately. */
2662 mgmt_pending_remove(cmd);
2663 err = cmd_complete(sk, hdev->id,
2664 MGMT_OP_STOP_DISCOVERY, 0,
2666 sizeof(mgmt_cp->type));
2667 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Otherwise cancel the outstanding remote name request. */
2671 bacpy(&cp.bdaddr, &e->data.bdaddr);
2672 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2678 BT_DBG("unknown discovery state %u", hdev->discovery.state);
/* Presumably the common error path — confirm elided guard. */
2683 mgmt_pending_remove(cmd);
/* Success: reply completes once the stop takes effect. */
2685 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2688 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_CONFIRM_NAME: userspace tells us whether it already
 * knows the remote name of a discovered device, which decides whether a
 * name-resolution round is needed for it. */
2692 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2695 struct mgmt_cp_confirm_name *cp = data;
2696 struct inquiry_entry *e;
2699 BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery session is running. */
2703 if (!hci_discovery_active(hdev)) {
2704 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2705 MGMT_STATUS_FAILED);
/* The address must be in the cache with name state "unknown". */
2709 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2711 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2712 MGMT_STATUS_INVALID_PARAMS);
2716 if (cp->name_known) {
2717 e->name_state = NAME_KNOWN;
/* Name not known: queue the entry for resolution. */
2720 e->name_state = NAME_NEEDED;
2721 hci_inquiry_cache_update_resolve(hdev, e);
2724 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2728 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_BLOCK_DEVICE: add the address to the kernel blacklist. */
2732 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2735 struct mgmt_cp_block_device *cp = data;
2739 BT_DBG("%s", hdev->name);
2741 if (!bdaddr_type_is_valid(cp->addr.type))
2742 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2743 MGMT_STATUS_INVALID_PARAMS,
2744 &cp->addr, sizeof(cp->addr));
2748 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2750 status = MGMT_STATUS_FAILED;
2752 status = MGMT_STATUS_SUCCESS;
/* Reply always echoes the address; status carries the outcome. */
2754 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2755 &cp->addr, sizeof(cp->addr));
2757 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the address from the blacklist;
 * a miss maps to INVALID_PARAMS (it was never blocked). */
2762 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2765 struct mgmt_cp_unblock_device *cp = data;
2769 BT_DBG("%s", hdev->name);
2771 if (!bdaddr_type_is_valid(cp->addr.type))
2772 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2773 MGMT_STATUS_INVALID_PARAMS,
2774 &cp->addr, sizeof(cp->addr));
2778 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2780 status = MGMT_STATUS_INVALID_PARAMS;
2782 status = MGMT_STATUS_SUCCESS;
2784 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2785 &cp->addr, sizeof(cp->addr));
2787 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_SET_DEVICE_ID: record the Device ID (DI) profile
 * source/vendor/product/version and refresh dependent state via an
 * hci_request. */
2792 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2795 struct mgmt_cp_set_device_id *cp = data;
2796 struct hci_request req;
2800 BT_DBG("%s", hdev->name);
2802 source = __le16_to_cpu(cp->source);
/* Valid DI sources: 0x0000 (disabled), 0x0001 (SIG), 0x0002 (USB-IF). */
2804 if (source > 0x0002)
2805 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2806 MGMT_STATUS_INVALID_PARAMS);
2810 hdev->devid_source = source;
2811 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2812 hdev->devid_product = __le16_to_cpu(cp->product);
2813 hdev->devid_version = __le16_to_cpu(cp->version);
/* Reply before queuing the (fire-and-forget) HCI update. */
2815 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2817 hci_req_init(&req, hdev);
2819 hci_req_run(&req, NULL);
2821 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Handle MGMT_OP_SET_FAST_CONNECTABLE: tune BR/EDR page-scan interval
 * and type so incoming connections are accepted faster (at higher power
 * cost) or at the spec defaults. */
2826 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2827 void *data, u16 len)
2829 struct mgmt_mode *cp = data;
2830 struct hci_cp_write_page_scan_activity acp;
2834 BT_DBG("%s", hdev->name);
/* Page scan is a BR/EDR concept. */
2836 if (!lmp_bredr_capable(hdev))
2837 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2838 MGMT_STATUS_NOT_SUPPORTED);
2840 if (cp->val != 0x00 && cp->val != 0x01)
2841 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2842 MGMT_STATUS_INVALID_PARAMS);
2844 if (!hdev_is_powered(hdev))
2845 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2846 MGMT_STATUS_NOT_POWERED);
/* Fast connectable only matters while connectable at all. */
2848 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2849 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2850 MGMT_STATUS_REJECTED);
/* Enabled: interlaced scan with a short interval. */
2855 type = PAGE_SCAN_TYPE_INTERLACED;
2857 /* 160 msec page scan interval */
2858 acp.interval = __constant_cpu_to_le16(0x0100);
2860 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2862 /* default 1.28 sec page scan */
2863 acp.interval = __constant_cpu_to_le16(0x0800);
2866 /* default 11.25 msec page scan window */
2867 acp.window = __constant_cpu_to_le16(0x0012);
/* Two raw HCI writes: activity (interval/window), then scan type.
 * NOTE(review): these are not batched in one hci_request here, so a
 * failure between them leaves mixed settings — confirm intent. */
2869 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2872 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2873 MGMT_STATUS_FAILED);
2877 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2879 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2880 MGMT_STATUS_FAILED);
2884 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2887 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Validate one userspace-supplied LTK entry: boolean fields must be
 * strictly 0/1 and the address must be an LE type. */
2891 static bool ltk_is_valid(struct mgmt_ltk_info *key)
2893 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2895 if (key->master != 0x00 && key->master != 0x01)
2897 if (!bdaddr_type_is_le(key->addr.type))
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS (variable-length): validate and
 * install a batch of SMP long term keys, replacing any stored LTKs. */
2902 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2903 void *cp_data, u16 len)
2905 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2906 u16 key_count, expected_len;
2909 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): u16 expected_len can wrap for very large key_count —
 * same concern as in load_link_keys; confirm whether an elided bound
 * check exists. */
2911 expected_len = sizeof(*cp) + key_count *
2912 sizeof(struct mgmt_ltk_info);
2913 if (expected_len != len) {
2914 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2916 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2917 MGMT_STATUS_INVALID_PARAMS);
2920 BT_DBG("%s key_count %u", hdev->name, key_count);
/* First pass: reject the whole batch if any entry is invalid, before
 * mutating any stored state. */
2922 for (i = 0; i < key_count; i++) {
2923 struct mgmt_ltk_info *key = &cp->keys[i];
2925 if (!ltk_is_valid(key))
2926 return cmd_status(sk, hdev->id,
2927 MGMT_OP_LOAD_LONG_TERM_KEYS,
2928 MGMT_STATUS_INVALID_PARAMS);
/* Replace semantics: clear previously stored LTKs first. */
2933 hci_smp_ltks_clear(hdev);
2935 for (i = 0; i < key_count; i++) {
2936 struct mgmt_ltk_info *key = &cp->keys[i];
/* Presumably selected from key->master — confirm elided branch. */
2942 type = HCI_SMP_LTK_SLAVE;
2944 hci_add_ltk(hdev, &key->addr.bdaddr,
2945 bdaddr_to_le(key->addr.type),
2946 type, 0, key->authenticated, key->val,
2947 key->enc_size, key->ediv, key->rand);
2950 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
2953 hci_dev_unlock(hdev);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Dispatch table for mgmt commands: the array index IS the mgmt opcode
 * (entry 0 is unused).  Each entry carries the handler, whether the
 * command payload may be longer than the minimum (var_len), and the
 * minimum/exact expected payload size — mgmt_control() enforces these
 * before calling func. */
2958 static const struct mgmt_handler {
2959 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2963 } mgmt_handlers[] = {
2964 { NULL }, /* 0x0000 (no command) */
2965 { read_version, false, MGMT_READ_VERSION_SIZE },
2966 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
2967 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
2968 { read_controller_info, false, MGMT_READ_INFO_SIZE },
2969 { set_powered, false, MGMT_SETTING_SIZE },
2970 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
2971 { set_connectable, false, MGMT_SETTING_SIZE },
2972 { set_fast_connectable, false, MGMT_SETTING_SIZE },
2973 { set_pairable, false, MGMT_SETTING_SIZE },
2974 { set_link_security, false, MGMT_SETTING_SIZE },
2975 { set_ssp, false, MGMT_SETTING_SIZE },
2976 { set_hs, false, MGMT_SETTING_SIZE },
2977 { set_le, false, MGMT_SETTING_SIZE },
2978 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
2979 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
2980 { add_uuid, false, MGMT_ADD_UUID_SIZE },
2981 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* The two key-loading commands are the only variable-length ones. */
2982 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
2983 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
2984 { disconnect, false, MGMT_DISCONNECT_SIZE },
2985 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
2986 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
2987 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
2988 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
2989 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
2990 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
2991 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
2992 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
2993 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
2994 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
2995 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
2996 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
2997 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
2998 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
2999 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3000 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3001 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3002 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3003 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3004 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* Entry point for mgmt messages from userspace: copy the message, parse
 * the header, validate opcode/index/length against mgmt_handlers, and
 * dispatch to the handler. */
3008 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3012 struct mgmt_hdr *hdr;
3013 u16 opcode, index, len;
3014 struct hci_dev *hdev = NULL;
3015 const struct mgmt_handler *handler;
3018 BT_DBG("got %zu bytes", msglen);
/* Message must at least hold the fixed mgmt header. */
3020 if (msglen < sizeof(*hdr))
/* Copy the whole message into a kernel buffer before parsing — freed in
 * the elided exit path, presumably; confirm. */
3023 buf = kmalloc(msglen, GFP_KERNEL);
3027 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields are little-endian on the wire. */
3033 opcode = __le16_to_cpu(hdr->opcode);
3034 index = __le16_to_cpu(hdr->index);
3035 len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received. */
3037 if (len != msglen - sizeof(*hdr)) {
/* MGMT_INDEX_NONE addresses the stack itself; otherwise resolve and
 * reference the controller. */
3042 if (index != MGMT_INDEX_NONE) {
3043 hdev = hci_dev_get(index);
3045 err = cmd_status(sk, index, opcode,
3046 MGMT_STATUS_INVALID_INDEX);
3051 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3052 mgmt_handlers[opcode].func == NULL) {
3053 BT_DBG("Unknown op %u", opcode);
3054 err = cmd_status(sk, index, opcode,
3055 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below READ_INFO are global (no controller); the rest need
 * one — reject mismatched index usage either way. */
3059 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3060 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3061 err = cmd_status(sk, index, opcode,
3062 MGMT_STATUS_INVALID_INDEX);
3066 handler = &mgmt_handlers[opcode];
/* var_len handlers take >= data_len bytes; others exactly data_len. */
3068 if ((handler->var_len && len < handler->data_len) ||
3069 (!handler->var_len && len != handler->data_len)) {
3070 err = cmd_status(sk, index, opcode,
3071 MGMT_STATUS_INVALID_PARAMS);
3076 mgmt_init_hdev(sk, hdev);
/* Payload starts right after the header. */
3078 cp = buf + sizeof(*hdr);
3080 err = handler->func(sk, hdev, cp, len);
/* NOTE(review): elided listing — intervening lines are omitted in this view. */

/* mgmt_pending_foreach() callback: answer a pending command with the
 * status passed via *data and drop the pending entry. */
3094 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3098 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3099 mgmt_pending_remove(cmd);
/* Broadcast MGMT_EV_INDEX_ADDED when a mgmt-capable controller appears. */
3102 int mgmt_index_added(struct hci_dev *hdev)
3104 if (!mgmt_valid_hdev(hdev))
3107 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Controller going away: fail every pending command with INVALID_INDEX,
 * then broadcast MGMT_EV_INDEX_REMOVED. */
3110 int mgmt_index_removed(struct hci_dev *hdev)
3112 u8 status = MGMT_STATUS_INVALID_INDEX;
3114 if (!mgmt_valid_hdev(hdev))
/* opcode 0 means "match all pending commands". */
3117 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3119 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3124 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: complete a pending settings command
 * with the current settings, remember the first responder's socket in
 * the cmd_lookup match (with a reference) so the caller can skip it
 * when broadcasting New Settings, and free the pending entry. */
3128 static void settings_rsp(struct pending_cmd *cmd, void *data)
3130 	struct cmd_lookup *match = data;
3132 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3134 	list_del(&cmd->list);
3136 	if (match->sk == NULL) {
3137 		match->sk = cmd->sk;
3138 		sock_hold(match->sk);
	/* Free directly (not mgmt_pending_remove) since we already
	 * unlinked the entry above. */
3141 	mgmt_pending_free(cmd);
/* Queue a Write Scan Enable command reflecting the current
 * connectable (page scan) and discoverable (inquiry scan) flags. */
3144 static void set_bredr_scan(struct hci_request *req)
3146 	struct hci_dev *hdev = req->hdev;
3149 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3151 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3152 		scan |= SCAN_INQUIRY;
3155 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion for the power-on sequence: answer all pending
 * Set Powered commands and broadcast New Settings to everyone except
 * the first responder collected in @match.
 * NOTE(review): the matching hci_dev_lock() is in an elided line. */
3158 static void powered_complete(struct hci_dev *hdev, u8 status)
3160 	struct cmd_lookup match = { NULL, hdev };
3162 	BT_DBG("status 0x%02x", status);
3166 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3168 	new_settings(hdev, match.sk);
3170 	hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt flags after power-on: SSP mode, LE host support,
 * authentication, scan enable and local name.  Returns the result of
 * hci_req_run() (0 when commands were queued and the request started).
 */
3176 static int powered_update_hci(struct hci_dev *hdev)
3178 	struct hci_request req;
3181 	hci_req_init(&req, hdev);
	/* Enable SSP on the controller only if mgmt wants it and the
	 * host side is not already reporting it as enabled. */
3183 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3184 	    !lmp_host_ssp_capable(hdev)) {
3187 		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3190 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3191 		struct hci_cp_write_le_host_supported cp;
3194 		cp.simul = lmp_le_br_capable(hdev);
3196 		/* Check first if we already have the right
3197 		 * host state (host features set)
3199 		if (cp.le != lmp_host_le_capable(hdev) ||
3200 		    cp.simul != lmp_host_le_br_capable(hdev))
3201 			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
	/* Sync the authentication-enable setting with the mgmt
	 * link-security flag if they disagree. */
3205 	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3206 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3207 		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3208 			    sizeof(link_sec), &link_sec);
	/* Scan enable and local name are BR/EDR-only commands. */
3210 	if (lmp_bredr_capable(hdev)) {
3211 		set_bredr_scan(&req);
3213 		update_name(&req, hdev->dev_name);
3217 	return hci_req_run(&req, powered_complete);
/* Notify the management layer of a controller power state change.
 * On power-on, kick off powered_update_hci(); if it queued work the
 * Set Powered responses are deferred to powered_complete(), otherwise
 * respond immediately.  On power-off, fail all remaining pending
 * commands with Not Powered and, if the class of device was non-zero,
 * announce it reset to zero.  Finally broadcast New Settings. */
3220 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3222 	struct cmd_lookup match = { NULL, hdev };
3223 	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3224 	u8 zero_cod[] = { 0, 0, 0 };
	/* Nothing to do for controllers not managed over mgmt. */
3227 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
	/* powered_update_hci() == 0 means the request is running and
	 * powered_complete() will send the responses. */
3231 		if (powered_update_hci(hdev) == 0)
3234 		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
	/* Power-off path: answer Set Powered, then fail everything else. */
3239 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3240 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3242 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3243 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3244 			   zero_cod, sizeof(zero_cod), NULL);
3247 	err = new_settings(hdev, match.sk);
/* Controller discoverable state changed: update HCI_DISCOVERABLE
 * atomically (test_and_set/clear tells us if it actually flipped),
 * answer pending Set Discoverable commands, and broadcast New
 * Settings when something changed. */
3255 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3257 	struct cmd_lookup match = { NULL, hdev };
3258 	bool changed = false;
3262 		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3265 		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3269 	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3273 		err = new_settings(hdev, match.sk);
/* Controller connectable state changed: mirror of mgmt_discoverable()
 * for the HCI_CONNECTABLE flag and Set Connectable command. */
3281 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3283 	struct cmd_lookup match = { NULL, hdev };
3284 	bool changed = false;
3288 		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3291 		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3295 	mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
3299 		err = new_settings(hdev, match.sk);
/* A Write Scan Enable command failed: fail the pending Set Connectable
 * and/or Set Discoverable commands matching the attempted scan bits. */
3307 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3309 	u8 mgmt_err = mgmt_status(status);
3311 	if (scan & SCAN_PAGE)
3312 		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3313 				     cmd_status_rsp, &mgmt_err);
3315 	if (scan & SCAN_INQUIRY)
3316 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3317 				     cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * @persistent becomes the store_hint telling userspace whether to
 * persist the key. */
3322 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3325 	struct mgmt_ev_new_link_key ev;
3327 	memset(&ev, 0, sizeof(ev));
3329 	ev.store_hint = persistent;
3330 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3331 	ev.key.addr.type = BDADDR_BREDR;
3332 	ev.key.type = key->type;
3333 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3334 	ev.key.pin_len = key->pin_len;
3336 	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event for an SMP LTK.  The address type is
 * translated from the LE link/address pair to a mgmt bdaddr type.
 * NOTE(review): the master-flag assignment under the HCI_SMP_LTK check
 * is in an elided line. */
3339 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3341 	struct mgmt_ev_new_long_term_key ev;
3343 	memset(&ev, 0, sizeof(ev));
3345 	ev.store_hint = persistent;
3346 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3347 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3348 	ev.key.authenticated = key->authenticated;
3349 	ev.key.enc_size = key->enc_size;
3350 	ev.key.ediv = key->ediv;
3352 	if (key->type == HCI_SMP_LTK)
3355 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3356 	memcpy(ev.key.val, key->val, sizeof(key->val));
3358 	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit a Device Connected event, packing the remote name (if any) and
 * class of device into the event's variable-length EIR area.
 * NOTE(review): the declaration of the on-stack event buffer 'buf' is
 * in an elided line. */
3362 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3363 			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
3367 	struct mgmt_ev_device_connected *ev = (void *) buf;
3370 	bacpy(&ev->addr.bdaddr, bdaddr);
3371 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3373 	ev->flags = __cpu_to_le32(flags);
3376 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
	/* Only append a class-of-device field when it is non-zero. */
3379 	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3380 		eir_len = eir_append_data(ev->eir, eir_len,
3381 					  EIR_CLASS_OF_DEV, dev_class, 3);
3383 	ev->eir_len = cpu_to_le16(eir_len);
3385 	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3386 			  sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command (success) with the address it targeted, and stash the
 * responder's socket in *sk for the caller's later broadcast. */
3389 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3391 	struct mgmt_cp_disconnect *cp = cmd->param;
3392 	struct sock **sk = data;
3393 	struct mgmt_rp_disconnect rp;
3395 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3396 	rp.addr.type = cp->addr.type;
3398 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3404 	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: a disconnect triggered by Unpair
 * Device finished — send the Device Unpaired event, complete the
 * pending command, and drop it. */
3407 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3409 	struct hci_dev *hdev = data;
3410 	struct mgmt_cp_unpair_device *cp = cmd->param;
3411 	struct mgmt_rp_unpair_device rp;
3413 	memset(&rp, 0, sizeof(rp));
3414 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3415 	rp.addr.type = cp->addr.type;
3417 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3419 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3421 	mgmt_pending_remove(cmd);
/* A connection went down: answer any pending Disconnect commands,
 * broadcast Device Disconnected (skipping the Disconnect responder's
 * socket), then finish any Unpair Device commands that were waiting
 * for this disconnection. */
3424 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3425 			     u8 link_type, u8 addr_type, u8 reason)
3427 	struct mgmt_ev_device_disconnected ev;
3428 	struct sock *sk = NULL;
3431 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3433 	bacpy(&ev.addr.bdaddr, bdaddr);
3434 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3437 	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3443 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: still complete pending Unpair Device
 * commands (the unpair itself succeeded), then complete the pending
 * Disconnect command with the translated HCI status. */
3449 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3450 			   u8 link_type, u8 addr_type, u8 status)
3452 	struct mgmt_rp_disconnect rp;
3453 	struct pending_cmd *cmd;
3456 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3459 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3463 	bacpy(&rp.addr.bdaddr, bdaddr);
3464 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3466 	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3467 			   mgmt_status(status), &rp, sizeof(rp));
3469 	mgmt_pending_remove(cmd);
/* Broadcast a Connect Failed event with the translated HCI status. */
3474 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3475 			u8 addr_type, u8 status)
3477 	struct mgmt_ev_connect_failed ev;
3479 	bacpy(&ev.addr.bdaddr, bdaddr);
3480 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3481 	ev.status = mgmt_status(status);
3483 	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask userspace for a PIN code via the PIN Code Request event.  PIN
 * pairing is BR/EDR only, hence the fixed address type.
 * NOTE(review): the ev.secure assignment is in an elided line. */
3486 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3488 	struct mgmt_ev_pin_code_request ev;
3490 	bacpy(&ev.addr.bdaddr, bdaddr);
3491 	ev.addr.type = BDADDR_BREDR;
3494 	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* HCI confirmed (or rejected) our PIN Code Reply: complete the pending
 * mgmt command with the translated status and the target address. */
3498 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3501 	struct pending_cmd *cmd;
3502 	struct mgmt_rp_pin_code_reply rp;
3505 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3509 	bacpy(&rp.addr.bdaddr, bdaddr);
3510 	rp.addr.type = BDADDR_BREDR;
3512 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3513 			   mgmt_status(status), &rp, sizeof(rp));
3515 	mgmt_pending_remove(cmd);
/* Same as mgmt_pin_code_reply_complete() but for the negative reply
 * (PIN Code Neg Reply) pending command. */
3520 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3523 	struct pending_cmd *cmd;
3524 	struct mgmt_rp_pin_code_reply rp;
3527 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3531 	bacpy(&rp.addr.bdaddr, bdaddr);
3532 	rp.addr.type = BDADDR_BREDR;
3534 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3535 			   mgmt_status(status), &rp, sizeof(rp));
3537 	mgmt_pending_remove(cmd);
/* Ask userspace to confirm a pairing (numeric comparison / just-works)
 * via the User Confirm Request event.
 * NOTE(review): the ev.value assignment is in an elided line. */
3542 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3543 			      u8 link_type, u8 addr_type, __le32 value,
3546 	struct mgmt_ev_user_confirm_request ev;
3548 	BT_DBG("%s", hdev->name);
3550 	bacpy(&ev.addr.bdaddr, bdaddr);
3551 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3552 	ev.confirm_hint = confirm_hint;
3555 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey via the User Passkey Request event. */
3559 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3560 			      u8 link_type, u8 addr_type)
3562 	struct mgmt_ev_user_passkey_request ev;
3564 	BT_DBG("%s", hdev->name);
3566 	bacpy(&ev.addr.bdaddr, bdaddr);
3567 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3569 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * variants: find the pending command for @opcode, complete it with the
 * translated status and the remote address, and remove it. */
3573 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3574 				      u8 link_type, u8 addr_type, u8 status,
3577 	struct pending_cmd *cmd;
3578 	struct mgmt_rp_user_confirm_reply rp;
3581 	cmd = mgmt_pending_find(opcode, hdev);
3585 	bacpy(&rp.addr.bdaddr, bdaddr);
3586 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3587 	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3590 	mgmt_pending_remove(cmd);
/* Completion of a User Confirm Reply — thin wrapper around
 * user_pairing_resp_complete(). */
3595 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3596 				     u8 link_type, u8 addr_type, u8 status)
3598 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3599 					  status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion of a User Confirm Negative Reply — wrapper around
 * user_pairing_resp_complete(). */
3602 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3603 					 u8 link_type, u8 addr_type, u8 status)
3605 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3607 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion of a User Passkey Reply — wrapper around
 * user_pairing_resp_complete(). */
3610 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3611 				     u8 link_type, u8 addr_type, u8 status)
3613 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3614 					  status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion of a User Passkey Negative Reply — wrapper around
 * user_pairing_resp_complete(). */
3617 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3618 					 u8 link_type, u8 addr_type, u8 status)
3620 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3622 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey;
 * @entered counts keypresses already made on the remote side. */
3625 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3626 			     u8 link_type, u8 addr_type, u32 passkey,
3629 	struct mgmt_ev_passkey_notify ev;
3631 	BT_DBG("%s", hdev->name);
3633 	bacpy(&ev.addr.bdaddr, bdaddr);
3634 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3635 	ev.passkey = __cpu_to_le32(passkey);
3636 	ev.entered = entered;
3638 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast an Authentication Failed event with the translated
 * HCI status. */
3641 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3642 		     u8 addr_type, u8 status)
3644 	struct mgmt_ev_auth_failed ev;
3646 	bacpy(&ev.addr.bdaddr, bdaddr);
3647 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3648 	ev.status = mgmt_status(status);
3650 	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Write Auth Enable completed: on failure, fail all pending Set Link
 * Security commands; on success, sync HCI_LINK_SECURITY with the
 * controller's HCI_AUTH flag, answer pending commands, and broadcast
 * New Settings when the flag actually changed. */
3653 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3655 	struct cmd_lookup match = { NULL, hdev };
3656 	bool changed = false;
3660 		u8 mgmt_err = mgmt_status(status);
3661 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3662 				     cmd_status_rsp, &mgmt_err);
3666 	if (test_bit(HCI_AUTH, &hdev->flags)) {
3667 		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3670 		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3674 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3678 		err = new_settings(hdev, match.sk);
/* Queue a Write EIR command with an all-zero payload to clear the
 * extended inquiry response data (no-op on controllers without EIR
 * support); also clears the cached copy in hdev->eir. */
3686 static void clear_eir(struct hci_request *req)
3688 	struct hci_dev *hdev = req->hdev;
3689 	struct hci_cp_write_eir cp;
3691 	if (!lmp_ext_inq_capable(hdev))
3694 	memset(hdev->eir, 0, sizeof(hdev->eir));
3696 	memset(&cp, 0, sizeof(cp));
3698 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode completed: on failure, roll back HCI_SSP_ENABLED if
 * we had just set it (announcing the rollback via New Settings) and
 * fail pending Set SSP commands; on success, sync the flag, answer
 * pending commands, broadcast New Settings on change, and refresh or
 * clear the EIR data to match the new SSP state. */
3701 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3703 	struct cmd_lookup match = { NULL, hdev };
3704 	struct hci_request req;
3705 	bool changed = false;
3709 		u8 mgmt_err = mgmt_status(status);
	/* The enable attempt failed: undo the optimistic flag set. */
3711 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3713 			err = new_settings(hdev, NULL);
3715 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3722 		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3725 		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3729 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3732 		err = new_settings(hdev, match.sk);
3737 	hci_req_init(&req, hdev);
	/* With SSP on, EIR becomes usable — update it; otherwise clear. */
3739 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3744 	hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: remember (with a reference) the
 * first pending command's socket without completing or removing it. */
3749 static void sk_lookup(struct pending_cmd *cmd, void *data)
3751 	struct cmd_lookup *match = data;
3753 	if (match->sk == NULL) {
3754 		match->sk = cmd->sk;
3755 		sock_hold(match->sk);
/* Write Class of Device completed: clear the pending-class flag, find
 * the socket that initiated the change (Set Dev Class / Add UUID /
 * Remove UUID), and broadcast Class Of Dev Changed to everyone else. */
3759 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3762 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3765 	clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
3767 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3768 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3769 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3772 		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* A local-name change completed on the controller: cache the new name,
 * respond to a pending Set Local Name command (status or complete),
 * broadcast Local Name Changed to other sockets, and — outside of the
 * power-on init phase — queue an EIR update since the name is embedded
 * in the EIR data. */
3781 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3783 	struct pending_cmd *cmd;
3784 	struct mgmt_cp_set_local_name ev;
3785 	bool changed = false;
3788 	if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
3789 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3793 	memset(&ev, 0, sizeof(ev));
3794 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3795 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3797 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3801 	/* Always assume that either the short or the complete name has
3802 	 * changed if there was a pending mgmt command */
3806 		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3807 				 mgmt_status(status));
3811 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
3818 		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3819 				 sizeof(ev), cmd ? cmd->sk : NULL);
3821 	/* EIR is taken care of separately when powering on the
3822 	 * adapter so only update them here if this is a name change
3823 	 * unrelated to power on.
3825 	if (!test_bit(HCI_INIT, &hdev->flags)) {
3826 		struct hci_request req;
3827 		hci_req_init(&req, hdev);
3829 		hci_req_run(&req, NULL);
3834 		mgmt_pending_remove(cmd);
/* Read Local OOB Data completed: complete the pending mgmt command
 * either with an error status or with the hash + randomizer pair. */
3838 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3839 					    u8 *randomizer, u8 status)
3841 	struct pending_cmd *cmd;
3844 	BT_DBG("%s status %u", hdev->name, status);
3846 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3851 		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3852 				 mgmt_status(status));
3854 		struct mgmt_rp_read_local_oob_data rp;
3856 		memcpy(rp.hash, hash, sizeof(rp.hash));
3857 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3859 		err = cmd_complete(cmd->sk, hdev->id,
3860 				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3864 	mgmt_pending_remove(cmd);
/* Write LE Host Supported completed: mirrors
 * mgmt_ssp_enable_complete() for the HCI_LE_ENABLED flag and the Set
 * LE command — roll back the flag on failure, sync + announce on
 * success. */
3869 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3871 	struct cmd_lookup match = { NULL, hdev };
3872 	bool changed = false;
3876 		u8 mgmt_err = mgmt_status(status);
	/* The enable attempt failed: undo the optimistic flag set. */
3878 		if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3880 			err = new_settings(hdev, NULL);
3882 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3889 		if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3892 		if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3896 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3899 		err = new_settings(hdev, match.sk);
/* Emit a Device Found event for a discovery result, copying the
 * received EIR and appending a class-of-device field when the EIR does
 * not already carry one.  Flags signal whether userspace should
 * confirm the name and whether the remote is legacy (non-SSP) pairing.
 * NOTE(review): the declaration of 'buf' and the cfm_name/ssp
 * conditionals are partially elided in this excerpt. */
3907 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3908 		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3909 		      ssp, u8 *eir, u16 eir_len)
3912 	struct mgmt_ev_device_found *ev = (void *) buf;
3915 	/* Leave 5 bytes for a potential CoD field */
3916 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3919 	memset(buf, 0, sizeof(buf));
3921 	bacpy(&ev->addr.bdaddr, bdaddr);
3922 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3925 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3927 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3930 		memcpy(ev->eir, eir, eir_len);
3932 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
3933 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3936 	ev->eir_len = cpu_to_le16(eir_len);
3937 	ev_size = sizeof(*ev) + eir_len;
3939 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* A remote name request resolved during discovery: emit a Device
 * Found event whose EIR carries only the complete name (buf is sized
 * for the event header plus a max-length name EIR field). */
3942 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3943 		     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
3945 	struct mgmt_ev_device_found *ev;
3946 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3949 	ev = (struct mgmt_ev_device_found *) buf;
3951 	memset(buf, 0, sizeof(buf));
3953 	bacpy(&ev->addr.bdaddr, bdaddr);
3954 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3957 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3960 	ev->eir_len = cpu_to_le16(eir_len);
3962 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3963 			  sizeof(*ev) + eir_len, NULL);
/* Starting discovery failed: reset the discovery state machine and
 * complete the pending Start Discovery command with the attempted
 * discovery type and the translated error. */
3966 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3968 	struct pending_cmd *cmd;
3972 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3974 	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3978 	type = hdev->discovery.type;
3980 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3981 			   &type, sizeof(type));
3982 	mgmt_pending_remove(cmd);
/* Stopping discovery failed: complete the pending Stop Discovery
 * command with the current discovery type and the translated error. */
3987 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3989 	struct pending_cmd *cmd;
3992 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3996 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3997 			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3998 	mgmt_pending_remove(cmd);
/* Discovery state flipped on the controller: complete whichever
 * Start/Stop Discovery command is pending (Start when discovering
 * began, Stop otherwise), then broadcast the Discovering event. */
4003 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4005 	struct mgmt_ev_discovering ev;
4006 	struct pending_cmd *cmd;
4008 	BT_DBG("%s discovering %u", hdev->name, discovering);
4011 		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4013 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4016 		u8 type = hdev->discovery.type;
4018 		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4020 		mgmt_pending_remove(cmd);
4023 	memset(&ev, 0, sizeof(ev));
4024 	ev.type = hdev->discovery.type;
4025 	ev.discovering = discovering;
4027 	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a Device Blocked event, skipping the socket whose Block
 * Device command caused it (if one is pending). */
4030 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4032 	struct pending_cmd *cmd;
4033 	struct mgmt_ev_device_blocked ev;
4035 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4037 	bacpy(&ev.addr.bdaddr, bdaddr);
4038 	ev.addr.type = type;
4040 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4041 			  cmd ? cmd->sk : NULL);
/* Broadcast a Device Unblocked event, skipping the socket whose
 * Unblock Device command caused it (if one is pending). */
4044 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4046 	struct pending_cmd *cmd;
4047 	struct mgmt_ev_device_unblocked ev;
4049 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4051 	bacpy(&ev.addr.bdaddr, bdaddr);
4052 	ev.addr.type = type;
4054 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4055 			  cmd ? cmd->sk : NULL);
/* Module parameter: opt-in switch for Bluetooth High Speed (AMP)
 * support, writable at runtime via sysfs (mode 0644). */
4058 module_param(enable_hs, bool, 0644);
4059 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");