/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* These LE scan and inquiry parameters were chosen according to LE General
 * Discovery Procedure specification.
 */
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
418 if (lmp_bredr_capable(hdev))
419 settings |= MGMT_SETTING_BREDR;
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
436 #define PNP_INFO_SVCLASS_ID 0x1200
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
546 static void create_eir(struct hci_dev *hdev, u8 *data)
551 name_len = strlen(hdev->dev_name);
557 ptr[1] = EIR_NAME_SHORT;
559 ptr[1] = EIR_NAME_COMPLETE;
561 /* EIR Data length */
562 ptr[0] = name_len + 1;
564 memcpy(ptr + 2, hdev->dev_name, name_len);
566 ptr += (name_len + 2);
569 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
571 ptr[1] = EIR_TX_POWER;
572 ptr[2] = (u8) hdev->inq_tx_power;
577 if (hdev->devid_source > 0) {
579 ptr[1] = EIR_DEVICE_ID;
581 put_unaligned_le16(hdev->devid_source, ptr + 2);
582 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
583 put_unaligned_le16(hdev->devid_product, ptr + 6);
584 put_unaligned_le16(hdev->devid_version, ptr + 8);
589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 static void update_eir(struct hci_request *req)
596 struct hci_dev *hdev = req->hdev;
597 struct hci_cp_write_eir cp;
599 if (!hdev_is_powered(hdev))
602 if (!lmp_ext_inq_capable(hdev))
605 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
608 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
611 memset(&cp, 0, sizeof(cp));
613 create_eir(hdev, cp.data);
615 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
618 memcpy(hdev->eir, cp.data, sizeof(cp.data));
620 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
623 static u8 get_service_classes(struct hci_dev *hdev)
625 struct bt_uuid *uuid;
628 list_for_each_entry(uuid, &hdev->uuids, list)
629 val |= uuid->svc_hint;
634 static void update_class(struct hci_request *req)
636 struct hci_dev *hdev = req->hdev;
639 BT_DBG("%s", hdev->name);
641 if (!hdev_is_powered(hdev))
644 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
647 cod[0] = hdev->minor_class;
648 cod[1] = hdev->major_class;
649 cod[2] = get_service_classes(hdev);
651 if (memcmp(cod, hdev->dev_class, 3) == 0)
654 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
657 static void service_cache_off(struct work_struct *work)
659 struct hci_dev *hdev = container_of(work, struct hci_dev,
661 struct hci_request req;
663 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
666 hci_req_init(&req, hdev);
673 hci_dev_unlock(hdev);
675 hci_req_run(&req, NULL);
678 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
680 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
683 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
685 /* Non-mgmt controlled devices get this bit set
686 * implicitly so that pairing works for them, however
687 * for mgmt we require user-space to explicitly enable
690 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
693 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
694 void *data, u16 data_len)
696 struct mgmt_rp_read_info rp;
698 BT_DBG("sock %p %s", sk, hdev->name);
702 memset(&rp, 0, sizeof(rp));
704 bacpy(&rp.bdaddr, &hdev->bdaddr);
706 rp.version = hdev->hci_ver;
707 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
709 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
710 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
712 memcpy(rp.dev_class, hdev->dev_class, 3);
714 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
715 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
717 hci_dev_unlock(hdev);
719 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
723 static void mgmt_pending_free(struct pending_cmd *cmd)
730 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
731 struct hci_dev *hdev, void *data,
734 struct pending_cmd *cmd;
736 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
740 cmd->opcode = opcode;
741 cmd->index = hdev->id;
743 cmd->param = kmalloc(len, GFP_KERNEL);
750 memcpy(cmd->param, data, len);
755 list_add(&cmd->list, &hdev->mgmt_pending);
760 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
761 void (*cb)(struct pending_cmd *cmd,
765 struct pending_cmd *cmd, *tmp;
767 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
768 if (opcode > 0 && cmd->opcode != opcode)
775 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
777 struct pending_cmd *cmd;
779 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
780 if (cmd->opcode == opcode)
787 static void mgmt_pending_remove(struct pending_cmd *cmd)
789 list_del(&cmd->list);
790 mgmt_pending_free(cmd);
793 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
795 __le32 settings = cpu_to_le32(get_current_settings(hdev));
797 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
801 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
804 struct mgmt_mode *cp = data;
805 struct pending_cmd *cmd;
808 BT_DBG("request for %s", hdev->name);
810 if (cp->val != 0x00 && cp->val != 0x01)
811 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
812 MGMT_STATUS_INVALID_PARAMS);
816 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
817 cancel_delayed_work(&hdev->power_off);
820 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
822 err = mgmt_powered(hdev, 1);
827 if (!!cp->val == hdev_is_powered(hdev)) {
828 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
832 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
833 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
838 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
845 queue_work(hdev->req_workqueue, &hdev->power_on);
847 queue_work(hdev->req_workqueue, &hdev->power_off.work);
852 hci_dev_unlock(hdev);
856 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
857 struct sock *skip_sk)
860 struct mgmt_hdr *hdr;
862 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
866 hdr = (void *) skb_put(skb, sizeof(*hdr));
867 hdr->opcode = cpu_to_le16(event);
869 hdr->index = cpu_to_le16(hdev->id);
871 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
872 hdr->len = cpu_to_le16(data_len);
875 memcpy(skb_put(skb, data_len), data, data_len);
878 __net_timestamp(skb);
880 hci_send_to_control(skb, skip_sk);
886 static int new_settings(struct hci_dev *hdev, struct sock *skip)
890 ev = cpu_to_le32(get_current_settings(hdev));
892 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
895 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
898 struct mgmt_cp_set_discoverable *cp = data;
899 struct pending_cmd *cmd;
904 BT_DBG("request for %s", hdev->name);
906 if (!lmp_bredr_capable(hdev))
907 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
908 MGMT_STATUS_NOT_SUPPORTED);
910 if (cp->val != 0x00 && cp->val != 0x01)
911 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
912 MGMT_STATUS_INVALID_PARAMS);
914 timeout = __le16_to_cpu(cp->timeout);
915 if (!cp->val && timeout > 0)
916 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
917 MGMT_STATUS_INVALID_PARAMS);
921 if (!hdev_is_powered(hdev) && timeout > 0) {
922 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
923 MGMT_STATUS_NOT_POWERED);
927 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
928 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
929 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
934 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
935 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
936 MGMT_STATUS_REJECTED);
940 if (!hdev_is_powered(hdev)) {
941 bool changed = false;
943 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
944 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
948 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
953 err = new_settings(hdev, sk);
958 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
959 if (hdev->discov_timeout > 0) {
960 cancel_delayed_work(&hdev->discov_off);
961 hdev->discov_timeout = 0;
964 if (cp->val && timeout > 0) {
965 hdev->discov_timeout = timeout;
966 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
967 msecs_to_jiffies(hdev->discov_timeout * 1000));
970 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
974 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
983 scan |= SCAN_INQUIRY;
985 cancel_delayed_work(&hdev->discov_off);
987 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
989 mgmt_pending_remove(cmd);
992 hdev->discov_timeout = timeout;
995 hci_dev_unlock(hdev);
999 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1002 struct mgmt_mode *cp = data;
1003 struct pending_cmd *cmd;
1007 BT_DBG("request for %s", hdev->name);
1009 if (!lmp_bredr_capable(hdev))
1010 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1011 MGMT_STATUS_NOT_SUPPORTED);
1013 if (cp->val != 0x00 && cp->val != 0x01)
1014 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1015 MGMT_STATUS_INVALID_PARAMS);
1019 if (!hdev_is_powered(hdev)) {
1020 bool changed = false;
1022 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1026 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1028 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1029 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1032 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1037 err = new_settings(hdev, sk);
1042 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1043 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1044 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1049 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1050 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1054 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1065 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1066 hdev->discov_timeout > 0)
1067 cancel_delayed_work(&hdev->discov_off);
1070 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1072 mgmt_pending_remove(cmd);
1075 hci_dev_unlock(hdev);
1079 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1082 struct mgmt_mode *cp = data;
1085 BT_DBG("request for %s", hdev->name);
1087 if (cp->val != 0x00 && cp->val != 0x01)
1088 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1089 MGMT_STATUS_INVALID_PARAMS);
1094 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1096 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1098 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1102 err = new_settings(hdev, sk);
1105 hci_dev_unlock(hdev);
1109 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1112 struct mgmt_mode *cp = data;
1113 struct pending_cmd *cmd;
1117 BT_DBG("request for %s", hdev->name);
1119 if (!lmp_bredr_capable(hdev))
1120 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1121 MGMT_STATUS_NOT_SUPPORTED);
1123 if (cp->val != 0x00 && cp->val != 0x01)
1124 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1125 MGMT_STATUS_INVALID_PARAMS);
1129 if (!hdev_is_powered(hdev)) {
1130 bool changed = false;
1132 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1133 &hdev->dev_flags)) {
1134 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1138 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1143 err = new_settings(hdev, sk);
1148 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1149 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1156 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1157 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1161 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1167 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1169 mgmt_pending_remove(cmd);
1174 hci_dev_unlock(hdev);
1178 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1180 struct mgmt_mode *cp = data;
1181 struct pending_cmd *cmd;
1185 BT_DBG("request for %s", hdev->name);
1187 if (!lmp_ssp_capable(hdev))
1188 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1189 MGMT_STATUS_NOT_SUPPORTED);
1191 if (cp->val != 0x00 && cp->val != 0x01)
1192 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1193 MGMT_STATUS_INVALID_PARAMS);
1199 if (!hdev_is_powered(hdev)) {
1200 bool changed = false;
1202 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1203 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1207 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1212 err = new_settings(hdev, sk);
1217 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1218 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1223 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1224 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1228 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1234 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1236 mgmt_pending_remove(cmd);
1241 hci_dev_unlock(hdev);
1245 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1247 struct mgmt_mode *cp = data;
1249 BT_DBG("request for %s", hdev->name);
1252 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1253 MGMT_STATUS_NOT_SUPPORTED);
1255 if (cp->val != 0x00 && cp->val != 0x01)
1256 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1257 MGMT_STATUS_INVALID_PARAMS);
1260 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1262 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1264 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1267 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1269 struct mgmt_mode *cp = data;
1270 struct hci_cp_write_le_host_supported hci_cp;
1271 struct pending_cmd *cmd;
1275 BT_DBG("request for %s", hdev->name);
1277 if (!lmp_le_capable(hdev))
1278 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1279 MGMT_STATUS_NOT_SUPPORTED);
1281 if (cp->val != 0x00 && cp->val != 0x01)
1282 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1283 MGMT_STATUS_INVALID_PARAMS);
1288 enabled = lmp_host_le_capable(hdev);
1290 if (!hdev_is_powered(hdev) || val == enabled) {
1291 bool changed = false;
1293 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1294 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1298 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1303 err = new_settings(hdev, sk);
1308 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1309 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1314 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1320 memset(&hci_cp, 0, sizeof(hci_cp));
1324 hci_cp.simul = lmp_le_br_capable(hdev);
1327 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1330 mgmt_pending_remove(cmd);
1333 hci_dev_unlock(hdev);
1337 /* This is a helper function to test for pending mgmt commands that can
1338 * cause CoD or EIR HCI commands. We can only allow one such pending
1339 * mgmt command at a time since otherwise we cannot easily track what
1340 * the current values are, will be, and based on that calculate if a new
1341 * HCI command needs to be sent and if yes with what value.
1343 static bool pending_eir_or_class(struct hci_dev *hdev)
1345 struct pending_cmd *cmd;
1347 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1348 switch (cmd->opcode) {
1349 case MGMT_OP_ADD_UUID:
1350 case MGMT_OP_REMOVE_UUID:
1351 case MGMT_OP_SET_DEV_CLASS:
1352 case MGMT_OP_SET_POWERED:
1360 static const u8 bluetooth_base_uuid[] = {
1361 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1362 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1365 static u8 get_uuid_size(const u8 *uuid)
1369 if (memcmp(uuid, bluetooth_base_uuid, 12))
1372 val = get_unaligned_le32(&uuid[12]);
1379 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1381 struct pending_cmd *cmd;
1385 cmd = mgmt_pending_find(mgmt_op, hdev);
1389 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1390 hdev->dev_class, 3);
1392 mgmt_pending_remove(cmd);
1395 hci_dev_unlock(hdev);
1398 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1400 BT_DBG("status 0x%02x", status);
1402 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1405 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1407 struct mgmt_cp_add_uuid *cp = data;
1408 struct pending_cmd *cmd;
1409 struct hci_request req;
1410 struct bt_uuid *uuid;
1413 BT_DBG("request for %s", hdev->name);
1417 if (pending_eir_or_class(hdev)) {
1418 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1423 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1429 memcpy(uuid->uuid, cp->uuid, 16);
1430 uuid->svc_hint = cp->svc_hint;
1431 uuid->size = get_uuid_size(cp->uuid);
1433 list_add_tail(&uuid->list, &hdev->uuids);
1435 hci_req_init(&req, hdev);
1440 err = hci_req_run(&req, add_uuid_complete);
1442 if (err != -ENODATA)
1445 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1446 hdev->dev_class, 3);
1450 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1459 hci_dev_unlock(hdev);
1463 static bool enable_service_cache(struct hci_dev *hdev)
1465 if (!hdev_is_powered(hdev))
1468 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1469 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1477 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1479 BT_DBG("status 0x%02x", status);
1481 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1484 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1487 struct mgmt_cp_remove_uuid *cp = data;
1488 struct pending_cmd *cmd;
1489 struct bt_uuid *match, *tmp;
1490 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1491 struct hci_request req;
1494 BT_DBG("request for %s", hdev->name);
1498 if (pending_eir_or_class(hdev)) {
1499 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1504 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1505 err = hci_uuids_clear(hdev);
1507 if (enable_service_cache(hdev)) {
1508 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1509 0, hdev->dev_class, 3);
1518 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1519 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1522 list_del(&match->list);
1528 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1529 MGMT_STATUS_INVALID_PARAMS);
1534 hci_req_init(&req, hdev);
1539 err = hci_req_run(&req, remove_uuid_complete);
1541 if (err != -ENODATA)
1544 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1545 hdev->dev_class, 3);
1549 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1558 hci_dev_unlock(hdev);
1562 static void set_class_complete(struct hci_dev *hdev, u8 status)
1564 BT_DBG("status 0x%02x", status);
1566 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler.
 *
 * Validates and stores the requested major/minor device class.  BR/EDR
 * support is mandatory.  If the controller is not powered the new class is
 * only cached; otherwise an HCI request is built and its completion (via
 * set_class_complete) answers the management command.
 */
1569 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1572 struct mgmt_cp_set_dev_class *cp = data;
1573 struct pending_cmd *cmd;
1574 struct hci_request req;
1577 BT_DBG("request for %s", hdev->name);
1579 if (!lmp_bredr_capable(hdev))
1580 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1581 MGMT_STATUS_NOT_SUPPORTED);
/* Serialize against any in-flight EIR/class update. */
1585 if (pending_eir_or_class(hdev)) {
1586 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low two bits of minor and high three bits of major are reserved
 * in the Class of Device format and must be zero.
 */
1591 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1592 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1593 MGMT_STATUS_INVALID_PARAMS);
1597 hdev->major_class = cp->major;
1598 hdev->minor_class = cp->minor;
/* Not powered: cache only and report the (unchanged) class. */
1600 if (!hdev_is_powered(hdev)) {
1601 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1602 hdev->dev_class, 3);
1606 hci_req_init(&req, hdev);
/* Flush the service cache timer; the lock is dropped because
 * cancel_delayed_work_sync() may sleep waiting for the work item.
 */
1608 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1609 hci_dev_unlock(hdev);
1610 cancel_delayed_work_sync(&hdev->service_cache);
1617 err = hci_req_run(&req, set_class_complete);
/* -ENODATA: nothing queued, answer immediately. */
1619 if (err != -ENODATA)
1622 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1623 hdev->dev_class, 3);
1627 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1636 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler (variable-length command).
 *
 * Replaces the controller's stored BR/EDR link keys with the supplied
 * list.  The payload length must exactly match the declared key_count,
 * debug_keys must be a boolean, and every key address must be of type
 * BDADDR_BREDR.  On success the old keys are cleared and each new key is
 * added via hci_add_link_key().
 */
1640 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1643 struct mgmt_cp_load_link_keys *cp = data;
1644 u16 key_count, expected_len;
1647 key_count = __le16_to_cpu(cp->key_count);
/* Guard against a forged key_count that disagrees with the actual
 * payload size (prevents reading past the received buffer).
 */
1649 expected_len = sizeof(*cp) + key_count *
1650 sizeof(struct mgmt_link_key_info);
1651 if (expected_len != len) {
1652 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1654 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1655 MGMT_STATUS_INVALID_PARAMS);
1658 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1659 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1660 MGMT_STATUS_INVALID_PARAMS);
1662 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all addresses before touching any stored state. */
1665 for (i = 0; i < key_count; i++) {
1666 struct mgmt_link_key_info *key = &cp->keys[i];
1668 if (key->addr.type != BDADDR_BREDR)
1669 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1670 MGMT_STATUS_INVALID_PARAMS);
1675 hci_link_keys_clear(hdev);
1677 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1680 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1682 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1684 for (i = 0; i < key_count; i++) {
1685 struct mgmt_link_key_info *key = &cp->keys[i];
1687 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1688 key->type, key->pin_len);
1691 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1693 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_UNPAIRED event for the given remote address,
 * skipping delivery to @skip_sk (typically the socket that requested the
 * unpair, which gets a direct command reply instead).
 */
1698 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1699 u8 addr_type, struct sock *skip_sk)
1701 struct mgmt_ev_device_unpaired ev;
1703 bacpy(&ev.addr.bdaddr, bdaddr);
1704 ev.addr.type = addr_type;
1706 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler.
 *
 * Deletes the stored pairing key (BR/EDR link key or LE LTK, depending on
 * the address type) and, when cp->disconnect is set and a connection
 * exists, issues HCI_OP_DISCONNECT; in that case the management reply is
 * deferred until the disconnection completes via the pending command.
 */
1710 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1713 struct mgmt_cp_unpair_device *cp = data;
1714 struct mgmt_rp_unpair_device rp;
1715 struct hci_cp_disconnect dc;
1716 struct pending_cmd *cmd;
1717 struct hci_conn *conn;
/* Echo the target address back in every reply path. */
1720 memset(&rp, 0, sizeof(rp));
1721 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1722 rp.addr.type = cp->addr.type;
1724 if (!bdaddr_type_is_valid(cp->addr.type))
1725 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1726 MGMT_STATUS_INVALID_PARAMS,
/* disconnect must be a strict boolean. */
1729 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1730 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1731 MGMT_STATUS_INVALID_PARAMS,
1736 if (!hdev_is_powered(hdev)) {
1737 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1738 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Remove the key matching the transport implied by the address type. */
1742 if (cp->addr.type == BDADDR_BREDR)
1743 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1745 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1748 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1749 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1753 if (cp->disconnect) {
1754 if (cp->addr.type == BDADDR_BREDR)
1755 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1758 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No disconnect requested (or no live connection): reply now and
 * broadcast the unpaired event to the other management sockets.
 */
1765 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1767 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1771 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1778 dc.handle = cpu_to_le16(conn->handle);
1779 dc.reason = 0x13; /* Remote User Terminated Connection */
1780 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1782 mgmt_pending_remove(cmd);
1785 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler.
 *
 * Looks up the ACL (BR/EDR) or LE connection for the given address and
 * issues HCI_OP_DISCONNECT with reason "remote user terminated".  Only one
 * disconnect command may be pending at a time; the reply is deferred to
 * the disconnection event via the pending command.
 */
1789 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1792 struct mgmt_cp_disconnect *cp = data;
1793 struct mgmt_rp_disconnect rp;
1794 struct hci_cp_disconnect dc;
1795 struct pending_cmd *cmd;
1796 struct hci_conn *conn;
/* Echo the target address back in every reply path. */
1801 memset(&rp, 0, sizeof(rp));
1802 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1803 rp.addr.type = cp->addr.type;
1805 if (!bdaddr_type_is_valid(cp->addr.type))
1806 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1807 MGMT_STATUS_INVALID_PARAMS,
1812 if (!test_bit(HCI_UP, &hdev->flags)) {
1813 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1814 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Reject concurrent disconnect requests. */
1818 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1819 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1820 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1824 if (cp->addr.type == BDADDR_BREDR)
1825 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1828 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED states mean there is no established link. */
1830 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1831 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1832 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1836 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1842 dc.handle = cpu_to_le16(conn->handle);
1843 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1845 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1847 mgmt_pending_remove(cmd);
1850 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type to the single mgmt-API
 * address-type value (BDADDR_BREDR / BDADDR_LE_PUBLIC / BDADDR_LE_RANDOM)
 * used in management messages.
 */
1854 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1856 switch (link_type) {
1858 switch (addr_type) {
1859 case ADDR_LE_DEV_PUBLIC:
1860 return BDADDR_LE_PUBLIC;
1863 /* Fallback to LE Random address type */
1864 return BDADDR_LE_RANDOM;
1868 /* Fallback to BR/EDR type */
1869 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler.
 *
 * Builds a response listing the addresses of all connections that have
 * been reported to management (HCI_CONN_MGMT_CONNECTED).  SCO/eSCO links
 * are filtered out of the final list, so the length is recomputed after
 * the second pass.
 */
1873 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1876 struct mgmt_rp_get_connections *rp;
1886 if (!hdev_is_powered(hdev)) {
1887 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1888 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply buffer. */
1893 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1894 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1898 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1899 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address list. */
1906 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1907 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1909 bacpy(&rp->addr[i].bdaddr, &c->dst);
1910 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1911 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1916 rp->conn_count = cpu_to_le16(i);
1918 /* Recalculate length in case of filtered SCO connections, etc */
1919 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1921 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1927 hci_dev_unlock(hdev);
/* Queue a pending MGMT_OP_PIN_CODE_NEG_REPLY and send the corresponding
 * HCI negative reply (just the remote bdaddr).  On send failure the
 * pending entry is removed again.
 */
1931 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1932 struct mgmt_cp_pin_code_neg_reply *cp)
1934 struct pending_cmd *cmd;
1937 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1942 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1943 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1945 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler.
 *
 * Forwards a user-supplied PIN code to the controller for an active ACL
 * connection.  If the connection's pending security level is HIGH, a
 * 16-digit PIN is mandatory; a shorter PIN is converted into an automatic
 * negative reply instead.
 */
1950 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1953 struct hci_conn *conn;
1954 struct mgmt_cp_pin_code_reply *cp = data;
1955 struct hci_cp_pin_code_reply reply;
1956 struct pending_cmd *cmd;
1963 if (!hdev_is_powered(hdev)) {
1964 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1965 MGMT_STATUS_NOT_POWERED);
1969 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1971 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1972 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise reject the
 * pairing on the user's behalf with a negative reply.
 */
1976 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1977 struct mgmt_cp_pin_code_neg_reply ncp;
1979 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1981 BT_ERR("PIN code is not 16 bytes long");
1983 err = send_pin_code_neg_reply(sk, hdev, &ncp);
1985 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1986 MGMT_STATUS_INVALID_PARAMS);
1991 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
1997 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
1998 reply.pin_len = cp->pin_len;
1999 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2001 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2003 mgmt_pending_remove(cmd);
2006 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: stores the IO capability value used
 * for subsequent pairing attempts.  Purely local state, so the reply is
 * always an immediate success.
 */
2010 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2013 struct mgmt_cp_set_io_capability *cp = data;
2019 hdev->io_capability = cp->io_capability;
2021 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2022 hdev->io_capability);
2024 hci_dev_unlock(hdev);
2026 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points at
 * the given connection, i.e. the pairing attempt bound to @conn.
 */
2030 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2032 struct hci_dev *hdev = conn->hdev;
2033 struct pending_cmd *cmd;
2035 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2036 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2039 if (cmd->user_data != conn)
/* Finish a pending MGMT_OP_PAIR_DEVICE command: send the reply carrying
 * the remote address and status, detach all connection callbacks so no
 * further events reach this pairing, and drop the pending entry.
 */
2048 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2050 struct mgmt_rp_pair_device rp;
2051 struct hci_conn *conn = cmd->user_data;
2053 bacpy(&rp.addr.bdaddr, &conn->dst);
2054 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2056 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2059 /* So we don't get further callbacks for this connection */
2060 conn->connect_cfm_cb = NULL;
2061 conn->security_cfm_cb = NULL;
2062 conn->disconn_cfm_cb = NULL;
2066 mgmt_pending_remove(cmd);
/* Connection/security callback used for BR/EDR pairing: translate the HCI
 * status and complete the matching pending pair command, if any.
 */
2069 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2071 struct pending_cmd *cmd;
2073 BT_DBG("status %u", status);
2075 cmd = find_pairing(conn);
2077 BT_DBG("Unable to find a pending command");
2079 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing: for LE, a successful connect
 * alone does not finish pairing (SMP continues afterwards), so this path
 * completes the pending command based on the connect status.
 */
2082 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2084 struct pending_cmd *cmd;
2086 BT_DBG("status %u", status);
2091 cmd = find_pairing(conn);
2093 BT_DBG("Unable to find a pending command");
2095 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler.
 *
 * Initiates dedicated bonding with the remote device: creates (or reuses)
 * an ACL or LE connection, installs pairing callbacks on it, and defers
 * the management reply to those callbacks.  cp->io_cap 0x03
 * (NoInputNoOutput) selects non-MITM dedicated bonding.
 */
2098 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2101 struct mgmt_cp_pair_device *cp = data;
2102 struct mgmt_rp_pair_device rp;
2103 struct pending_cmd *cmd;
2104 u8 sec_level, auth_type;
2105 struct hci_conn *conn;
/* Echo the target address back in every reply path. */
2110 memset(&rp, 0, sizeof(rp));
2111 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2112 rp.addr.type = cp->addr.type;
2114 if (!bdaddr_type_is_valid(cp->addr.type))
2115 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2116 MGMT_STATUS_INVALID_PARAMS,
2121 if (!hdev_is_powered(hdev)) {
2122 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2123 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 = NoInputNoOutput: MITM protection is impossible,
 * so request plain dedicated bonding; otherwise require MITM.
 */
2127 sec_level = BT_SECURITY_MEDIUM;
2128 if (cp->io_cap == 0x03)
2129 auth_type = HCI_AT_DEDICATED_BONDING;
2131 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2133 if (cp->addr.type == BDADDR_BREDR)
2134 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2135 cp->addr.type, sec_level, auth_type);
2137 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2138 cp->addr.type, sec_level, auth_type);
2143 if (PTR_ERR(conn) == -EBUSY)
2144 status = MGMT_STATUS_BUSY;
2146 status = MGMT_STATUS_CONNECT_FAILED;
2148 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* An already-installed connect callback means another pairing is
 * using this connection.
 */
2154 if (conn->connect_cfm_cb) {
2156 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2157 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2161 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2168 /* For LE, just connecting isn't a proof that the pairing finished */
2169 if (cp->addr.type == BDADDR_BREDR)
2170 conn->connect_cfm_cb = pairing_complete_cb;
2172 conn->connect_cfm_cb = le_connect_complete_cb;
2174 conn->security_cfm_cb = pairing_complete_cb;
2175 conn->disconn_cfm_cb = pairing_complete_cb;
2176 conn->io_capability = cp->io_cap;
2177 cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2179 if (conn->state == BT_CONNECTED &&
2180 hci_conn_security(conn, sec_level, auth_type))
2181 pairing_complete(cmd, 0);
2186 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler.
 *
 * Aborts the in-flight MGMT_OP_PAIR_DEVICE command after verifying that
 * the supplied address matches the connection being paired; the pairing
 * command itself is completed with MGMT_STATUS_CANCELLED.
 */
2190 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2193 struct mgmt_addr_info *addr = data;
2194 struct pending_cmd *cmd;
2195 struct hci_conn *conn;
2202 if (!hdev_is_powered(hdev)) {
2203 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2204 MGMT_STATUS_NOT_POWERED);
2208 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2210 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2211 MGMT_STATUS_INVALID_PARAMS);
2215 conn = cmd->user_data;
/* Address must match the pairing actually in progress. */
2217 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2218 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2219 MGMT_STATUS_INVALID_PARAMS);
2223 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2225 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2226 addr, sizeof(*addr));
2228 hci_dev_unlock(hdev);
/* Common implementation for the user confirmation/passkey (neg-)reply
 * management commands.
 *
 * Looks up the connection for @bdaddr/@type; for LE links the response is
 * routed to SMP, for BR/EDR the matching HCI command (@hci_op) is sent,
 * with @passkey included only for HCI_OP_USER_PASSKEY_REPLY.  The
 * management reply is deferred via a pending command keyed on @mgmt_op.
 */
2232 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2233 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2234 u16 hci_op, __le32 passkey)
2236 struct pending_cmd *cmd;
2237 struct hci_conn *conn;
2242 if (!hdev_is_powered(hdev)) {
2243 err = cmd_status(sk, hdev->id, mgmt_op,
2244 MGMT_STATUS_NOT_POWERED);
2248 if (type == BDADDR_BREDR)
2249 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2251 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2254 err = cmd_status(sk, hdev->id, mgmt_op,
2255 MGMT_STATUS_NOT_CONNECTED);
/* LE pairing is handled entirely by SMP, not HCI commands. */
2259 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2260 /* Continue with pairing via SMP */
2261 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2264 err = cmd_status(sk, hdev->id, mgmt_op,
2265 MGMT_STATUS_SUCCESS);
2267 err = cmd_status(sk, hdev->id, mgmt_op,
2268 MGMT_STATUS_FAILED);
2273 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2279 /* Continue with pairing via HCI */
/* Passkey replies need the numeric value; all other ops only carry
 * the remote address.
 */
2280 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2281 struct hci_cp_user_passkey_reply cp;
2283 bacpy(&cp.bdaddr, bdaddr);
2284 cp.passkey = passkey;
2285 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2287 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2290 mgmt_pending_remove(cmd);
2293 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to the
 * generic user_pairing_resp() helper (no passkey value).
 */
2297 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2298 void *data, u16 len)
2300 struct mgmt_cp_pin_code_neg_reply *cp = data;
2304 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2305 MGMT_OP_PIN_CODE_NEG_REPLY,
2306 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the fixed command size
 * then delegates to user_pairing_resp() (no passkey value).
 */
2309 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2312 struct mgmt_cp_user_confirm_reply *cp = data;
2316 if (len != sizeof(*cp))
2317 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2318 MGMT_STATUS_INVALID_PARAMS);
2320 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2321 MGMT_OP_USER_CONFIRM_REPLY,
2322 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() (no passkey value).
 */
2325 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2326 void *data, u16 len)
2328 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2332 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2333 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2334 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered passkey
 * through user_pairing_resp().
 */
2337 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2340 struct mgmt_cp_user_passkey_reply *cp = data;
2344 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2345 MGMT_OP_USER_PASSKEY_REPLY,
2346 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() (no passkey value).
 */
2349 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2350 void *data, u16 len)
2352 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2356 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2357 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2358 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command carrying hdev->dev_name to the
 * given HCI request (does not run the request itself).
 */
2361 static void update_name(struct hci_request *req)
2363 struct hci_dev *hdev = req->hdev;
2364 struct hci_cp_write_local_name cp;
2366 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2368 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME: replies to
 * the pending command with either a translated error status or success,
 * then drops the pending entry.
 */
2371 static void set_name_complete(struct hci_dev *hdev, u8 status)
2373 struct mgmt_cp_set_local_name *cp;
2374 struct pending_cmd *cmd;
2376 BT_DBG("status 0x%02x", status);
2380 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2387 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2388 mgmt_status(status));
2390 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2393 mgmt_pending_remove(cmd);
2396 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler.
 *
 * Stores the short name unconditionally.  When the controller is not
 * powered the full name is only cached and a LOCAL_NAME_CHANGED event is
 * broadcast; when powered, an HCI request updates the controller name
 * (and, for LE-capable controllers, the advertising data), with the reply
 * deferred to set_name_complete().
 */
2399 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2402 struct mgmt_cp_set_local_name *cp = data;
2403 struct pending_cmd *cmd;
2404 struct hci_request req;
/* Short name only lives in host state; no HCI command needed. */
2411 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2413 if (!hdev_is_powered(hdev)) {
2414 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2416 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2421 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2427 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2433 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2435 hci_req_init(&req, hdev);
2437 if (lmp_bredr_capable(hdev)) {
/* Keep LE advertising data in sync with the new name. */
2442 if (lmp_le_capable(hdev))
2443 hci_update_ad(&req);
2445 err = hci_req_run(&req, set_name_complete);
2447 mgmt_pending_remove(cmd);
2450 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler.
 *
 * Requests the controller's local out-of-band pairing data (hash and
 * randomizer).  Requires a powered, SSP-capable controller, and only one
 * such request may be outstanding; the reply arrives via the HCI event
 * matched to the pending command.
 */
2454 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2455 void *data, u16 data_len)
2457 struct pending_cmd *cmd;
2460 BT_DBG("%s", hdev->name);
2464 if (!hdev_is_powered(hdev)) {
2465 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2466 MGMT_STATUS_NOT_POWERED);
/* OOB data only exists with Secure Simple Pairing support. */
2470 if (!lmp_ssp_capable(hdev)) {
2471 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2472 MGMT_STATUS_NOT_SUPPORTED);
2476 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2477 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2482 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2488 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2490 mgmt_pending_remove(cmd);
2493 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores the remote device's OOB
 * hash/randomizer for later pairing and replies synchronously with the
 * outcome, echoing the address.
 */
2497 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2498 void *data, u16 len)
2500 struct mgmt_cp_add_remote_oob_data *cp = data;
2504 BT_DBG("%s ", hdev->name);
2508 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2511 status = MGMT_STATUS_FAILED;
2513 status = MGMT_STATUS_SUCCESS;
2515 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2516 &cp->addr, sizeof(cp->addr));
2518 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: deletes previously stored OOB
 * data for the address and replies synchronously; a lookup failure maps
 * to INVALID_PARAMS.
 */
2522 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2523 void *data, u16 len)
2525 struct mgmt_cp_remove_remote_oob_data *cp = data;
2529 BT_DBG("%s", hdev->name);
2533 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2535 status = MGMT_STATUS_INVALID_PARAMS;
2537 status = MGMT_STATUS_SUCCESS;
2539 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2540 status, &cp->addr, sizeof(cp->addr));
2542 hci_dev_unlock(hdev);
/* Kick off the BR/EDR inquiry phase of an interleaved (BR/EDR + LE)
 * discovery.  On failure the discovery state machine is reset to STOPPED.
 */
2546 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2550 BT_DBG("%s", hdev->name);
2554 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2556 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2558 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler.
 *
 * Starts device discovery of the requested type (BR/EDR inquiry, LE scan,
 * or interleaved), after checking power state, that no periodic inquiry
 * is running, and that discovery is currently stopped.  Each type is
 * validated against the controller's capabilities before issuing the
 * corresponding inquiry/scan; on success the state machine moves to
 * DISCOVERY_STARTING and the reply is deferred via the pending command.
 */
2563 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2564 void *data, u16 len)
2566 struct mgmt_cp_start_discovery *cp = data;
2567 struct pending_cmd *cmd;
2570 BT_DBG("%s", hdev->name);
2574 if (!hdev_is_powered(hdev)) {
2575 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2576 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry occupies the controller's inquiry resource. */
2580 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2581 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2586 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2587 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2592 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2598 hdev->discovery.type = cp->type;
2600 switch (hdev->discovery.type) {
2601 case DISCOV_TYPE_BREDR:
2602 if (!lmp_bredr_capable(hdev)) {
2603 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2604 MGMT_STATUS_NOT_SUPPORTED);
2605 mgmt_pending_remove(cmd);
2609 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2612 case DISCOV_TYPE_LE:
2613 if (!lmp_host_le_capable(hdev)) {
2614 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2615 MGMT_STATUS_NOT_SUPPORTED);
2616 mgmt_pending_remove(cmd);
2620 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2621 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
/* Interleaved: LE scan first, BR/EDR inquiry follows when the LE
 * phase times out (see mgmt_interleaved_discovery).
 */
2624 case DISCOV_TYPE_INTERLEAVED:
2625 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2626 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2627 MGMT_STATUS_NOT_SUPPORTED);
2628 mgmt_pending_remove(cmd);
2632 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2633 LE_SCAN_TIMEOUT_BREDR_LE);
2637 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2638 MGMT_STATUS_INVALID_PARAMS);
2639 mgmt_pending_remove(cmd);
2644 mgmt_pending_remove(cmd);
2646 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2649 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler.
 *
 * Stops an active discovery of the matching type.  In the FINDING state
 * it cancels the ongoing inquiry or LE scan; in the RESOLVING state it
 * cancels the outstanding remote-name request (or, when no entry is still
 * being resolved, completes immediately).  Otherwise the state moves to
 * DISCOVERY_STOPPING and the reply is deferred via the pending command.
 */
2653 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2656 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2657 struct pending_cmd *cmd;
2658 struct hci_cp_remote_name_req_cancel cp;
2659 struct inquiry_entry *e;
2662 BT_DBG("%s", hdev->name);
2666 if (!hci_discovery_active(hdev)) {
2667 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2668 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2669 sizeof(mgmt_cp->type));
/* The requested type must match the discovery actually running. */
2673 if (hdev->discovery.type != mgmt_cp->type) {
2674 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2675 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2676 sizeof(mgmt_cp->type));
2680 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2686 switch (hdev->discovery.state) {
2687 case DISCOVERY_FINDING:
2688 if (test_bit(HCI_INQUIRY, &hdev->flags))
2689 err = hci_cancel_inquiry(hdev);
2691 err = hci_cancel_le_scan(hdev);
2695 case DISCOVERY_RESOLVING:
2696 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* Nothing left to resolve: finish synchronously. */
2699 mgmt_pending_remove(cmd);
2700 err = cmd_complete(sk, hdev->id,
2701 MGMT_OP_STOP_DISCOVERY, 0,
2703 sizeof(mgmt_cp->type));
2704 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2708 bacpy(&cp.bdaddr, &e->data.bdaddr);
2709 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2715 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2720 mgmt_pending_remove(cmd);
2722 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2725 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler.
 *
 * Userspace tells the kernel whether the name of a discovered device is
 * already known: known names are marked NAME_KNOWN (no resolution
 * needed), unknown ones are queued for name resolution via the inquiry
 * cache.  Only valid while discovery is active.
 */
2729 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2732 struct mgmt_cp_confirm_name *cp = data;
2733 struct inquiry_entry *e;
2736 BT_DBG("%s", hdev->name);
2740 if (!hci_discovery_active(hdev)) {
2741 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2742 MGMT_STATUS_FAILED);
2746 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2748 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2749 MGMT_STATUS_INVALID_PARAMS);
2753 if (cp->name_known) {
2754 e->name_state = NAME_KNOWN;
2757 e->name_state = NAME_NEEDED;
2758 hci_inquiry_cache_update_resolve(hdev, e);
2761 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2765 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: adds the address to the controller's
 * blacklist after validating the address type, and replies synchronously
 * with the outcome, echoing the address.
 */
2769 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2772 struct mgmt_cp_block_device *cp = data;
2776 BT_DBG("%s", hdev->name);
2778 if (!bdaddr_type_is_valid(cp->addr.type))
2779 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2780 MGMT_STATUS_INVALID_PARAMS,
2781 &cp->addr, sizeof(cp->addr));
2785 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2787 status = MGMT_STATUS_FAILED;
2789 status = MGMT_STATUS_SUCCESS;
2791 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2792 &cp->addr, sizeof(cp->addr));
2794 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: removes the address from the
 * blacklist; a failed delete (address not present) maps to
 * INVALID_PARAMS.  Replies synchronously, echoing the address.
 */
2799 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2802 struct mgmt_cp_unblock_device *cp = data;
2806 BT_DBG("%s", hdev->name);
2808 if (!bdaddr_type_is_valid(cp->addr.type))
2809 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2810 MGMT_STATUS_INVALID_PARAMS,
2811 &cp->addr, sizeof(cp->addr));
2815 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2817 status = MGMT_STATUS_INVALID_PARAMS;
2819 status = MGMT_STATUS_SUCCESS;
2821 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2822 &cp->addr, sizeof(cp->addr));
2824 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler.
 *
 * Stores the Device ID record (source, vendor, product, version) in host
 * state.  source may be 0x0000 (disabled), 0x0001 (Bluetooth SIG) or
 * 0x0002 (USB IF); larger values are rejected.  The reply is sent before
 * the follow-up HCI request is run.
 */
2829 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2832 struct mgmt_cp_set_device_id *cp = data;
2833 struct hci_request req;
2837 BT_DBG("%s", hdev->name);
2839 source = __le16_to_cpu(cp->source);
2841 if (source > 0x0002)
2842 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2843 MGMT_STATUS_INVALID_PARAMS);
2847 hdev->devid_source = source;
2848 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2849 hdev->devid_product = __le16_to_cpu(cp->product);
2850 hdev->devid_version = __le16_to_cpu(cp->version);
2852 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
/* Fire-and-forget request; no completion callback needed. */
2854 hci_req_init(&req, hdev);
2856 hci_req_run(&req, NULL);
2858 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 *
 * Toggles fast-connectable mode by reprogramming the BR/EDR page scan:
 * enabled means interlaced scan with a 160 ms interval, disabled restores
 * the standard scan with the default 1.28 s interval; the 11.25 ms window
 * is common to both.  Requires BR/EDR support, a powered controller, and
 * connectable mode.
 */
2863 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2864 void *data, u16 len)
2866 struct mgmt_mode *cp = data;
2867 struct hci_cp_write_page_scan_activity acp;
2871 BT_DBG("%s", hdev->name);
2873 if (!lmp_bredr_capable(hdev))
2874 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2875 MGMT_STATUS_NOT_SUPPORTED);
2877 if (cp->val != 0x00 && cp->val != 0x01)
2878 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2879 MGMT_STATUS_INVALID_PARAMS);
2881 if (!hdev_is_powered(hdev))
2882 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2883 MGMT_STATUS_NOT_POWERED);
/* Page scan parameters are meaningless while not connectable. */
2885 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2886 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2887 MGMT_STATUS_REJECTED);
2892 type = PAGE_SCAN_TYPE_INTERLACED;
2894 /* 160 msec page scan interval */
2895 acp.interval = __constant_cpu_to_le16(0x0100);
2897 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2899 /* default 1.28 sec page scan */
2900 acp.interval = __constant_cpu_to_le16(0x0800);
2903 /* default 11.25 msec page scan window */
2904 acp.window = __constant_cpu_to_le16(0x0012);
2906 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2909 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2910 MGMT_STATUS_FAILED);
2914 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2916 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2917 MGMT_STATUS_FAILED);
2921 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2924 hci_dev_unlock(hdev);
/* Validate one mgmt_ltk_info entry: authenticated and master must be
 * strict booleans and the address type must be an LE type.
 */
2928 static bool ltk_is_valid(struct mgmt_ltk_info *key)
2930 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2932 if (key->master != 0x00 && key->master != 0x01)
2934 if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler (variable-length command).
 *
 * Replaces the stored SMP long term keys with the supplied list.  The
 * payload length must exactly match the declared key_count and every
 * entry must pass ltk_is_valid() before any stored state is touched; the
 * old LTKs are then cleared and each new key added via hci_add_ltk().
 */
2939 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2940 void *cp_data, u16 len)
2942 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2943 u16 key_count, expected_len;
2946 key_count = __le16_to_cpu(cp->key_count);
/* Guard against a forged key_count that disagrees with the actual
 * payload size (prevents reading past the received buffer).
 */
2948 expected_len = sizeof(*cp) + key_count *
2949 sizeof(struct mgmt_ltk_info);
2950 if (expected_len != len) {
2951 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2953 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2954 MGMT_STATUS_INVALID_PARAMS);
2957 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate the whole list before clearing the existing keys. */
2959 for (i = 0; i < key_count; i++) {
2960 struct mgmt_ltk_info *key = &cp->keys[i];
2962 if (!ltk_is_valid(key))
2963 return cmd_status(sk, hdev->id,
2964 MGMT_OP_LOAD_LONG_TERM_KEYS,
2965 MGMT_STATUS_INVALID_PARAMS);
2970 hci_smp_ltks_clear(hdev);
2972 for (i = 0; i < key_count; i++) {
2973 struct mgmt_ltk_info *key = &cp->keys[i];
2979 type = HCI_SMP_LTK_SLAVE;
2981 hci_add_ltk(hdev, &key->addr.bdaddr,
2982 bdaddr_to_le(key->addr.type),
2983 type, 0, key->authenticated, key->val,
2984 key->enc_size, key->ediv, key->rand);
2987 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
2990 hci_dev_unlock(hdev);
/* Dispatch table for management commands, indexed by mgmt opcode.
 * Each entry gives the handler, whether the command is variable-length
 * (length checked as a minimum instead of an exact match in
 * mgmt_control()), and the expected parameter size.  Entry order must
 * match the MGMT_OP_* opcode numbering.
 */
2995 static const struct mgmt_handler {
2996 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3000 } mgmt_handlers[] = {
3001 { NULL }, /* 0x0000 (no command) */
3002 { read_version, false, MGMT_READ_VERSION_SIZE },
3003 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3004 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3005 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3006 { set_powered, false, MGMT_SETTING_SIZE },
3007 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3008 { set_connectable, false, MGMT_SETTING_SIZE },
3009 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3010 { set_pairable, false, MGMT_SETTING_SIZE },
3011 { set_link_security, false, MGMT_SETTING_SIZE },
3012 { set_ssp, false, MGMT_SETTING_SIZE },
3013 { set_hs, false, MGMT_SETTING_SIZE },
3014 { set_le, false, MGMT_SETTING_SIZE },
3015 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3016 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3017 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3018 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3019 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3020 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3021 { disconnect, false, MGMT_DISCONNECT_SIZE },
3022 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3023 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3024 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3025 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3026 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3027 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3028 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3029 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3030 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3031 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3032 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3033 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3034 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3035 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3036 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3037 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3038 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3039 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3040 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3041 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for management commands arriving on an HCI control socket.
 *
 * Copies the message from userspace, parses the mgmt header (opcode,
 * controller index, payload length), resolves the target hci_dev,
 * validates opcode range, index/opcode pairing (commands below
 * MGMT_OP_READ_INFO are global and take no index, the rest require one)
 * and payload length against the dispatch table, then invokes the
 * handler.  Errors are reported back to the sender as command status.
 */
3045 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3049 struct mgmt_hdr *hdr;
3050 u16 opcode, index, len;
3051 struct hci_dev *hdev = NULL;
3052 const struct mgmt_handler *handler;
3055 BT_DBG("got %zu bytes", msglen);
3057 if (msglen < sizeof(*hdr))
3060 buf = kmalloc(msglen, GFP_KERNEL);
3064 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3070 opcode = __le16_to_cpu(hdr->opcode);
3071 index = __le16_to_cpu(hdr->index);
3072 len = __le16_to_cpu(hdr->len);
/* Header length field must agree with the bytes actually received. */
3074 if (len != msglen - sizeof(*hdr)) {
3079 if (index != MGMT_INDEX_NONE) {
3080 hdev = hci_dev_get(index);
3082 err = cmd_status(sk, index, opcode,
3083 MGMT_STATUS_INVALID_INDEX);
3088 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3089 mgmt_handlers[opcode].func == NULL) {
3090 BT_DBG("Unknown op %u", opcode);
3091 err = cmd_status(sk, index, opcode,
3092 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global commands (< READ_INFO) must not carry an index;
 * per-controller commands must.
 */
3096 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3097 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3098 err = cmd_status(sk, index, opcode,
3099 MGMT_STATUS_INVALID_INDEX);
3103 handler = &mgmt_handlers[opcode];
/* var_len commands take the table size as a minimum; fixed-size
 * commands must match exactly.
 */
3105 if ((handler->var_len && len < handler->data_len) ||
3106 (!handler->var_len && len != handler->data_len)) {
3107 err = cmd_status(sk, index, opcode,
3108 MGMT_STATUS_INVALID_PARAMS);
3113 mgmt_init_hdev(sk, hdev);
3115 cp = buf + sizeof(*hdr);
3117 err = handler->func(sk, hdev, cp, len);
3131 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3135 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3136 mgmt_pending_remove(cmd);
3139 int mgmt_index_added(struct hci_dev *hdev)
3141 if (!mgmt_valid_hdev(hdev))
3144 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3147 int mgmt_index_removed(struct hci_dev *hdev)
3149 u8 status = MGMT_STATUS_INVALID_INDEX;
3151 if (!mgmt_valid_hdev(hdev))
3154 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3156 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3161 struct hci_dev *hdev;
3165 static void settings_rsp(struct pending_cmd *cmd, void *data)
3167 struct cmd_lookup *match = data;
3169 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3171 list_del(&cmd->list);
3173 if (match->sk == NULL) {
3174 match->sk = cmd->sk;
3175 sock_hold(match->sk);
3178 mgmt_pending_free(cmd);
3181 static void set_bredr_scan(struct hci_request *req)
3183 struct hci_dev *hdev = req->hdev;
3186 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3188 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3189 scan |= SCAN_INQUIRY;
3192 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3195 static void powered_complete(struct hci_dev *hdev, u8 status)
3197 struct cmd_lookup match = { NULL, hdev };
3199 BT_DBG("status 0x%02x", status);
3203 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3205 new_settings(hdev, match.sk);
3207 hci_dev_unlock(hdev);
/* powered_update_hci() - build and run the HCI command sequence that brings
 * the controller's host-side settings (SSP, LE host support, auth enable,
 * scan mode) in line with the mgmt dev_flags after power-on.  Returns the
 * result of hci_req_run(); completion is powered_complete().
 *
 * NOTE(review): elided extraction — declarations of "ssp"/"link_sec",
 * several guards, the update_class/update_name/update_eir tail and braces
 * are missing between fragments.  Code left byte-identical.
 */
3213 static int powered_update_hci(struct hci_dev *hdev)
3215 struct hci_request req;
3218 hci_req_init(&req, hdev);
/* Enable SSP in the controller when mgmt wants it but the controller
 * doesn't have it on yet.
 */
3220 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3221 !lmp_host_ssp_capable(hdev)) {
3224 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3227 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3228 struct hci_cp_write_le_host_supported cp;
3231 cp.simul = lmp_le_br_capable(hdev);
3233 /* Check first if we already have the right
3234 * host state (host features set)
3236 if (cp.le != lmp_host_le_capable(hdev) ||
3237 cp.simul != lmp_host_le_br_capable(hdev))
3238 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Sync the authentication-enable setting with the LINK_SECURITY flag. */
3242 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3243 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3244 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3245 sizeof(link_sec), &link_sec);
/* BR/EDR-specific setup: scan mode (and, in the elided part,
 * class/name/EIR updates).
 */
3247 if (lmp_bredr_capable(hdev)) {
3248 set_bredr_scan(&req);
3254 return hci_req_run(&req, powered_complete);
/* mgmt_powered() - called when the controller's power state changed.
 * On power-up it kicks off powered_update_hci() (deferring the responses
 * to powered_complete()); on power-down it answers all pending commands,
 * broadcasts a zeroed class-of-device, and emits new settings.
 *
 * NOTE(review): elided extraction — the powered/!powered branching, early
 * returns, sock_put tail and braces are missing between fragments.  Code
 * left byte-identical.
 */
3257 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3259 struct cmd_lookup match = { NULL, hdev };
3260 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3261 u8 zero_cod[] = { 0, 0, 0 };
/* Nothing to do for controllers not managed through mgmt. */
3264 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Power-up path: if the HCI request was queued, powered_complete()
 * will send the responses later.
 */
3268 if (powered_update_hci(hdev) == 0)
3271 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-down path: answer SET_POWERED, fail everything else. */
3276 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3277 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
/* A powered-off controller reports an all-zero class of device. */
3279 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3280 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3281 zero_cod, sizeof(zero_cod), NULL);
3284 err = new_settings(hdev, match.sk);
3292 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3294 struct cmd_lookup match = { NULL, hdev };
3295 bool changed = false;
3299 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3302 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3306 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3310 err = new_settings(hdev, match.sk);
3318 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3320 struct cmd_lookup match = { NULL, hdev };
3321 bool changed = false;
3325 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3328 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3332 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
3336 err = new_settings(hdev, match.sk);
3344 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3346 u8 mgmt_err = mgmt_status(status);
3348 if (scan & SCAN_PAGE)
3349 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3350 cmd_status_rsp, &mgmt_err);
3352 if (scan & SCAN_INQUIRY)
3353 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3354 cmd_status_rsp, &mgmt_err);
3359 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3362 struct mgmt_ev_new_link_key ev;
3364 memset(&ev, 0, sizeof(ev));
3366 ev.store_hint = persistent;
3367 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3368 ev.key.addr.type = BDADDR_BREDR;
3369 ev.key.type = key->type;
3370 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3371 ev.key.pin_len = key->pin_len;
3373 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3376 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3378 struct mgmt_ev_new_long_term_key ev;
3380 memset(&ev, 0, sizeof(ev));
3382 ev.store_hint = persistent;
3383 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3384 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3385 ev.key.authenticated = key->authenticated;
3386 ev.key.enc_size = key->enc_size;
3387 ev.key.ediv = key->ediv;
3389 if (key->type == HCI_SMP_LTK)
3392 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3393 memcpy(ev.key.val, key->val, sizeof(key->val));
3395 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3399 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3400 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3404 struct mgmt_ev_device_connected *ev = (void *) buf;
3407 bacpy(&ev->addr.bdaddr, bdaddr);
3408 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3410 ev->flags = __cpu_to_le32(flags);
3413 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3416 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3417 eir_len = eir_append_data(ev->eir, eir_len,
3418 EIR_CLASS_OF_DEV, dev_class, 3);
3420 ev->eir_len = cpu_to_le16(eir_len);
3422 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3423 sizeof(*ev) + eir_len, NULL);
3426 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3428 struct mgmt_cp_disconnect *cp = cmd->param;
3429 struct sock **sk = data;
3430 struct mgmt_rp_disconnect rp;
3432 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3433 rp.addr.type = cp->addr.type;
3435 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3441 mgmt_pending_remove(cmd);
3444 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3446 struct hci_dev *hdev = data;
3447 struct mgmt_cp_unpair_device *cp = cmd->param;
3448 struct mgmt_rp_unpair_device rp;
3450 memset(&rp, 0, sizeof(rp));
3451 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3452 rp.addr.type = cp->addr.type;
3454 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3456 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3458 mgmt_pending_remove(cmd);
3461 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3462 u8 link_type, u8 addr_type, u8 reason)
3464 struct mgmt_ev_device_disconnected ev;
3465 struct sock *sk = NULL;
3468 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3470 bacpy(&ev.addr.bdaddr, bdaddr);
3471 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3474 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3480 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3486 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3487 u8 link_type, u8 addr_type, u8 status)
3489 struct mgmt_rp_disconnect rp;
3490 struct pending_cmd *cmd;
3493 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3496 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3500 bacpy(&rp.addr.bdaddr, bdaddr);
3501 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3503 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3504 mgmt_status(status), &rp, sizeof(rp));
3506 mgmt_pending_remove(cmd);
3511 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3512 u8 addr_type, u8 status)
3514 struct mgmt_ev_connect_failed ev;
3516 bacpy(&ev.addr.bdaddr, bdaddr);
3517 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3518 ev.status = mgmt_status(status);
3520 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3523 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3525 struct mgmt_ev_pin_code_request ev;
3527 bacpy(&ev.addr.bdaddr, bdaddr);
3528 ev.addr.type = BDADDR_BREDR;
3531 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3535 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3538 struct pending_cmd *cmd;
3539 struct mgmt_rp_pin_code_reply rp;
3542 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3546 bacpy(&rp.addr.bdaddr, bdaddr);
3547 rp.addr.type = BDADDR_BREDR;
3549 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3550 mgmt_status(status), &rp, sizeof(rp));
3552 mgmt_pending_remove(cmd);
3557 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3560 struct pending_cmd *cmd;
3561 struct mgmt_rp_pin_code_reply rp;
3564 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3568 bacpy(&rp.addr.bdaddr, bdaddr);
3569 rp.addr.type = BDADDR_BREDR;
3571 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3572 mgmt_status(status), &rp, sizeof(rp));
3574 mgmt_pending_remove(cmd);
3579 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3580 u8 link_type, u8 addr_type, __le32 value,
3583 struct mgmt_ev_user_confirm_request ev;
3585 BT_DBG("%s", hdev->name);
3587 bacpy(&ev.addr.bdaddr, bdaddr);
3588 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3589 ev.confirm_hint = confirm_hint;
3592 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3596 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3597 u8 link_type, u8 addr_type)
3599 struct mgmt_ev_user_passkey_request ev;
3601 BT_DBG("%s", hdev->name);
3603 bacpy(&ev.addr.bdaddr, bdaddr);
3604 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3606 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3610 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3611 u8 link_type, u8 addr_type, u8 status,
3614 struct pending_cmd *cmd;
3615 struct mgmt_rp_user_confirm_reply rp;
3618 cmd = mgmt_pending_find(opcode, hdev);
3622 bacpy(&rp.addr.bdaddr, bdaddr);
3623 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3624 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3627 mgmt_pending_remove(cmd);
3632 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3633 u8 link_type, u8 addr_type, u8 status)
3635 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3636 status, MGMT_OP_USER_CONFIRM_REPLY);
3639 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3640 u8 link_type, u8 addr_type, u8 status)
3642 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3644 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3647 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3648 u8 link_type, u8 addr_type, u8 status)
3650 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3651 status, MGMT_OP_USER_PASSKEY_REPLY);
3654 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3655 u8 link_type, u8 addr_type, u8 status)
3657 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3659 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3662 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3663 u8 link_type, u8 addr_type, u32 passkey,
3666 struct mgmt_ev_passkey_notify ev;
3668 BT_DBG("%s", hdev->name);
3670 bacpy(&ev.addr.bdaddr, bdaddr);
3671 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3672 ev.passkey = __cpu_to_le32(passkey);
3673 ev.entered = entered;
3675 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3678 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3679 u8 addr_type, u8 status)
3681 struct mgmt_ev_auth_failed ev;
3683 bacpy(&ev.addr.bdaddr, bdaddr);
3684 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3685 ev.status = mgmt_status(status);
3687 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3690 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3692 struct cmd_lookup match = { NULL, hdev };
3693 bool changed = false;
3697 u8 mgmt_err = mgmt_status(status);
3698 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3699 cmd_status_rsp, &mgmt_err);
3703 if (test_bit(HCI_AUTH, &hdev->flags)) {
3704 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3707 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3711 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3715 err = new_settings(hdev, match.sk);
3723 static void clear_eir(struct hci_request *req)
3725 struct hci_dev *hdev = req->hdev;
3726 struct hci_cp_write_eir cp;
3728 if (!lmp_ext_inq_capable(hdev))
3731 memset(hdev->eir, 0, sizeof(hdev->eir));
3733 memset(&cp, 0, sizeof(cp));
3735 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* mgmt_ssp_enable_complete() - Write Simple Pairing Mode completed.
 * On error, roll back the HCI_SSP_ENABLED flag and fail pending SET_SSP
 * commands; on success, sync the flag, answer pending commands, broadcast
 * new settings when changed, and queue an EIR update (update or clear)
 * matching the new SSP state.
 *
 * NOTE(review): elided extraction — the status/else frames, the
 * update_eir/clear_eir branch bodies, sock_put tail and braces are
 * missing between fragments.  Code left byte-identical.
 */
3738 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3740 struct cmd_lookup match = { NULL, hdev };
3741 struct hci_request req;
3742 bool changed = false;
/* Error path: undo an optimistic flag set and fail pending commands. */
3746 u8 mgmt_err = mgmt_status(status);
3748 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3750 err = new_settings(hdev, NULL);
3752 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
/* Success path: sync HCI_SSP_ENABLED with the requested state. */
3759 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3762 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3766 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3769 err = new_settings(hdev, match.sk);
/* Refresh the EIR data to match the new SSP state. */
3774 hci_req_init(&req, hdev);
3776 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3781 hci_req_run(&req, NULL);
3786 static void sk_lookup(struct pending_cmd *cmd, void *data)
3788 struct cmd_lookup *match = data;
3790 if (match->sk == NULL) {
3791 match->sk = cmd->sk;
3792 sock_hold(match->sk);
/* mgmt_set_class_of_dev_complete() - Write Class of Device completed.
 * Finds the socket of whichever pending command triggered the write
 * (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) and broadcasts the class
 * change event.
 *
 * NOTE(review): elided extraction — the status guard around the event,
 * the event's trailing arguments, the sock_put tail and braces are
 * missing between fragments.  Code left byte-identical.
 */
3796 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3799 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
/* Any of these three commands can cause a class-of-device write. */
3802 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3803 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3804 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3807 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* mgmt_set_local_name_complete() - Write Local Name completed.  Builds a
 * LOCAL_NAME_CHANGED event carrying both the long and short name; when no
 * SET_LOCAL_NAME command is pending the name change came from the
 * controller side and the cached hdev->dev_name is updated.  Suppressed
 * while a SET_POWERED command is pending (power-on name programming).
 *
 * NOTE(review): elided extraction — the status guard, the !cmd branch
 * frame, early returns and braces are missing between fragments.  Code
 * left byte-identical.
 */
3816 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3818 struct mgmt_cp_set_local_name ev;
3819 struct pending_cmd *cmd;
3824 memset(&ev, 0, sizeof(ev));
3825 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3826 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3828 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* No pending command: controller-initiated change, cache it. */
3830 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3832 /* If this is a HCI command related to powering on the
3833 * HCI dev don't send any mgmt signals.
3835 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3839 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3840 cmd ? cmd->sk : NULL);
/* mgmt_read_local_oob_data_reply_complete() - Read Local OOB Data
 * completed.  Completes the pending READ_LOCAL_OOB_DATA command: with a
 * status reply on failure, or with the hash/randomizer pair on success.
 *
 * NOTE(review): elided extraction — the !cmd early return, the if/else
 * frame around the two reply forms, the cmd_complete size argument, the
 * final return and braces are missing between fragments.  Code left
 * byte-identical.
 */
3843 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3844 u8 *randomizer, u8 status)
3846 struct pending_cmd *cmd;
3849 BT_DBG("%s status %u", hdev->name, status);
3851 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
/* Failure: report only the mapped status. */
3856 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3857 mgmt_status(status));
/* Success: return the C/R values read from the controller. */
3859 struct mgmt_rp_read_local_oob_data rp;
3861 memcpy(rp.hash, hash, sizeof(rp.hash));
3862 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3864 err = cmd_complete(cmd->sk, hdev->id,
3865 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3869 mgmt_pending_remove(cmd);
/* mgmt_le_enable_complete() - Write LE Host Supported completed.
 * On error, roll back the HCI_LE_ENABLED flag and fail pending SET_LE
 * commands; on success, sync the flag, answer pending commands and
 * broadcast new settings when the flag flipped.
 *
 * NOTE(review): elided extraction — the status/else frames, early
 * returns, sock_put tail and braces are missing between fragments.  Code
 * left byte-identical.
 */
3874 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3876 struct cmd_lookup match = { NULL, hdev };
3877 bool changed = false;
/* Error path: undo an optimistic flag set and fail pending commands. */
3881 u8 mgmt_err = mgmt_status(status);
3883 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3885 err = new_settings(hdev, NULL);
3887 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
/* Success path: sync HCI_LE_ENABLED with the requested state. */
3894 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3897 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3901 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3904 err = new_settings(hdev, match.sk);
/* mgmt_device_found() - report a device discovered during inquiry/scan as
 * MGMT_EV_DEVICE_FOUND, copying the raw EIR data and appending a class-of-
 * device field when the controller reported one that the EIR lacks.
 * Flags carry "confirm name" and "legacy pairing" hints for userspace.
 *
 * NOTE(review): elided extraction — the buffer/ev_size/eir_len
 * declarations, rssi assignment, the cfm_name/ssp/eir_len guards, early
 * return and braces are missing between fragments.  Code left
 * byte-identical.
 */
3912 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3913 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3914 ssp, u8 *eir, u16 eir_len)
3917 struct mgmt_ev_device_found *ev = (void *) buf;
3920 /* Leave 5 bytes for a potential CoD field */
3921 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3924 memset(buf, 0, sizeof(buf));
3926 bacpy(&ev->addr.bdaddr, bdaddr);
3927 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Ask userspace to confirm the name / note legacy (non-SSP) pairing. */
3930 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3932 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3935 memcpy(ev->eir, eir, eir_len);
/* Append CoD only when the EIR doesn't already carry one. */
3937 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
3938 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3941 ev->eir_len = cpu_to_le16(eir_len);
3942 ev_size = sizeof(*ev) + eir_len;
3944 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
3947 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3948 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
3950 struct mgmt_ev_device_found *ev;
3951 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3954 ev = (struct mgmt_ev_device_found *) buf;
3956 memset(buf, 0, sizeof(buf));
3958 bacpy(&ev->addr.bdaddr, bdaddr);
3959 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3962 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3965 ev->eir_len = cpu_to_le16(eir_len);
3967 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3968 sizeof(*ev) + eir_len, NULL);
3971 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3973 struct pending_cmd *cmd;
3977 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3979 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3983 type = hdev->discovery.type;
3985 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3986 &type, sizeof(type));
3987 mgmt_pending_remove(cmd);
3992 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3994 struct pending_cmd *cmd;
3997 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4001 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4002 &hdev->discovery.type, sizeof(hdev->discovery.type));
4003 mgmt_pending_remove(cmd);
4008 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4010 struct mgmt_ev_discovering ev;
4011 struct pending_cmd *cmd;
4013 BT_DBG("%s discovering %u", hdev->name, discovering);
4016 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4018 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4021 u8 type = hdev->discovery.type;
4023 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4025 mgmt_pending_remove(cmd);
4028 memset(&ev, 0, sizeof(ev));
4029 ev.type = hdev->discovery.type;
4030 ev.discovering = discovering;
4032 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4035 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4037 struct pending_cmd *cmd;
4038 struct mgmt_ev_device_blocked ev;
4040 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4042 bacpy(&ev.addr.bdaddr, bdaddr);
4043 ev.addr.type = type;
4045 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4046 cmd ? cmd->sk : NULL);
4049 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4051 struct pending_cmd *cmd;
4052 struct mgmt_ev_device_unblocked ev;
4054 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4056 bacpy(&ev.addr.bdaddr, bdaddr);
4057 ev.addr.type = type;
4059 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4060 cmd ? cmd->sk : NULL);
/* Module parameter: allow toggling Bluetooth High Speed (AMP) support at
 * load time or via sysfs (mode 0644).  enable_hs itself is defined
 * elsewhere in the file.
 */
4063 module_param(enable_hs, bool, 0644);
4064 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");