2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* These LE scan and inquiry parameters were chosen according to LE General
 * Discovery Procedure specification.
 */
/* LE active scan with window == interval (continuous scanning). */
#define LE_SCAN_TYPE			0x01
#define LE_SCAN_WIN			0x12
#define LE_SCAN_INT			0x12
#define LE_SCAN_TIMEOUT_LE_ONLY		10240	/* TGAP(gen_disc_scan_min) */
#define LE_SCAN_TIMEOUT_BREDR_LE	5120	/* TGAP(100)/2 */

#define INQUIRY_LEN_BREDR		0x08	/* TGAP(100) */
#define INQUIRY_LEN_BREDR_LE		0x04	/* TGAP(100)/2 */

/* How long discovered-device cache entries stay valid. */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* Powered means HCI_UP and not in the auto-power-off grace period. */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
418 if (lmp_bredr_capable(hdev))
419 settings |= MGMT_SETTING_BREDR;
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
436 #define PNP_INFO_SVCLASS_ID 0x1200
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
546 static void create_eir(struct hci_dev *hdev, u8 *data)
551 name_len = strlen(hdev->dev_name);
557 ptr[1] = EIR_NAME_SHORT;
559 ptr[1] = EIR_NAME_COMPLETE;
561 /* EIR Data length */
562 ptr[0] = name_len + 1;
564 memcpy(ptr + 2, hdev->dev_name, name_len);
566 ptr += (name_len + 2);
569 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
571 ptr[1] = EIR_TX_POWER;
572 ptr[2] = (u8) hdev->inq_tx_power;
577 if (hdev->devid_source > 0) {
579 ptr[1] = EIR_DEVICE_ID;
581 put_unaligned_le16(hdev->devid_source, ptr + 2);
582 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
583 put_unaligned_le16(hdev->devid_product, ptr + 6);
584 put_unaligned_le16(hdev->devid_version, ptr + 8);
589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 static void update_eir(struct hci_request *req)
596 struct hci_dev *hdev = req->hdev;
597 struct hci_cp_write_eir cp;
599 if (!hdev_is_powered(hdev))
602 if (!lmp_ext_inq_capable(hdev))
605 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
608 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
611 memset(&cp, 0, sizeof(cp));
613 create_eir(hdev, cp.data);
615 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
618 memcpy(hdev->eir, cp.data, sizeof(cp.data));
620 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
623 static u8 get_service_classes(struct hci_dev *hdev)
625 struct bt_uuid *uuid;
628 list_for_each_entry(uuid, &hdev->uuids, list)
629 val |= uuid->svc_hint;
634 static void update_class(struct hci_request *req)
636 struct hci_dev *hdev = req->hdev;
639 BT_DBG("%s", hdev->name);
641 if (!hdev_is_powered(hdev))
644 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
647 cod[0] = hdev->minor_class;
648 cod[1] = hdev->major_class;
649 cod[2] = get_service_classes(hdev);
651 if (memcmp(cod, hdev->dev_class, 3) == 0)
654 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
657 static void service_cache_off(struct work_struct *work)
659 struct hci_dev *hdev = container_of(work, struct hci_dev,
661 struct hci_request req;
663 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
666 hci_req_init(&req, hdev);
673 hci_dev_unlock(hdev);
675 hci_req_run(&req, NULL);
678 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
680 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
683 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
685 /* Non-mgmt controlled devices get this bit set
686 * implicitly so that pairing works for them, however
687 * for mgmt we require user-space to explicitly enable
690 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
693 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
694 void *data, u16 data_len)
696 struct mgmt_rp_read_info rp;
698 BT_DBG("sock %p %s", sk, hdev->name);
702 memset(&rp, 0, sizeof(rp));
704 bacpy(&rp.bdaddr, &hdev->bdaddr);
706 rp.version = hdev->hci_ver;
707 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
709 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
710 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
712 memcpy(rp.dev_class, hdev->dev_class, 3);
714 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
715 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
717 hci_dev_unlock(hdev);
719 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
723 static void mgmt_pending_free(struct pending_cmd *cmd)
730 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
731 struct hci_dev *hdev, void *data,
734 struct pending_cmd *cmd;
736 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
740 cmd->opcode = opcode;
741 cmd->index = hdev->id;
743 cmd->param = kmalloc(len, GFP_KERNEL);
750 memcpy(cmd->param, data, len);
755 list_add(&cmd->list, &hdev->mgmt_pending);
760 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
761 void (*cb)(struct pending_cmd *cmd,
765 struct pending_cmd *cmd, *tmp;
767 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
768 if (opcode > 0 && cmd->opcode != opcode)
775 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
777 struct pending_cmd *cmd;
779 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
780 if (cmd->opcode == opcode)
787 static void mgmt_pending_remove(struct pending_cmd *cmd)
789 list_del(&cmd->list);
790 mgmt_pending_free(cmd);
793 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
795 __le32 settings = cpu_to_le32(get_current_settings(hdev));
797 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
801 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
804 struct mgmt_mode *cp = data;
805 struct pending_cmd *cmd;
808 BT_DBG("request for %s", hdev->name);
810 if (cp->val != 0x00 && cp->val != 0x01)
811 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
812 MGMT_STATUS_INVALID_PARAMS);
816 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
817 cancel_delayed_work(&hdev->power_off);
820 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
822 err = mgmt_powered(hdev, 1);
827 if (!!cp->val == hdev_is_powered(hdev)) {
828 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
832 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
833 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
838 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
845 queue_work(hdev->req_workqueue, &hdev->power_on);
847 queue_work(hdev->req_workqueue, &hdev->power_off.work);
852 hci_dev_unlock(hdev);
856 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
857 struct sock *skip_sk)
860 struct mgmt_hdr *hdr;
862 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
866 hdr = (void *) skb_put(skb, sizeof(*hdr));
867 hdr->opcode = cpu_to_le16(event);
869 hdr->index = cpu_to_le16(hdev->id);
871 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
872 hdr->len = cpu_to_le16(data_len);
875 memcpy(skb_put(skb, data_len), data, data_len);
878 __net_timestamp(skb);
880 hci_send_to_control(skb, skip_sk);
886 static int new_settings(struct hci_dev *hdev, struct sock *skip)
890 ev = cpu_to_le32(get_current_settings(hdev));
892 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
895 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
898 struct mgmt_cp_set_discoverable *cp = data;
899 struct pending_cmd *cmd;
904 BT_DBG("request for %s", hdev->name);
906 if (!lmp_bredr_capable(hdev))
907 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
908 MGMT_STATUS_NOT_SUPPORTED);
910 if (cp->val != 0x00 && cp->val != 0x01)
911 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
912 MGMT_STATUS_INVALID_PARAMS);
914 timeout = __le16_to_cpu(cp->timeout);
915 if (!cp->val && timeout > 0)
916 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
917 MGMT_STATUS_INVALID_PARAMS);
921 if (!hdev_is_powered(hdev) && timeout > 0) {
922 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
923 MGMT_STATUS_NOT_POWERED);
927 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
928 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
929 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
934 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
935 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
936 MGMT_STATUS_REJECTED);
940 if (!hdev_is_powered(hdev)) {
941 bool changed = false;
943 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
944 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
948 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
953 err = new_settings(hdev, sk);
958 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
959 if (hdev->discov_timeout > 0) {
960 cancel_delayed_work(&hdev->discov_off);
961 hdev->discov_timeout = 0;
964 if (cp->val && timeout > 0) {
965 hdev->discov_timeout = timeout;
966 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
967 msecs_to_jiffies(hdev->discov_timeout * 1000));
970 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
974 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
983 scan |= SCAN_INQUIRY;
985 cancel_delayed_work(&hdev->discov_off);
987 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
989 mgmt_pending_remove(cmd);
992 hdev->discov_timeout = timeout;
995 hci_dev_unlock(hdev);
999 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1002 struct mgmt_mode *cp = data;
1003 struct pending_cmd *cmd;
1007 BT_DBG("request for %s", hdev->name);
1009 if (!lmp_bredr_capable(hdev))
1010 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1011 MGMT_STATUS_NOT_SUPPORTED);
1013 if (cp->val != 0x00 && cp->val != 0x01)
1014 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1015 MGMT_STATUS_INVALID_PARAMS);
1019 if (!hdev_is_powered(hdev)) {
1020 bool changed = false;
1022 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1026 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1028 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1029 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1032 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1037 err = new_settings(hdev, sk);
1042 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1043 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1044 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1049 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1050 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1054 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1065 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1066 hdev->discov_timeout > 0)
1067 cancel_delayed_work(&hdev->discov_off);
1070 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1072 mgmt_pending_remove(cmd);
1075 hci_dev_unlock(hdev);
1079 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1082 struct mgmt_mode *cp = data;
1085 BT_DBG("request for %s", hdev->name);
1087 if (cp->val != 0x00 && cp->val != 0x01)
1088 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1089 MGMT_STATUS_INVALID_PARAMS);
1094 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1096 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1098 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1102 err = new_settings(hdev, sk);
1105 hci_dev_unlock(hdev);
1109 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1112 struct mgmt_mode *cp = data;
1113 struct pending_cmd *cmd;
1117 BT_DBG("request for %s", hdev->name);
1119 if (!lmp_bredr_capable(hdev))
1120 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1121 MGMT_STATUS_NOT_SUPPORTED);
1123 if (cp->val != 0x00 && cp->val != 0x01)
1124 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1125 MGMT_STATUS_INVALID_PARAMS);
1129 if (!hdev_is_powered(hdev)) {
1130 bool changed = false;
1132 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1133 &hdev->dev_flags)) {
1134 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1138 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1143 err = new_settings(hdev, sk);
1148 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1149 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1156 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1157 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1161 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1167 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1169 mgmt_pending_remove(cmd);
1174 hci_dev_unlock(hdev);
1178 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1180 struct mgmt_mode *cp = data;
1181 struct pending_cmd *cmd;
1185 BT_DBG("request for %s", hdev->name);
1187 if (!lmp_ssp_capable(hdev))
1188 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1189 MGMT_STATUS_NOT_SUPPORTED);
1191 if (cp->val != 0x00 && cp->val != 0x01)
1192 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1193 MGMT_STATUS_INVALID_PARAMS);
1199 if (!hdev_is_powered(hdev)) {
1200 bool changed = false;
1202 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1203 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1207 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1212 err = new_settings(hdev, sk);
1217 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1218 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1223 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1224 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1228 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1234 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1236 mgmt_pending_remove(cmd);
1241 hci_dev_unlock(hdev);
1245 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1247 struct mgmt_mode *cp = data;
1249 BT_DBG("request for %s", hdev->name);
1252 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1253 MGMT_STATUS_NOT_SUPPORTED);
1255 if (cp->val != 0x00 && cp->val != 0x01)
1256 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1257 MGMT_STATUS_INVALID_PARAMS);
1260 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1262 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1264 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1267 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1269 struct mgmt_mode *cp = data;
1270 struct hci_cp_write_le_host_supported hci_cp;
1271 struct pending_cmd *cmd;
1275 BT_DBG("request for %s", hdev->name);
1277 if (!lmp_le_capable(hdev))
1278 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1279 MGMT_STATUS_NOT_SUPPORTED);
1281 if (cp->val != 0x00 && cp->val != 0x01)
1282 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1283 MGMT_STATUS_INVALID_PARAMS);
1288 enabled = lmp_host_le_capable(hdev);
1290 if (!hdev_is_powered(hdev) || val == enabled) {
1291 bool changed = false;
1293 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1294 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1298 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1303 err = new_settings(hdev, sk);
1308 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1309 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1314 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1320 memset(&hci_cp, 0, sizeof(hci_cp));
1324 hci_cp.simul = lmp_le_br_capable(hdev);
1327 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1330 mgmt_pending_remove(cmd);
1333 hci_dev_unlock(hdev);
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
1343 static bool pending_eir_or_class(struct hci_dev *hdev)
1345 struct pending_cmd *cmd;
1347 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1348 switch (cmd->opcode) {
1349 case MGMT_OP_ADD_UUID:
1350 case MGMT_OP_REMOVE_UUID:
1351 case MGMT_OP_SET_DEV_CLASS:
1352 case MGMT_OP_SET_POWERED:
1360 static const u8 bluetooth_base_uuid[] = {
1361 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1362 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1365 static u8 get_uuid_size(const u8 *uuid)
1369 if (memcmp(uuid, bluetooth_base_uuid, 12))
1372 val = get_unaligned_le32(&uuid[12]);
1379 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1381 struct pending_cmd *cmd;
1385 cmd = mgmt_pending_find(mgmt_op, hdev);
1389 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1390 hdev->dev_class, 3);
1392 mgmt_pending_remove(cmd);
1395 hci_dev_unlock(hdev);
1398 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1400 BT_DBG("status 0x%02x", status);
1402 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1405 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1407 struct mgmt_cp_add_uuid *cp = data;
1408 struct pending_cmd *cmd;
1409 struct hci_request req;
1410 struct bt_uuid *uuid;
1413 BT_DBG("request for %s", hdev->name);
1417 if (pending_eir_or_class(hdev)) {
1418 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1423 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1429 memcpy(uuid->uuid, cp->uuid, 16);
1430 uuid->svc_hint = cp->svc_hint;
1431 uuid->size = get_uuid_size(cp->uuid);
1433 list_add_tail(&uuid->list, &hdev->uuids);
1435 hci_req_init(&req, hdev);
1440 err = hci_req_run(&req, add_uuid_complete);
1442 if (err != -ENODATA)
1445 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1446 hdev->dev_class, 3);
1450 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1459 hci_dev_unlock(hdev);
1463 static bool enable_service_cache(struct hci_dev *hdev)
1465 if (!hdev_is_powered(hdev))
1468 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1469 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1477 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1479 BT_DBG("status 0x%02x", status);
1481 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1484 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1487 struct mgmt_cp_remove_uuid *cp = data;
1488 struct pending_cmd *cmd;
1489 struct bt_uuid *match, *tmp;
1490 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1491 struct hci_request req;
1494 BT_DBG("request for %s", hdev->name);
1498 if (pending_eir_or_class(hdev)) {
1499 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1504 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1505 err = hci_uuids_clear(hdev);
1507 if (enable_service_cache(hdev)) {
1508 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1509 0, hdev->dev_class, 3);
1518 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1519 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1522 list_del(&match->list);
1528 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1529 MGMT_STATUS_INVALID_PARAMS);
1534 hci_req_init(&req, hdev);
1539 err = hci_req_run(&req, remove_uuid_complete);
1541 if (err != -ENODATA)
1544 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1545 hdev->dev_class, 3);
1549 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1558 hci_dev_unlock(hdev);
1562 static void set_class_complete(struct hci_dev *hdev, u8 status)
1564 BT_DBG("status 0x%02x", status);
1566 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: stores the major/minor class of device
 * and, when powered, pushes the update to the controller asynchronously.
 * NOTE(review): listing is elided — intermediate lines are missing. */
1569 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1572 struct mgmt_cp_set_dev_class *cp = data;
1573 struct pending_cmd *cmd;
1574 struct hci_request req;
1577 BT_DBG("request for %s", hdev->name);
/* Device class is a BR/EDR-only concept. */
1579 if (!lmp_bredr_capable(hdev))
1580 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1581 MGMT_STATUS_NOT_SUPPORTED);
1585 if (pending_eir_or_class(hdev)) {
1586 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low two bits of minor and high three bits of major are reserved. */
1591 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1592 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1593 MGMT_STATUS_INVALID_PARAMS);
1597 hdev->major_class = cp->major;
1598 hdev->minor_class = cp->minor;
/* When powered off, just cache the values and reply immediately. */
1600 if (!hdev_is_powered(hdev)) {
1601 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1602 hdev->dev_class, 3);
1606 hci_req_init(&req, hdev);
/* Flush a pending service-cache timer before touching the class;
 * the lock must be dropped for cancel_delayed_work_sync(). */
1608 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1609 hci_dev_unlock(hdev);
1610 cancel_delayed_work_sync(&hdev->service_cache);
1617 err = hci_req_run(&req, set_class_complete);
1619 if (err != -ENODATA)
1622 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1623 hdev->dev_class, 3);
1627 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1636 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler (variable-length command): validates the
 * payload size against key_count, checks every key's address type, then
 * replaces the device's stored BR/EDR link keys wholesale.
 * NOTE(review): listing is elided — intermediate lines are missing. */
1640 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1643 struct mgmt_cp_load_link_keys *cp = data;
1644 u16 key_count, expected_len;
1647 key_count = __le16_to_cpu(cp->key_count);
/* Payload must be exactly header + key_count entries. */
1649 expected_len = sizeof(*cp) + key_count *
1650 sizeof(struct mgmt_link_key_info);
1651 if (expected_len != len) {
1652 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1654 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1655 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a boolean flag: only 0x00/0x01 are legal. */
1658 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1659 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1660 MGMT_STATUS_INVALID_PARAMS);
1662 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all keys before mutating any state. */
1665 for (i = 0; i < key_count; i++) {
1666 struct mgmt_link_key_info *key = &cp->keys[i];
1668 if (key->addr.type != BDADDR_BREDR)
1669 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1670 MGMT_STATUS_INVALID_PARAMS);
/* Replace-all semantics: clear existing keys first. */
1675 hci_link_keys_clear(hdev);
1677 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1680 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1682 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1684 for (i = 0; i < key_count; i++) {
1685 struct mgmt_link_key_info *key = &cp->keys[i];
1687 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1688 key->type, key->pin_len);
1691 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1693 hci_dev_unlock(hdev);
/* Emit the Device Unpaired management event for the given address,
 * skipping the socket that initiated the unpair (it gets a reply instead). */
1698 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1699 u8 addr_type, struct sock *skip_sk)
1701 struct mgmt_ev_device_unpaired ev;
1703 bacpy(&ev.addr.bdaddr, bdaddr);
1704 ev.addr.type = addr_type;
1706 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: removes the stored link key (BR/EDR) or
 * LTK (LE) for the address and, when requested and a connection exists,
 * also disconnects it — in that case the reply is deferred via a pending
 * command until the disconnect completes.
 * NOTE(review): listing is elided — intermediate lines are missing. */
1710 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1713 struct mgmt_cp_unpair_device *cp = data;
1714 struct mgmt_rp_unpair_device rp;
1715 struct hci_cp_disconnect dc;
1716 struct pending_cmd *cmd;
1717 struct hci_conn *conn;
/* The reply always echoes the target address. */
1720 memset(&rp, 0, sizeof(rp));
1721 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1722 rp.addr.type = cp->addr.type;
1724 if (!bdaddr_type_is_valid(cp->addr.type))
1725 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1726 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a boolean flag: only 0x00/0x01 are legal. */
1729 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1730 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1731 MGMT_STATUS_INVALID_PARAMS,
1736 if (!hdev_is_powered(hdev)) {
1737 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1738 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Key removal differs by transport: link key for BR/EDR, LTK for LE. */
1742 if (cp->addr.type == BDADDR_BREDR)
1743 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1745 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1748 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1749 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1753 if (cp->disconnect) {
1754 if (cp->addr.type == BDADDR_BREDR)
1755 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1758 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1765 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1767 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect requested and connection found: defer reply. */
1771 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1778 dc.handle = cpu_to_le16(conn->handle);
1779 dc.reason = 0x13; /* Remote User Terminated Connection */
1780 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1782 mgmt_pending_remove(cmd);
1785 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: looks up the ACL (BR/EDR) or LE connection
 * for the address and issues HCI Disconnect; the management reply is
 * deferred via a pending command until the disconnect event arrives.
 * NOTE(review): listing is elided — intermediate lines are missing. */
1789 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1792 struct mgmt_cp_disconnect *cp = data;
1793 struct mgmt_rp_disconnect rp;
1794 struct hci_cp_disconnect dc;
1795 struct pending_cmd *cmd;
1796 struct hci_conn *conn;
1801 memset(&rp, 0, sizeof(rp));
1802 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1803 rp.addr.type = cp->addr.type;
1805 if (!bdaddr_type_is_valid(cp->addr.type))
1806 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1807 MGMT_STATUS_INVALID_PARAMS,
1812 if (!test_bit(HCI_UP, &hdev->flags)) {
1813 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1814 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be pending at a time. */
1818 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1819 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1820 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1824 if (cp->addr.type == BDADDR_BREDR)
1825 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1828 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections are not usable for disconnect. */
1830 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1831 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1832 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1836 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1842 dc.handle = cpu_to_le16(conn->handle);
1843 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1845 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1847 mgmt_pending_remove(cmd);
1850 hci_dev_unlock(hdev);
/* Map an HCI (link_type, addr_type) pair to the mgmt BDADDR_* address
 * type used on the management interface. */
1854 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1856 switch (link_type) {
1858 switch (addr_type) {
1859 case ADDR_LE_DEV_PUBLIC:
1860 return BDADDR_LE_PUBLIC;
1863 /* Fallback to LE Random address type */
1864 return BDADDR_LE_RANDOM;
1868 /* Fallback to BR/EDR type */
1869 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: builds a variable-length reply listing
 * the addresses of all mgmt-visible connections, filtering out SCO/eSCO
 * links.  The buffer is sized from a first counting pass; the length is
 * recomputed after the fill pass because SCO links are skipped.
 * NOTE(review): listing is elided — intermediate lines are missing. */
1873 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1876 struct mgmt_rp_get_connections *rp;
1886 if (!hdev_is_powered(hdev)) {
1887 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1888 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply. */
1893 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1894 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1898 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1899 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
1906 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1907 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1909 bacpy(&rp->addr[i].bdaddr, &c->dst);
1910 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1911 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1916 rp->conn_count = cpu_to_le16(i);
1918 /* Recalculate length in case of filtered SCO connections, etc */
1919 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1921 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1927 hci_dev_unlock(hdev);
/* Helper: queue a pending PIN Code Negative Reply command and send the
 * corresponding HCI command; the pending entry is removed on send failure. */
1931 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1932 struct mgmt_cp_pin_code_neg_reply *cp)
1934 struct pending_cmd *cmd;
1937 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1942 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1943 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1945 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forwards a user-supplied PIN to the
 * controller.  If the connection demands high security, only a 16-digit
 * PIN is acceptable — otherwise a negative reply is sent instead.
 * NOTE(review): listing is elided — intermediate lines are missing. */
1950 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1953 struct hci_conn *conn;
1954 struct mgmt_cp_pin_code_reply *cp = data;
1955 struct hci_cp_pin_code_reply reply;
1956 struct pending_cmd *cmd;
1963 if (!hdev_is_powered(hdev)) {
1964 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1965 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only applies to BR/EDR ACL connections. */
1969 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1971 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1972 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; reject shorter ones
 * by converting the reply into a negative reply toward the peer. */
1976 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1977 struct mgmt_cp_pin_code_neg_reply ncp;
1979 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1981 BT_ERR("PIN code is not 16 bytes long");
1983 err = send_pin_code_neg_reply(sk, hdev, &ncp);
1985 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1986 MGMT_STATUS_INVALID_PARAMS);
1991 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
1997 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
1998 reply.pin_len = cp->pin_len;
1999 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2001 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2003 mgmt_pending_remove(cmd);
2006 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: stores the IO capability used for
 * future pairing; purely local state, replies immediately. */
2010 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2013 struct mgmt_cp_set_io_capability *cp = data;
2019 hdev->io_capability = cp->io_capability;
2021 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2022 hdev->io_capability);
2024 hci_dev_unlock(hdev);
2026 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending Pair Device command whose user_data points at the
 * given connection, or NULL if none is in flight. */
2030 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2032 struct hci_dev *hdev = conn->hdev;
2033 struct pending_cmd *cmd;
2035 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2036 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2039 if (cmd->user_data != conn)
/* Finish a Pair Device command: send the reply with the given status,
 * detach all pairing callbacks from the connection, and drop the
 * pending-command entry. */
2048 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2050 struct mgmt_rp_pair_device rp;
2051 struct hci_conn *conn = cmd->user_data;
2053 bacpy(&rp.addr.bdaddr, &conn->dst);
2054 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2056 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2059 /* So we don't get further callbacks for this connection */
2060 conn->connect_cfm_cb = NULL;
2061 conn->security_cfm_cb = NULL;
2062 conn->disconn_cfm_cb = NULL;
2066 mgmt_pending_remove(cmd);
/* Connection-level callback (BR/EDR connect/security/disconnect):
 * resolve the matching pending pairing with the translated HCI status. */
2069 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2071 struct pending_cmd *cmd;
2073 BT_DBG("status %u", status);
2075 cmd = find_pairing(conn);
2077 BT_DBG("Unable to find a pending command");
2079 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback: for LE a successful connection alone
 * does not finish pairing, so this only resolves the pending command
 * in the cases the (elided) guard lets through. */
2082 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2084 struct pending_cmd *cmd;
2086 BT_DBG("status %u", status);
2091 cmd = find_pairing(conn);
2093 BT_DBG("Unable to find a pending command");
2095 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiates an ACL or LE connection with
 * dedicated-bonding authentication and wires pairing callbacks onto the
 * connection; the management reply is deferred until pairing resolves.
 * NOTE(review): listing is elided — intermediate lines are missing. */
2098 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2101 struct mgmt_cp_pair_device *cp = data;
2102 struct mgmt_rp_pair_device rp;
2103 struct pending_cmd *cmd;
2104 u8 sec_level, auth_type;
2105 struct hci_conn *conn;
2110 memset(&rp, 0, sizeof(rp));
2111 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2112 rp.addr.type = cp->addr.type;
2114 if (!bdaddr_type_is_valid(cp->addr.type))
2115 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2116 MGMT_STATUS_INVALID_PARAMS,
2121 if (!hdev_is_powered(hdev)) {
2122 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2123 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot do MITM protection. */
2127 sec_level = BT_SECURITY_MEDIUM;
2128 if (cp->io_cap == 0x03)
2129 auth_type = HCI_AT_DEDICATED_BONDING;
2131 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2133 if (cp->addr.type == BDADDR_BREDR)
2134 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2135 cp->addr.type, sec_level, auth_type);
2137 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2138 cp->addr.type, sec_level, auth_type);
2143 if (PTR_ERR(conn) == -EBUSY)
2144 status = MGMT_STATUS_BUSY;
2146 status = MGMT_STATUS_CONNECT_FAILED;
2148 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already installed means pairing is in progress. */
2154 if (conn->connect_cfm_cb) {
2156 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2157 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2161 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2168 /* For LE, just connecting isn't a proof that the pairing finished */
2169 if (cp->addr.type == BDADDR_BREDR)
2170 conn->connect_cfm_cb = pairing_complete_cb;
2172 conn->connect_cfm_cb = le_connect_complete_cb;
2174 conn->security_cfm_cb = pairing_complete_cb;
2175 conn->disconn_cfm_cb = pairing_complete_cb;
2176 conn->io_capability = cp->io_cap;
2177 cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2179 if (conn->state == BT_CONNECTED &&
2180 hci_conn_security(conn, sec_level, auth_type))
2181 pairing_complete(cmd, 0);
2186 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: aborts the in-flight Pair Device
 * command for the matching address, completing it with status Cancelled.
 * NOTE(review): listing is elided — intermediate lines are missing. */
2190 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2193 struct mgmt_addr_info *addr = data;
2194 struct pending_cmd *cmd;
2195 struct hci_conn *conn;
2202 if (!hdev_is_powered(hdev)) {
2203 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2204 MGMT_STATUS_NOT_POWERED);
2208 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2210 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2211 MGMT_STATUS_INVALID_PARAMS);
2215 conn = cmd->user_data;
/* The cancel must name the same peer as the pending pairing. */
2217 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2218 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2219 MGMT_STATUS_INVALID_PARAMS);
2223 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2225 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2226 addr, sizeof(*addr));
2228 hci_dev_unlock(hdev);
/* Common backend for all user confirm/passkey (neg-)reply commands.
 * For LE addresses the response is routed through SMP; for BR/EDR a
 * pending command is queued and the corresponding HCI command is sent
 * (with a passkey payload only for HCI User Passkey Reply).
 * NOTE(review): listing is elided — intermediate lines are missing. */
2232 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2233 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2234 u16 hci_op, __le32 passkey)
2236 struct pending_cmd *cmd;
2237 struct hci_conn *conn;
2242 if (!hdev_is_powered(hdev)) {
2243 err = cmd_status(sk, hdev->id, mgmt_op,
2244 MGMT_STATUS_NOT_POWERED);
2248 if (type == BDADDR_BREDR)
2249 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2251 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2254 err = cmd_status(sk, hdev->id, mgmt_op,
2255 MGMT_STATUS_NOT_CONNECTED);
2259 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2260 /* Continue with pairing via SMP */
2261 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2264 err = cmd_status(sk, hdev->id, mgmt_op,
2265 MGMT_STATUS_SUCCESS);
2267 err = cmd_status(sk, hdev->id, mgmt_op,
2268 MGMT_STATUS_FAILED);
2273 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2279 /* Continue with pairing via HCI */
2280 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2281 struct hci_cp_user_passkey_reply cp;
2283 bacpy(&cp.bdaddr, bdaddr);
2284 cp.passkey = passkey;
2285 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry only the peer address. */
2287 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2290 mgmt_pending_remove(cmd);
2293 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the PIN-code negative-reply opcodes. */
2297 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2298 void *data, u16 len)
2300 struct mgmt_cp_pin_code_neg_reply *cp = data;
2304 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2305 MGMT_OP_PIN_CODE_NEG_REPLY,
2306 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the payload length, then
 * delegates to user_pairing_resp() with the confirm-reply opcodes. */
2309 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2312 struct mgmt_cp_user_confirm_reply *cp = data;
2316 if (len != sizeof(*cp))
2317 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2318 MGMT_STATUS_INVALID_PARAMS);
2320 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2321 MGMT_OP_USER_CONFIRM_REPLY,
2322 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the confirm negative-reply opcodes. */
2325 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2326 void *data, u16 len)
2328 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2332 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2333 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2334 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered passkey
 * via user_pairing_resp(). */
2337 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2340 struct mgmt_cp_user_passkey_reply *cp = data;
2344 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2345 MGMT_OP_USER_PASSKEY_REPLY,
2346 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the passkey negative-reply opcodes. */
2349 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2350 void *data, u16 len)
2352 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2356 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2357 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2358 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command onto the given request.
 * NOTE(review): name is copied with sizeof(cp.name) — assumes callers
 * pass a buffer at least that large (hdev->dev_name / cp->name do). */
2361 static void update_name(struct hci_request *req, const char *name)
2363 struct hci_cp_write_local_name cp;
2365 memcpy(cp.name, name, sizeof(cp.name));
2367 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* MGMT_OP_SET_LOCAL_NAME handler: stores the short name locally; when
 * powered off the full name is cached too and a Local Name Changed event
 * is emitted, otherwise an HCI Write Local Name request is run.
 * NOTE(review): listing is elided — intermediate lines are missing. */
2370 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2373 struct mgmt_cp_set_local_name *cp = data;
2374 struct pending_cmd *cmd;
2375 struct hci_request req;
2382 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2384 if (!hdev_is_powered(hdev)) {
2385 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2387 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Notify other mgmt sockets about the name change. */
2392 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2398 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2404 hci_req_init(&req, hdev);
2405 update_name(&req, cp->name);
2406 err = hci_req_run(&req, NULL);
2408 mgmt_pending_remove(cmd);
2411 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller and no duplicate request in flight; sends HCI Read Local
 * OOB Data and defers the reply via a pending command. */
2415 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2416 void *data, u16 data_len)
2418 struct pending_cmd *cmd;
2421 BT_DBG("%s", hdev->name);
2425 if (!hdev_is_powered(hdev)) {
2426 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2427 MGMT_STATUS_NOT_POWERED);
2431 if (!lmp_ssp_capable(hdev)) {
2432 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2433 MGMT_STATUS_NOT_SUPPORTED);
2437 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2438 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2443 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2449 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2451 mgmt_pending_remove(cmd);
2454 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores the remote device's OOB
 * hash/randomizer and replies synchronously with success or failure. */
2458 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2459 void *data, u16 len)
2461 struct mgmt_cp_add_remote_oob_data *cp = data;
2465 BT_DBG("%s ", hdev->name);
2469 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2472 status = MGMT_STATUS_FAILED;
2474 status = MGMT_STATUS_SUCCESS;
2476 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2477 &cp->addr, sizeof(cp->addr));
2479 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drops stored OOB data for the
 * address; a failed removal maps to Invalid Params (no such entry). */
2483 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2484 void *data, u16 len)
2486 struct mgmt_cp_remove_remote_oob_data *cp = data;
2490 BT_DBG("%s", hdev->name);
2494 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2496 status = MGMT_STATUS_INVALID_PARAMS;
2498 status = MGMT_STATUS_SUCCESS;
2500 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2501 status, &cp->addr, sizeof(cp->addr));
2503 hci_dev_unlock(hdev);
/* Kick off the BR/EDR inquiry phase of interleaved (BR/EDR + LE)
 * discovery; on failure the discovery state is reset to STOPPED. */
2507 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2511 BT_DBG("%s", hdev->name);
2515 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2517 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2519 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validates device state, then starts
 * inquiry (BR/EDR), LE scan (LE) or both (interleaved) based on the
 * requested discovery type; the reply is deferred via a pending command
 * and the discovery state moves to STARTING on success.
 * NOTE(review): listing is elided — intermediate lines are missing. */
2524 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2525 void *data, u16 len)
2527 struct mgmt_cp_start_discovery *cp = data;
2528 struct pending_cmd *cmd;
2531 BT_DBG("%s", hdev->name);
2535 if (!hdev_is_powered(hdev)) {
2536 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2537 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and discovery are mutually exclusive. */
2541 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2542 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2547 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2548 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2553 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2559 hdev->discovery.type = cp->type;
2561 switch (hdev->discovery.type) {
2562 case DISCOV_TYPE_BREDR:
2563 if (!lmp_bredr_capable(hdev)) {
2564 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2565 MGMT_STATUS_NOT_SUPPORTED);
2566 mgmt_pending_remove(cmd);
2570 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2573 case DISCOV_TYPE_LE:
2574 if (!lmp_host_le_capable(hdev)) {
2575 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2576 MGMT_STATUS_NOT_SUPPORTED);
2577 mgmt_pending_remove(cmd);
2581 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2582 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
/* Interleaved discovery needs both BR/EDR and LE host support. */
2585 case DISCOV_TYPE_INTERLEAVED:
2586 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2587 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2588 MGMT_STATUS_NOT_SUPPORTED);
2589 mgmt_pending_remove(cmd);
2593 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2594 LE_SCAN_TIMEOUT_BREDR_LE);
2598 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2599 MGMT_STATUS_INVALID_PARAMS);
2600 mgmt_pending_remove(cmd);
2605 mgmt_pending_remove(cmd);
2607 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2610 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: cancels the current discovery stage —
 * inquiry/LE-scan while FINDING, or the outstanding remote name request
 * while RESOLVING — and moves state to STOPPING.  The stop type must
 * match the type discovery was started with.
 * NOTE(review): listing is elided — intermediate lines are missing. */
2614 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2617 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2618 struct pending_cmd *cmd;
2619 struct hci_cp_remote_name_req_cancel cp;
2620 struct inquiry_entry *e;
2623 BT_DBG("%s", hdev->name);
2627 if (!hci_discovery_active(hdev)) {
2628 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2629 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2630 sizeof(mgmt_cp->type));
2634 if (hdev->discovery.type != mgmt_cp->type) {
2635 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2636 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2637 sizeof(mgmt_cp->type));
2641 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2647 switch (hdev->discovery.state) {
2648 case DISCOVERY_FINDING:
2649 if (test_bit(HCI_INQUIRY, &hdev->flags))
2650 err = hci_cancel_inquiry(hdev);
2652 err = hci_cancel_le_scan(hdev);
2656 case DISCOVERY_RESOLVING:
2657 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* Nothing left to resolve: complete immediately and stop. */
2660 mgmt_pending_remove(cmd);
2661 err = cmd_complete(sk, hdev->id,
2662 MGMT_OP_STOP_DISCOVERY, 0,
2664 sizeof(mgmt_cp->type));
2665 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2669 bacpy(&cp.bdaddr, &e->data.bdaddr);
2670 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2676 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2681 mgmt_pending_remove(cmd);
2683 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2686 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, userspace reports
 * whether it already knows the remote name for a cached inquiry entry;
 * unknown names are re-queued for name resolution. */
2690 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2693 struct mgmt_cp_confirm_name *cp = data;
2694 struct inquiry_entry *e;
2697 BT_DBG("%s", hdev->name);
2701 if (!hci_discovery_active(hdev)) {
2702 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2703 MGMT_STATUS_FAILED);
2707 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2709 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2710 MGMT_STATUS_INVALID_PARAMS);
2714 if (cp->name_known) {
2715 e->name_state = NAME_KNOWN;
2718 e->name_state = NAME_NEEDED;
2719 hci_inquiry_cache_update_resolve(hdev, e);
2722 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2726 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: adds the address to the controller's
 * blacklist and replies synchronously. */
2730 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2733 struct mgmt_cp_block_device *cp = data;
2737 BT_DBG("%s", hdev->name);
2739 if (!bdaddr_type_is_valid(cp->addr.type))
2740 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2741 MGMT_STATUS_INVALID_PARAMS,
2742 &cp->addr, sizeof(cp->addr));
2746 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2748 status = MGMT_STATUS_FAILED;
2750 status = MGMT_STATUS_SUCCESS;
2752 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2753 &cp->addr, sizeof(cp->addr));
2755 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: removes the address from the blacklist;
 * a failed removal (no such entry) maps to Invalid Params. */
2760 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2763 struct mgmt_cp_unblock_device *cp = data;
2767 BT_DBG("%s", hdev->name);
2769 if (!bdaddr_type_is_valid(cp->addr.type))
2770 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2771 MGMT_STATUS_INVALID_PARAMS,
2772 &cp->addr, sizeof(cp->addr));
2776 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2778 status = MGMT_STATUS_INVALID_PARAMS;
2780 status = MGMT_STATUS_SUCCESS;
2782 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2783 &cp->addr, sizeof(cp->addr));
2785 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: stores the Device ID record fields
 * (source must be 0x0000-0x0002 per the DI profile), replies, and runs
 * a request so the (elided) EIR update picks up the new record. */
2790 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2793 struct mgmt_cp_set_device_id *cp = data;
2794 struct hci_request req;
2798 BT_DBG("%s", hdev->name);
2800 source = __le16_to_cpu(cp->source);
2802 if (source > 0x0002)
2803 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2804 MGMT_STATUS_INVALID_PARAMS);
2808 hdev->devid_source = source;
2809 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2810 hdev->devid_product = __le16_to_cpu(cp->product);
2811 hdev->devid_version = __le16_to_cpu(cp->version);
2813 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2815 hci_req_init(&req, hdev);
2817 hci_req_run(&req, NULL);
2819 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: switches between interlaced page
 * scan with a 160 ms interval (fast) and standard page scan with a
 * 1.28 s interval (default) via two HCI commands.  Requires a powered,
 * connectable, BR/EDR-capable controller.
 * NOTE(review): listing is elided — intermediate lines are missing. */
2824 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2825 void *data, u16 len)
2827 struct mgmt_mode *cp = data;
2828 struct hci_cp_write_page_scan_activity acp;
2832 BT_DBG("%s", hdev->name);
2834 if (!lmp_bredr_capable(hdev))
2835 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2836 MGMT_STATUS_NOT_SUPPORTED);
2838 if (cp->val != 0x00 && cp->val != 0x01)
2839 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2840 MGMT_STATUS_INVALID_PARAMS);
2842 if (!hdev_is_powered(hdev))
2843 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2844 MGMT_STATUS_NOT_POWERED);
/* Page scan activity is only meaningful while connectable. */
2846 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2847 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2848 MGMT_STATUS_REJECTED);
2853 type = PAGE_SCAN_TYPE_INTERLACED;
2855 /* 160 msec page scan interval */
2856 acp.interval = __constant_cpu_to_le16(0x0100);
2858 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2860 /* default 1.28 sec page scan */
2861 acp.interval = __constant_cpu_to_le16(0x0800);
2864 /* default 11.25 msec page scan window */
2865 acp.window = __constant_cpu_to_le16(0x0012);
2867 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2870 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2871 MGMT_STATUS_FAILED);
2875 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2877 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2878 MGMT_STATUS_FAILED);
2882 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2885 hci_dev_unlock(hdev);
/* Validate a single Long Term Key entry from MGMT_OP_LOAD_LONG_TERM_KEYS:
 * boolean fields must be 0/1 and the address type must be an LE type. */
2889 static bool ltk_is_valid(struct mgmt_ltk_info *key)
2891 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2893 if (key->master != 0x00 && key->master != 0x01)
2895 if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler (variable-length command): checks
 * the payload size and every key entry, then replaces the stored SMP LTKs
 * wholesale.
 * NOTE(review): listing is elided — intermediate lines are missing. */
2900 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2901 void *cp_data, u16 len)
2903 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2904 u16 key_count, expected_len;
2907 key_count = __le16_to_cpu(cp->key_count);
/* Payload must be exactly header + key_count entries. */
2909 expected_len = sizeof(*cp) + key_count *
2910 sizeof(struct mgmt_ltk_info);
2911 if (expected_len != len) {
2912 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2914 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2915 MGMT_STATUS_INVALID_PARAMS);
2918 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all keys before mutating any state. */
2920 for (i = 0; i < key_count; i++) {
2921 struct mgmt_ltk_info *key = &cp->keys[i];
2923 if (!ltk_is_valid(key))
2924 return cmd_status(sk, hdev->id,
2925 MGMT_OP_LOAD_LONG_TERM_KEYS,
2926 MGMT_STATUS_INVALID_PARAMS);
/* Replace-all semantics: clear existing LTKs first. */
2931 hci_smp_ltks_clear(hdev);
2933 for (i = 0; i < key_count; i++) {
2934 struct mgmt_ltk_info *key = &cp->keys[i];
2940 type = HCI_SMP_LTK_SLAVE;
2942 hci_add_ltk(hdev, &key->addr.bdaddr,
2943 bdaddr_to_le(key->addr.type),
2944 type, 0, key->authenticated, key->val,
2945 key->enc_size, key->ediv, key->rand);
2948 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
2951 hci_dev_unlock(hdev);
/* Dispatch table for management commands.  Indexed directly by the
 * MGMT_OP_* opcode value, so entry order must match the opcode numbering
 * exactly (slot 0 is unused).  var_len == true means the handler accepts
 * payloads larger than data_len (data_len is then the minimum). */
2956 static const struct mgmt_handler {
2957 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2961 } mgmt_handlers[] = {
2962 { NULL }, /* 0x0000 (no command) */
2963 { read_version, false, MGMT_READ_VERSION_SIZE },
2964 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
2965 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
2966 { read_controller_info, false, MGMT_READ_INFO_SIZE },
2967 { set_powered, false, MGMT_SETTING_SIZE },
2968 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
2969 { set_connectable, false, MGMT_SETTING_SIZE },
2970 { set_fast_connectable, false, MGMT_SETTING_SIZE },
2971 { set_pairable, false, MGMT_SETTING_SIZE },
2972 { set_link_security, false, MGMT_SETTING_SIZE },
2973 { set_ssp, false, MGMT_SETTING_SIZE },
2974 { set_hs, false, MGMT_SETTING_SIZE },
2975 { set_le, false, MGMT_SETTING_SIZE },
2976 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
2977 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
2978 { add_uuid, false, MGMT_ADD_UUID_SIZE },
2979 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
2980 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
2981 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
2982 { disconnect, false, MGMT_DISCONNECT_SIZE },
2983 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
2984 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
2985 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
2986 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
2987 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
2988 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
2989 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
2990 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
2991 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
2992 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
2993 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
2994 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
2995 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
2996 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
2997 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
2998 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
2999 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3000 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3001 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3002 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for management-socket writes: copies the message from
 * userspace, parses the mgmt header, resolves the controller index,
 * validates the opcode and payload length against mgmt_handlers[], and
 * dispatches to the matching handler.
 * NOTE(review): listing is elided — intermediate lines are missing
 * (including the hdr assignment and the cleanup path). */
3006 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3010 struct mgmt_hdr *hdr;
3011 u16 opcode, index, len;
3012 struct hci_dev *hdev = NULL;
3013 const struct mgmt_handler *handler;
3016 BT_DBG("got %zu bytes", msglen);
3018 if (msglen < sizeof(*hdr))
3021 buf = kmalloc(msglen, GFP_KERNEL);
3025 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3031 opcode = __le16_to_cpu(hdr->opcode);
3032 index = __le16_to_cpu(hdr->index);
3033 len = __le16_to_cpu(hdr->len);
/* Header length field must match the actual payload size. */
3035 if (len != msglen - sizeof(*hdr)) {
3040 if (index != MGMT_INDEX_NONE) {
3041 hdev = hci_dev_get(index);
3043 err = cmd_status(sk, index, opcode,
3044 MGMT_STATUS_INVALID_INDEX);
3049 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3050 mgmt_handlers[opcode].func == NULL) {
3051 BT_DBG("Unknown op %u", opcode);
3052 err = cmd_status(sk, index, opcode,
3053 MGMT_STATUS_UNKNOWN_COMMAND);
/* Commands below MGMT_OP_READ_INFO are index-less; all others
 * require a valid controller index. */
3057 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3058 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3059 err = cmd_status(sk, index, opcode,
3060 MGMT_STATUS_INVALID_INDEX);
3064 handler = &mgmt_handlers[opcode];
/* var_len handlers take >= data_len; fixed handlers exactly data_len. */
3066 if ((handler->var_len && len < handler->data_len) ||
3067 (!handler->var_len && len != handler->data_len)) {
3068 err = cmd_status(sk, index, opcode,
3069 MGMT_STATUS_INVALID_PARAMS);
3074 mgmt_init_hdev(sk, hdev);
3076 cp = buf + sizeof(*hdr);
3078 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach() callback: answer a pending command with the
 * status passed via data, then drop the pending entry. */
3092 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3096 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3097 mgmt_pending_remove(cmd);
/* Broadcast Index Added for a newly registered, mgmt-visible controller. */
3100 int mgmt_index_added(struct hci_dev *hdev)
3102 if (!mgmt_valid_hdev(hdev))
3105 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Controller going away: fail all pending commands (opcode 0 matches
 * every opcode) with Invalid Index, then broadcast Index Removed. */
3108 int mgmt_index_removed(struct hci_dev *hdev)
3110 u8 status = MGMT_STATUS_INVALID_INDEX;
3112 if (!mgmt_valid_hdev(hdev))
3115 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3117 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* NOTE(review): fragment of the cmd_lookup struct definition — the
 * struct header and its other members (a struct sock * at least, per
 * the { NULL, hdev } initializers used below) were lost in extraction.
 */
3122 struct hci_dev *hdev;
3126 static void settings_rsp(struct pending_cmd *cmd, void *data)
3128 struct cmd_lookup *match = data;
3130 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3132 list_del(&cmd->list);
3134 if (match->sk == NULL) {
3135 match->sk = cmd->sk;
3136 sock_hold(match->sk);
3139 mgmt_pending_free(cmd);
3142 static void set_bredr_scan(struct hci_request *req)
3144 struct hci_dev *hdev = req->hdev;
3147 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3149 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3150 scan |= SCAN_INQUIRY;
3153 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3156 static void powered_complete(struct hci_dev *hdev, u8 status)
3158 struct cmd_lookup match = { NULL, hdev };
3160 BT_DBG("status 0x%02x", status);
3164 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3166 new_settings(hdev, match.sk);
3168 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after power-on: SSP mode, LE host
 * support, authentication, and (for BR/EDR) scan mode and local name.
 *
 * NOTE(review): extraction fragment — braces, several declarations
 * (e.g. the ssp/link_sec locals, cp.le assignment) and possibly other
 * hci_req_add() calls are missing; kept byte-identical apart from
 * these comments.  Confirm against the upstream tree before relying
 * on the exact command sequence.
 */
3174 static int powered_update_hci(struct hci_dev *hdev)
3176 	struct hci_request req;
3179 	hci_req_init(&req, hdev);
/* Enable SSP in the controller if mgmt has it on but the host feature
 * bit is not yet set.
 */
3181 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3182 	    !lmp_host_ssp_capable(hdev)) {
3185 		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3188 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3189 		struct hci_cp_write_le_host_supported cp;
3192 		cp.simul = lmp_le_br_capable(hdev);
3194 		/* Check first if we already have the right
3195 		 * host state (host features set)
3197 		if (cp.le != lmp_host_le_capable(hdev) ||
3198 		    cp.simul != lmp_host_le_br_capable(hdev))
3199 			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Sync the controller's auth-enable setting with the LINK_SECURITY
 * mgmt flag only when they differ.
 */
3203 	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3204 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3205 		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3206 			    sizeof(link_sec), &link_sec);
/* BR/EDR-only setup: scan mode and local name. */
3208 	if (lmp_bredr_capable(hdev)) {
3209 		set_bredr_scan(&req);
3211 		update_name(&req, hdev->dev_name);
/* powered_complete() answers the pending SET_POWERED commands once the
 * whole request has run.
 */
3215 	return hci_req_run(&req, powered_complete);
/* Notify mgmt of a controller power state change.  On power-on, kick
 * off powered_update_hci() (completion answers the pending commands);
 * on power-off, answer/cancel all pending commands and, if the class
 * of device was non-zero, emit a CLASS_OF_DEV_CHANGED back to zero.
 *
 * NOTE(review): extraction fragment — the powered/!powered branch
 * structure, early returns and the trailing sock_put() are missing;
 * kept byte-identical apart from these comments.
 */
3218 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3220 	struct cmd_lookup match = { NULL, hdev };
3221 	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3222 	u8 zero_cod[] = { 0, 0, 0 };
/* Nothing to do unless this controller is under mgmt control. */
3225 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Power-on path: if the update request was queued, the completion
 * callback will respond; responding here is only the fallback.
 */
3229 	if (powered_update_hci(hdev) == 0)
3232 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path: answer SET_POWERED, then fail everything else with
 * NOT_POWERED.
 */
3237 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3238 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3240 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3241 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3242 			   zero_cod, sizeof(zero_cod), NULL);
3245 	err = new_settings(hdev, match.sk);
3253 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3255 struct cmd_lookup match = { NULL, hdev };
3256 bool changed = false;
3260 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3263 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3267 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3271 err = new_settings(hdev, match.sk);
3279 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3281 struct cmd_lookup match = { NULL, hdev };
3282 bool changed = false;
3286 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3289 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3293 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
3297 err = new_settings(hdev, match.sk);
3305 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3307 u8 mgmt_err = mgmt_status(status);
3309 if (scan & SCAN_PAGE)
3310 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3311 cmd_status_rsp, &mgmt_err);
3313 if (scan & SCAN_INQUIRY)
3314 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3315 cmd_status_rsp, &mgmt_err);
3320 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3323 struct mgmt_ev_new_link_key ev;
3325 memset(&ev, 0, sizeof(ev));
3327 ev.store_hint = persistent;
3328 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3329 ev.key.addr.type = BDADDR_BREDR;
3330 ev.key.type = key->type;
3331 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3332 ev.key.pin_len = key->pin_len;
3334 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3337 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3339 struct mgmt_ev_new_long_term_key ev;
3341 memset(&ev, 0, sizeof(ev));
3343 ev.store_hint = persistent;
3344 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3345 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3346 ev.key.authenticated = key->authenticated;
3347 ev.key.enc_size = key->enc_size;
3348 ev.key.ediv = key->ediv;
3350 if (key->type == HCI_SMP_LTK)
3353 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3354 memcpy(ev.key.val, key->val, sizeof(key->val));
3356 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3360 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3361 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3365 struct mgmt_ev_device_connected *ev = (void *) buf;
3368 bacpy(&ev->addr.bdaddr, bdaddr);
3369 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3371 ev->flags = __cpu_to_le32(flags);
3374 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3377 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3378 eir_len = eir_append_data(ev->eir, eir_len,
3379 EIR_CLASS_OF_DEV, dev_class, 3);
3381 ev->eir_len = cpu_to_le16(eir_len);
3383 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3384 sizeof(*ev) + eir_len, NULL);
3387 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3389 struct mgmt_cp_disconnect *cp = cmd->param;
3390 struct sock **sk = data;
3391 struct mgmt_rp_disconnect rp;
3393 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3394 rp.addr.type = cp->addr.type;
3396 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3402 mgmt_pending_remove(cmd);
3405 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3407 struct hci_dev *hdev = data;
3408 struct mgmt_cp_unpair_device *cp = cmd->param;
3409 struct mgmt_rp_unpair_device rp;
3411 memset(&rp, 0, sizeof(rp));
3412 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3413 rp.addr.type = cp->addr.type;
3415 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3417 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3419 mgmt_pending_remove(cmd);
3422 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3423 u8 link_type, u8 addr_type, u8 reason)
3425 struct mgmt_ev_device_disconnected ev;
3426 struct sock *sk = NULL;
3429 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3431 bacpy(&ev.addr.bdaddr, bdaddr);
3432 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3435 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3441 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3447 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3448 u8 link_type, u8 addr_type, u8 status)
3450 struct mgmt_rp_disconnect rp;
3451 struct pending_cmd *cmd;
3454 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3457 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3461 bacpy(&rp.addr.bdaddr, bdaddr);
3462 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3464 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3465 mgmt_status(status), &rp, sizeof(rp));
3467 mgmt_pending_remove(cmd);
3472 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3473 u8 addr_type, u8 status)
3475 struct mgmt_ev_connect_failed ev;
3477 bacpy(&ev.addr.bdaddr, bdaddr);
3478 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3479 ev.status = mgmt_status(status);
3481 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3484 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3486 struct mgmt_ev_pin_code_request ev;
3488 bacpy(&ev.addr.bdaddr, bdaddr);
3489 ev.addr.type = BDADDR_BREDR;
3492 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3496 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3499 struct pending_cmd *cmd;
3500 struct mgmt_rp_pin_code_reply rp;
3503 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3507 bacpy(&rp.addr.bdaddr, bdaddr);
3508 rp.addr.type = BDADDR_BREDR;
3510 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3511 mgmt_status(status), &rp, sizeof(rp));
3513 mgmt_pending_remove(cmd);
3518 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3521 struct pending_cmd *cmd;
3522 struct mgmt_rp_pin_code_reply rp;
3525 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3529 bacpy(&rp.addr.bdaddr, bdaddr);
3530 rp.addr.type = BDADDR_BREDR;
3532 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3533 mgmt_status(status), &rp, sizeof(rp));
3535 mgmt_pending_remove(cmd);
3540 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3541 u8 link_type, u8 addr_type, __le32 value,
3544 struct mgmt_ev_user_confirm_request ev;
3546 BT_DBG("%s", hdev->name);
3548 bacpy(&ev.addr.bdaddr, bdaddr);
3549 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3550 ev.confirm_hint = confirm_hint;
3553 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3557 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3558 u8 link_type, u8 addr_type)
3560 struct mgmt_ev_user_passkey_request ev;
3562 BT_DBG("%s", hdev->name);
3564 bacpy(&ev.addr.bdaddr, bdaddr);
3565 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3567 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3571 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3572 u8 link_type, u8 addr_type, u8 status,
3575 struct pending_cmd *cmd;
3576 struct mgmt_rp_user_confirm_reply rp;
3579 cmd = mgmt_pending_find(opcode, hdev);
3583 bacpy(&rp.addr.bdaddr, bdaddr);
3584 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3585 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3588 mgmt_pending_remove(cmd);
3593 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3594 u8 link_type, u8 addr_type, u8 status)
3596 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3597 status, MGMT_OP_USER_CONFIRM_REPLY);
3600 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3601 u8 link_type, u8 addr_type, u8 status)
3603 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3605 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3608 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3609 u8 link_type, u8 addr_type, u8 status)
3611 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3612 status, MGMT_OP_USER_PASSKEY_REPLY);
3615 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3616 u8 link_type, u8 addr_type, u8 status)
3618 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3620 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3623 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3624 u8 link_type, u8 addr_type, u32 passkey,
3627 struct mgmt_ev_passkey_notify ev;
3629 BT_DBG("%s", hdev->name);
3631 bacpy(&ev.addr.bdaddr, bdaddr);
3632 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3633 ev.passkey = __cpu_to_le32(passkey);
3634 ev.entered = entered;
3636 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3639 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3640 u8 addr_type, u8 status)
3642 struct mgmt_ev_auth_failed ev;
3644 bacpy(&ev.addr.bdaddr, bdaddr);
3645 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3646 ev.status = mgmt_status(status);
3648 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3651 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3653 struct cmd_lookup match = { NULL, hdev };
3654 bool changed = false;
3658 u8 mgmt_err = mgmt_status(status);
3659 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3660 cmd_status_rsp, &mgmt_err);
3664 if (test_bit(HCI_AUTH, &hdev->flags)) {
3665 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3668 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3672 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3676 err = new_settings(hdev, match.sk);
3684 static void clear_eir(struct hci_request *req)
3686 struct hci_dev *hdev = req->hdev;
3687 struct hci_cp_write_eir cp;
3689 if (!lmp_ext_inq_capable(hdev))
3692 memset(hdev->eir, 0, sizeof(hdev->eir));
3694 memset(&cp, 0, sizeof(cp));
3696 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* WRITE_SSP_MODE completed: on failure, roll back the HCI_SSP_ENABLED
 * flag (broadcasting the reverted settings) and fail pending SET_SSP
 * commands; on success, sync the flag with @enable, answer pending
 * commands, broadcast on change and update/clear the EIR accordingly.
 *
 * NOTE(review): extraction fragment — branch braces, the enable/else
 * arms and the update-EIR call after the flag test are missing; kept
 * byte-identical apart from these comments.
 */
3699 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3701 	struct cmd_lookup match = { NULL, hdev };
3702 	struct hci_request req;
3703 	bool changed = false;
/* Failure path: revert the optimistic flag set done at command time. */
3707 		u8 mgmt_err = mgmt_status(status);
3709 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3711 			err = new_settings(hdev, NULL);
3713 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
/* Success path: track the controller's new SSP state. */
3720 		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3723 		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3727 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3730 		err = new_settings(hdev, match.sk);
/* Refresh or clear EIR to match the new SSP state. */
3735 	hci_req_init(&req, hdev);
3737 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3742 	hci_req_run(&req, NULL);
3747 static void sk_lookup(struct pending_cmd *cmd, void *data)
3749 struct cmd_lookup *match = data;
3751 if (match->sk == NULL) {
3752 match->sk = cmd->sk;
3753 sock_hold(match->sk);
/* Class-of-device write completed: pick the socket of whichever
 * pending class-affecting command (SET_DEV_CLASS / ADD_UUID /
 * REMOVE_UUID) triggered it, and on success broadcast the new class.
 *
 * NOTE(review): extraction fragment — the status check guarding the
 * event, any pending-class flag handling, and the final sock_put are
 * missing; kept byte-identical apart from these comments.
 */
3757 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3760 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3763 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3764 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3765 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3768 	err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Local name write completed: cache the new name, answer a pending
 * SET_LOCAL_NAME command (status or complete), broadcast
 * LOCAL_NAME_CHANGED when appropriate, and refresh EIR unless we are
 * in the middle of power-on init (EIR is handled there separately).
 *
 * NOTE(review): extraction fragment — branch structure around the
 * changed/cmd checks, the update_eir-style call inside the request,
 * and several gotos/braces are missing; kept byte-identical apart
 * from these comments.
 */
3777 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3779 	struct pending_cmd *cmd;
3780 	struct mgmt_cp_set_local_name ev;
3781 	bool changed = false;
/* Update the cached name only when it actually differs. */
3784 	if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
3785 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3789 	memset(&ev, 0, sizeof(ev));
3790 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3791 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3793 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3797 	/* Always assume that either the short or the complete name has
3798 	 * changed if there was a pending mgmt command */
/* Failure: report the mapped HCI status to the requester. */
3802 		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3803 				 mgmt_status(status));
3807 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
/* Broadcast to everyone except the requesting socket (if any). */
3814 	err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3815 			 sizeof(ev), cmd ? cmd->sk : NULL);
3817 	/* EIR is taken care of separately when powering on the
3818 	 * adapter so only update them here if this is a name change
3819 	 * unrelated to power on.
3821 	if (!test_bit(HCI_INIT, &hdev->flags)) {
3822 		struct hci_request req;
3823 		hci_req_init(&req, hdev);
3825 		hci_req_run(&req, NULL);
3830 		mgmt_pending_remove(cmd);
3834 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3835 u8 *randomizer, u8 status)
3837 struct pending_cmd *cmd;
3840 BT_DBG("%s status %u", hdev->name, status);
3842 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3847 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3848 mgmt_status(status));
3850 struct mgmt_rp_read_local_oob_data rp;
3852 memcpy(rp.hash, hash, sizeof(rp.hash));
3853 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3855 err = cmd_complete(cmd->sk, hdev->id,
3856 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3860 mgmt_pending_remove(cmd);
3865 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3867 struct cmd_lookup match = { NULL, hdev };
3868 bool changed = false;
3872 u8 mgmt_err = mgmt_status(status);
3874 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3876 err = new_settings(hdev, NULL);
3878 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3885 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3888 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3892 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3895 err = new_settings(hdev, match.sk);
/* Emit a DEVICE_FOUND event for a discovery result, copying the
 * remote's EIR data and appending the class of device when the EIR
 * does not already contain one.
 *
 * NOTE(review): extraction fragment — the buf declaration, the rssi
 * assignment, discovery-state guards and the flag-setting conditions
 * (cfm_name / !ssp) are missing; kept byte-identical apart from these
 * comments.
 */
3903 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3904 		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3905 		      ssp, u8 *eir, u16 eir_len)
3908 	struct mgmt_ev_device_found *ev = (void *) buf;
3911 	/* Leave 5 bytes for a potential CoD field */
3912 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3915 	memset(buf, 0, sizeof(buf));
3917 	bacpy(&ev->addr.bdaddr, bdaddr);
3918 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* CONFIRM_NAME asks userspace to resolve the name; LEGACY_PAIRING is
 * flagged when the remote lacks SSP.
 */
3921 	ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3923 	ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3926 	memcpy(ev->eir, eir, eir_len);
3928 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
3929 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3932 	ev->eir_len = cpu_to_le16(eir_len);
3933 	ev_size = sizeof(*ev) + eir_len;
3935 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
3938 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3939 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
3941 struct mgmt_ev_device_found *ev;
3942 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3945 ev = (struct mgmt_ev_device_found *) buf;
3947 memset(buf, 0, sizeof(buf));
3949 bacpy(&ev->addr.bdaddr, bdaddr);
3950 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3953 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3956 ev->eir_len = cpu_to_le16(eir_len);
3958 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3959 sizeof(*ev) + eir_len, NULL);
3962 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3964 struct pending_cmd *cmd;
3968 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3970 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3974 type = hdev->discovery.type;
3976 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3977 &type, sizeof(type));
3978 mgmt_pending_remove(cmd);
3983 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3985 struct pending_cmd *cmd;
3988 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3992 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3993 &hdev->discovery.type, sizeof(hdev->discovery.type));
3994 mgmt_pending_remove(cmd);
3999 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4001 struct mgmt_ev_discovering ev;
4002 struct pending_cmd *cmd;
4004 BT_DBG("%s discovering %u", hdev->name, discovering);
4007 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4009 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4012 u8 type = hdev->discovery.type;
4014 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4016 mgmt_pending_remove(cmd);
4019 memset(&ev, 0, sizeof(ev));
4020 ev.type = hdev->discovery.type;
4021 ev.discovering = discovering;
4023 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4026 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4028 struct pending_cmd *cmd;
4029 struct mgmt_ev_device_blocked ev;
4031 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4033 bacpy(&ev.addr.bdaddr, bdaddr);
4034 ev.addr.type = type;
4036 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4037 cmd ? cmd->sk : NULL);
4040 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4042 struct pending_cmd *cmd;
4043 struct mgmt_ev_device_unblocked ev;
4045 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4047 bacpy(&ev.addr.bdaddr, bdaddr);
4048 ev.addr.type = type;
4050 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4051 cmd ? cmd->sk : NULL);
/* Module parameter toggling High Speed (AMP) support; enable_hs itself
 * is defined elsewhere in the file (not visible in this fragment).
 */
4054 module_param(enable_hs, bool, 0644);
4055 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");