2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
/* Management interface version/revision reported to userspace via
 * the Read Management Version Information command.
 */
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Opcodes of all mgmt commands this kernel supports; returned verbatim
 * by read_commands().  Order here defines the wire order of the reply.
 * NOTE(review): this extract appears line-sampled — several entries and
 * the closing brace are not visible; confirm against upstream.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Event opcodes this kernel can emit; appended after the command list
 * in the read_commands() reply.
 * NOTE(review): entries appear sampled in this extract.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
/* LE scan window/interval are in units of 0.625 ms; timeouts are in
 * milliseconds — presumably per GAP TGAP values cited in the trailing
 * comments (TODO confirm against the adopted Core spec).
 */
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
/* Lifetime of the temporary inquiry-result cache. */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A controller counts as "powered" for mgmt purposes only when it is up
 * AND not merely in the kernel's transient auto-power-on state.
 */
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): fragment of struct pending_cmd — only the list linkage
 * member is visible in this extract; remaining members are elsewhere.
 */
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; mgmt_status() falls back to
 * MGMT_STATUS_FAILED for codes beyond the end of the table.
 * NOTE(review): the 0x00 (success) entry is not visible in this
 * extract — verify the table still starts at HCI status 0x00 so the
 * index mapping holds.
 */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* Handler for MGMT_OP_READ_COMMANDS: reply with the supported command
 * and event opcode lists.  put_unaligned_le16() is used because the
 * opcode array in the reply buffer has no alignment guarantee.
 * NOTE(review): extract is gap-ridden — the kmalloc failure check and
 * the kfree()/return tail are not visible here.
 */
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
/* Events are appended directly after the commands in the same array. */
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the ids of all
 * mgmt-capable controllers.  Two passes over hci_dev_list: one to size
 * the reply (GFP_ATOMIC — allocated under the read lock), one to fill
 * it, skipping devices still in HCI_SETUP.
 * NOTE(review): extract is gap-ridden — alloc-failure handling and the
 * kfree()/return tail are not visible here.
 */
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
/* count is re-accumulated in the fill pass; the final reply length is
 * recomputed from it below in case entries were skipped.
 */
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the supported-settings bitmask for @hdev from its LMP feature
 * bits.  Powered and pairable are always supported; BR/EDR-only
 * settings require lmp_bredr_capable().
 * NOTE(review): the settings-variable declaration/initialization and
 * the return statement are not visible in this extract.
 */
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
/* Build the current-settings bitmask for @hdev from its runtime flag
 * bits (dev_flags) and power state.
 * NOTE(review): the settings declaration and return statement are not
 * visible in this extract.
 */
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
418 if (lmp_bredr_capable(hdev))
419 settings |= MGMT_SETTING_BREDR;
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
436 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR UUID16 field listing all registered 16-bit UUIDs to
 * @data (capacity @len).  The field header is written lazily on the
 * first UUID; the tag is downgraded from _ALL to _SOME when space runs
 * out.  Returns the advanced write pointer.
 * NOTE(review): extract is gap-ridden (loop-body setup and the final
 * return are not visible).
 */
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
/* The 16-bit alias lives in bytes 12-13 of the 128-bit form. */
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
/* PnP Information is deliberately excluded — it is advertised via the
 * dedicated EIR_DEVICE_ID field instead.
 */
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
/* Append an EIR UUID32 field for all registered 32-bit UUIDs; same
 * lazy-header / _ALL-to-_SOME scheme as create_uuid16_list().
 * NOTE(review): gap-ridden extract — header emission and return not
 * visible.
 */
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
/* The 32-bit alias occupies bytes 12-15 of the stored 128-bit form. */
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
/* Append an EIR UUID128 field for all registered 128-bit UUIDs; same
 * scheme as the 16/32-bit variants, copying the full 16-byte value.
 * NOTE(review): gap-ridden extract — header emission and return not
 * visible.
 */
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
/* Assemble the Extended Inquiry Response payload for @hdev into @data:
 * local name (complete or shortened), TX power, Device ID, then the
 * three UUID list fields, each sized against the remaining buffer.
 * NOTE(review): extract is gap-ridden — the name-length clamping that
 * decides SHORT vs COMPLETE is not fully visible.
 */
546 static void create_eir(struct hci_dev *hdev, u8 *data)
551 name_len = strlen(hdev->dev_name);
557 ptr[1] = EIR_NAME_SHORT;
559 ptr[1] = EIR_NAME_COMPLETE;
561 /* EIR Data length */
562 ptr[0] = name_len + 1;
564 memcpy(ptr + 2, hdev->dev_name, name_len);
566 ptr += (name_len + 2);
/* HCI_TX_POWER_INVALID means the controller never reported a value. */
569 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
571 ptr[1] = EIR_TX_POWER;
572 ptr[2] = (u8) hdev->inq_tx_power;
/* devid_source > 0 indicates userspace configured a Device ID. */
577 if (hdev->devid_source > 0) {
579 ptr[1] = EIR_DEVICE_ID;
581 put_unaligned_le16(hdev->devid_source, ptr + 2);
582 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
583 put_unaligned_le16(hdev->devid_product, ptr + 6);
584 put_unaligned_le16(hdev->devid_version, ptr + 8);
589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command on @req if the freshly generated EIR
 * differs from the cached copy.  Skipped while powered off, without
 * extended-inquiry support, with SSP disabled, or while the service
 * cache is active (a flush will follow when the cache expires).
 */
594 static void update_eir(struct hci_request *req)
596 struct hci_dev *hdev = req->hdev;
597 struct hci_cp_write_eir cp;
599 if (!hdev_is_powered(hdev))
602 if (!lmp_ext_inq_capable(hdev))
605 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
608 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
611 memset(&cp, 0, sizeof(cp));
613 create_eir(hdev, cp.data);
/* Avoid a redundant HCI command when nothing changed. */
615 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
618 memcpy(hdev->eir, cp.data, sizeof(cp.data));
620 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
623 static u8 get_service_classes(struct hci_dev *hdev)
625 struct bt_uuid *uuid;
628 list_for_each_entry(uuid, &hdev->uuids, list)
629 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command on @req when the computed
 * CoD (minor, major, service classes) differs from the current value.
 * Skipped while powered off or while the service cache is active.
 */
634 static void update_class(struct hci_request *req)
636 struct hci_dev *hdev = req->hdev;
639 BT_DBG("%s", hdev->name);
641 if (!hdev_is_powered(hdev))
644 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
/* CoD wire format is little-endian: minor, major, service octets. */
647 cod[0] = hdev->minor_class;
648 cod[1] = hdev->major_class;
649 cod[2] = get_service_classes(hdev);
651 if (memcmp(cod, hdev->dev_class, 3) == 0)
654 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and flushes the deferred EIR/CoD updates to the
 * controller via a single hci_request.
 * NOTE(review): extract is gap-ridden — the hci_dev_lock() and the
 * update_eir()/update_class() calls between init and unlock are not
 * visible here.
 */
657 static void service_cache_off(struct work_struct *work)
659 struct hci_dev *hdev = container_of(work, struct hci_dev,
661 struct hci_request req;
/* Bail out if the cache flag was already cleared by another path. */
663 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
666 hci_req_init(&req, hdev);
673 hci_dev_unlock(hdev);
675 hci_req_run(&req, NULL);
/* One-time per-controller mgmt initialization, triggered the first
 * time a mgmt command addresses @hdev.  Idempotent via the HCI_MGMT
 * test_and_set guard.
 */
678 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
680 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
683 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)
685 /* Non-mgmt controlled devices get this bit set
686 * implicitly so that pairing works for them, however
687 * for mgmt we require user-space to explicitly enable
690 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* Handler for MGMT_OP_READ_INFO: reply with the controller's address,
 * HCI version, manufacturer, settings masks, CoD and names.
 * NOTE(review): the matching hci_dev_lock() preceding the unlock at
 * the end is not visible in this extract.
 */
693 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
694 void *data, u16 data_len)
696 struct mgmt_rp_read_info rp;
698 BT_DBG("sock %p %s", sk, hdev->name);
702 memset(&rp, 0, sizeof(rp));
704 bacpy(&rp.bdaddr, &hdev->bdaddr);
706 rp.version = hdev->hci_ver;
707 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
709 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
710 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
712 memcpy(rp.dev_class, hdev->dev_class, 3);
714 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
715 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
717 hci_dev_unlock(hdev);
719 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command and its owned resources.
 * NOTE(review): body not visible in this extract — presumably drops
 * the socket reference and frees cmd->param and cmd; confirm upstream.
 */
723 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending-command record holding a copy of the request
 * parameters and link it onto @hdev's mgmt_pending list; the completion
 * path later replies to the stored socket.
 * NOTE(review): extract is gap-ridden — kmalloc failure handling, the
 * sock_hold()/cmd->sk assignment and the return are not visible.
 */
730 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
731 struct hci_dev *hdev, void *data,
734 struct pending_cmd *cmd;
736 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
740 cmd->opcode = opcode;
741 cmd->index = hdev->id;
743 cmd->param = kmalloc(len, GFP_KERNEL);
750 memcpy(cmd->param, data, len);
755 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command matching @opcode (0 matches
 * all).  Uses the _safe iterator so @cb may remove the entry.
 */
760 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
761 void (*cb)(struct pending_cmd *cmd,
765 struct pending_cmd *cmd, *tmp;
767 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
768 if (opcode > 0 && cmd->opcode != opcode)
/* Find the first pending command with @opcode on @hdev's list.
 * NOTE(review): the "return cmd" / "return NULL" tail is not visible
 * in this extract.
 */
775 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
777 struct pending_cmd *cmd;
779 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
780 if (cmd->opcode == opcode)
787 static void mgmt_pending_remove(struct pending_cmd *cmd)
789 list_del(&cmd->list);
790 mgmt_pending_free(cmd);
793 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
795 __le32 settings = cpu_to_le32(get_current_settings(hdev));
797 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Handler for MGMT_OP_SET_POWERED.  If the controller is in the
 * auto-off window, it is handed over to mgmt directly; a no-op request
 * is answered immediately; otherwise the actual power change is
 * deferred to the power_on/power_off work items and answered when they
 * complete (via the pending command).
 * NOTE(review): extract is gap-ridden — lock acquisition, several
 * error/exit branches and the return are not visible here.
 */
801 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
804 struct mgmt_mode *cp = data;
805 struct pending_cmd *cmd;
808 BT_DBG("request for %s", hdev->name);
810 if (cp->val != 0x00 && cp->val != 0x01)
811 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
812 MGMT_STATUS_INVALID_PARAMS);
/* Controller was about to auto-power-off: cancel that and take over. */
816 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
817 cancel_delayed_work(&hdev->power_off);
820 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
822 err = mgmt_powered(hdev, 1);
/* Requested state already matches the current power state. */
827 if (!!cp->val == hdev_is_powered(hdev)) {
828 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
832 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
833 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
838 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
845 queue_work(hdev->req_workqueue, &hdev->power_on);
847 queue_work(hdev->req_workqueue, &hdev->power_off.work);
852 hci_dev_unlock(hdev);
/* Broadcast a mgmt event with @data_len bytes of payload to all
 * mgmt control sockets except @skip_sk.  @hdev may be NULL for
 * controller-independent events (index becomes MGMT_INDEX_NONE).
 * NOTE(review): extract is gap-ridden — the alloc-failure check, the
 * hdev NULL test around the two index assignments, and the
 * kfree_skb()/return tail are not visible here.
 */
856 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
857 struct sock *skip_sk)
860 struct mgmt_hdr *hdr;
862 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
866 hdr = (void *) skb_put(skb, sizeof(*hdr));
867 hdr->opcode = cpu_to_le16(event);
869 hdr->index = cpu_to_le16(hdev->id);
871 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
872 hdr->len = cpu_to_le16(data_len);
875 memcpy(skb_put(skb, data_len), data, data_len);
877 /* Time stamp */
878 __net_timestamp(skb);
880 hci_send_to_control(skb, skip_sk);
886 static int new_settings(struct hci_dev *hdev, struct sock *skip)
890 ev = cpu_to_le32(get_current_settings(hdev));
892 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Handler for MGMT_OP_SET_DISCOVERABLE.  Validates val/timeout, then:
 * powered-off controllers just toggle the flag; no-op requests only
 * adjust the timeout; otherwise a Write Scan Enable is issued and the
 * reply is deferred via a pending command.
 * NOTE(review): extract is gap-ridden — lock acquisition, scan-variable
 * setup and several exit branches are not visible here.
 */
895 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
898 struct mgmt_cp_set_discoverable *cp = data;
899 struct pending_cmd *cmd;
904 BT_DBG("request for %s", hdev->name);
906 if (!lmp_bredr_capable(hdev))
907 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
908 MGMT_STATUS_NOT_SUPPORTED);
910 if (cp->val != 0x00 && cp->val != 0x01)
911 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
912 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense when turning discoverability ON. */
914 timeout = __le16_to_cpu(cp->timeout);
915 if (!cp->val && timeout > 0)
916 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
917 MGMT_STATUS_INVALID_PARAMS);
921 if (!hdev_is_powered(hdev) && timeout > 0) {
922 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
923 MGMT_STATUS_NOT_POWERED);
927 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
928 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
929 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first. */
934 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
935 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
936 MGMT_STATUS_REJECTED);
940 if (!hdev_is_powered(hdev)) {
941 bool changed = false;
943 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
944 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
948 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
953 err = new_settings(hdev, sk);
/* Already in the requested state: only (re)arm or cancel the timer. */
958 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
959 if (hdev->discov_timeout > 0) {
960 cancel_delayed_work(&hdev->discov_off);
961 hdev->discov_timeout = 0;
964 if (cp->val && timeout > 0) {
965 hdev->discov_timeout = timeout;
966 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
967 msecs_to_jiffies(hdev->discov_timeout * 1000));
970 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
974 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
983 scan |= SCAN_INQUIRY;
985 cancel_delayed_work(&hdev->discov_off);
987 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
989 mgmt_pending_remove(cmd);
992 hdev->discov_timeout = timeout;
995 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_CONNECTABLE.  Powered-off controllers toggle
 * the flag directly (disabling connectable also clears discoverable);
 * powered ones issue Write Scan Enable and defer the reply.
 * NOTE(review): extract is gap-ridden — lock acquisition, scan-value
 * construction and several exit branches are not visible here.
 */
999 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1002 struct mgmt_mode *cp = data;
1003 struct pending_cmd *cmd;
1007 BT_DBG("request for %s", hdev->name);
1009 if (!lmp_bredr_capable(hdev))
1010 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1011 MGMT_STATUS_NOT_SUPPORTED);
1013 if (cp->val != 0x00 && cp->val != 0x01)
1014 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1015 MGMT_STATUS_INVALID_PARAMS);
1019 if (!hdev_is_powered(hdev)) {
1020 bool changed = false;
1022 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1026 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1028 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Not connectable implies not discoverable either. */
1029 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1032 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1037 err = new_settings(hdev, sk);
1042 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1043 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1044 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
/* Page-scan state already matches the request. */
1049 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1050 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1054 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1065 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1066 hdev->discov_timeout > 0)
1067 cancel_delayed_work(&hdev->discov_off);
1070 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1072 mgmt_pending_remove(cmd);
1075 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PAIRABLE: pure host-side flag — no HCI
 * command needed, so the reply and broadcast are sent immediately.
 * NOTE(review): lock acquisition and the cp->val test choosing between
 * set/clear are not visible in this extract.
 */
1079 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1082 struct mgmt_mode *cp = data;
1085 BT_DBG("request for %s", hdev->name);
1087 if (cp->val != 0x00 && cp->val != 0x01)
1088 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1089 MGMT_STATUS_INVALID_PARAMS);
1094 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1096 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1098 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1102 err = new_settings(hdev, sk);
1105 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY.  Powered-off controllers
 * toggle the flag; powered ones issue HCI Write Auth Enable and defer
 * the reply via a pending command.
 * NOTE(review): extract is gap-ridden — lock acquisition, the val
 * assignment from cp->val and several exit branches are not visible.
 */
1109 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1112 struct mgmt_mode *cp = data;
1113 struct pending_cmd *cmd;
1117 BT_DBG("request for %s", hdev->name);
1119 if (!lmp_bredr_capable(hdev))
1120 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1121 MGMT_STATUS_NOT_SUPPORTED);
1123 if (cp->val != 0x00 && cp->val != 0x01)
1124 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1125 MGMT_STATUS_INVALID_PARAMS);
1129 if (!hdev_is_powered(hdev)) {
1130 bool changed = false;
1132 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1133 &hdev->dev_flags)) {
1134 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1138 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1143 err = new_settings(hdev, sk);
1148 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1149 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches the request. */
1156 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1157 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1161 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1167 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1169 mgmt_pending_remove(cmd);
1174 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP.  Powered-off controllers toggle the
 * HCI_SSP_ENABLED flag; powered ones issue HCI Write Simple Pairing
 * Mode and defer the reply via a pending command.
 * NOTE(review): extract is gap-ridden — lock acquisition, the val
 * assignment and several exit branches are not visible here.
 */
1178 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1180 struct mgmt_mode *cp = data;
1181 struct pending_cmd *cmd;
1185 BT_DBG("request for %s", hdev->name);
1187 if (!lmp_ssp_capable(hdev))
1188 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1189 MGMT_STATUS_NOT_SUPPORTED);
1191 if (cp->val != 0x00 && cp->val != 0x01)
1192 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1193 MGMT_STATUS_INVALID_PARAMS);
1199 if (!hdev_is_powered(hdev)) {
1200 bool changed = false;
1202 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1203 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1207 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1212 err = new_settings(hdev, sk);
1217 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1218 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1223 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1224 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1228 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1234 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1236 mgmt_pending_remove(cmd);
1241 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed / AMP): host-side flag only,
 * answered immediately.
 * NOTE(review): the capability test guarding the first cmd_status and
 * the cp->val test choosing set/clear are not visible in this extract.
 */
1245 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1247 struct mgmt_mode *cp = data;
1249 BT_DBG("request for %s", hdev->name);
1252 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1253 MGMT_STATUS_NOT_SUPPORTED);
1255 if (cp->val != 0x00 && cp->val != 0x01)
1256 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1257 MGMT_STATUS_INVALID_PARAMS);
1260 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1262 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1264 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* Handler for MGMT_OP_SET_LE.  When powered off, or when the host-LE
 * state already matches, only the flag is toggled; otherwise HCI Write
 * LE Host Supported is issued and the reply deferred.
 * NOTE(review): extract is gap-ridden — lock acquisition, the val
 * assignment, the hci_cp.le/simul setup and several exits are not
 * visible here.
 */
1267 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1269 struct mgmt_mode *cp = data;
1270 struct hci_cp_write_le_host_supported hci_cp;
1271 struct pending_cmd *cmd;
1275 BT_DBG("request for %s", hdev->name);
1277 if (!lmp_le_capable(hdev))
1278 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1279 MGMT_STATUS_NOT_SUPPORTED);
1281 if (cp->val != 0x00 && cp->val != 0x01)
1282 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1283 MGMT_STATUS_INVALID_PARAMS);
1288 enabled = lmp_host_le_capable(hdev);
1290 if (!hdev_is_powered(hdev) || val == enabled) {
1291 bool changed = false;
1293 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1294 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1298 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1303 err = new_settings(hdev, sk);
1308 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1309 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1314 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1320 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE+BR/EDR only if the controller supports it. */
1324 hci_cp.simul = lmp_le_br_capable(hdev);
1327 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1330 mgmt_pending_remove(cmd);
1333 hci_dev_unlock(hdev);
1337 /* This is a helper function to test for pending mgmt commands that can
1338 * cause CoD or EIR HCI commands. We can only allow one such pending
1339 * mgmt command at a time since otherwise we cannot easily track what
1340 * the current values are, will be, and based on that calculate if a new
1341 * HCI command needs to be sent and if yes with what value.
/* Returns true when any CoD/EIR-affecting command is still pending.
 * NOTE(review): the "return true"/"return false" tail is not visible
 * in this extract.
 */
1343 static bool pending_eir_or_class(struct hci_dev *hdev)
1345 struct pending_cmd *cmd;
1347 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1348 switch (cmd->opcode) {
1349 case MGMT_OP_ADD_UUID:
1350 case MGMT_OP_REMOVE_UUID:
1351 case MGMT_OP_SET_DEV_CLASS:
1352 case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases whose
 * final 4 bytes replace the leading zeros of this base.
 */
1360 static const u8 bluetooth_base_uuid[] = {
1361 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1362 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as a 16-, 32- or 128-bit value: anything not
 * built on the Bluetooth Base UUID is a true 128-bit UUID; otherwise
 * the top 32 bits decide between 16 and 32.
 * NOTE(review): the "return 128" branch and the 16-vs-32 decision on
 * val are not visible in this extract.
 */
1365 static u8 get_uuid_size(const u8 *uuid)
1369 if (memcmp(uuid, bluetooth_base_uuid, 12))
1372 val = get_unaligned_le32(&uuid[12]);
/* Shared completion path for the CoD/EIR-affecting commands: reply to
 * the pending @mgmt_op (if any) with the translated status and the
 * current Class of Device, then drop the pending entry.
 * NOTE(review): the matching hci_dev_lock() and the "if (!cmd) goto"
 * guard are not visible in this extract.
 */
1379 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1381 struct pending_cmd *cmd;
1385 cmd = mgmt_pending_find(mgmt_op, hdev);
1389 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1390 hdev->dev_class, 3);
1392 mgmt_pending_remove(cmd);
1395 hci_dev_unlock(hdev);
1398 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1400 BT_DBG("status 0x%02x", status);
1402 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID: register a UUID, then refresh CoD and
 * EIR via an hci_request.  -ENODATA from hci_req_run() means no HCI
 * command was actually needed, so the reply is sent synchronously;
 * otherwise a pending command defers it to add_uuid_complete().
 * NOTE(review): extract is gap-ridden — lock acquisition, kmalloc
 * failure handling, the update_class()/update_eir() calls on the
 * request and the unlock/return tail are not all visible.
 */
1405 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1407 struct mgmt_cp_add_uuid *cp = data;
1408 struct pending_cmd *cmd;
1409 struct hci_request req;
1410 struct bt_uuid *uuid;
1413 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be in flight at a time. */
1417 if (pending_eir_or_class(hdev)) {
1418 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1423 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1429 memcpy(uuid->uuid, cp->uuid, 16);
1430 uuid->svc_hint = cp->svc_hint;
1431 uuid->size = get_uuid_size(cp->uuid);
1433 list_add_tail(&uuid->list, &hdev->uuids);
1435 hci_req_init(&req, hdev);
1440 err = hci_req_run(&req, add_uuid_complete);
1442 if (err != -ENODATA)
/* Nothing to send to the controller: answer right away. */
1445 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1446 hdev->dev_class, 3);
1450 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1459 hci_dev_unlock(hdev);
/* Arm the service cache (defers CoD/EIR updates) on a powered
 * controller.  Returns true when the cache was (re)armed so callers
 * can skip the immediate update.
 * NOTE(review): the CACHE_TIMEOUT argument of queue_delayed_work() and
 * the return statements are not visible in this extract.
 */
1463 static bool enable_service_cache(struct hci_dev *hdev)
1465 if (!hdev_is_powered(hdev))
1468 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1469 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1477 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1479 BT_DBG("status 0x%02x", status);
1481 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID: remove a specific UUID, or all of
 * them when the all-zero wildcard UUID is given (in which case the
 * service cache may absorb the update).  Unknown UUIDs get
 * MGMT_STATUS_INVALID_PARAMS.  Reply handling mirrors add_uuid().
 * NOTE(review): extract is gap-ridden — lock acquisition, the found
 * counter, kfree of removed entries and the update_class()/update_eir()
 * calls are not all visible.
 */
1484 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1487 struct mgmt_cp_remove_uuid *cp = data;
1488 struct pending_cmd *cmd;
1489 struct bt_uuid *match, *tmp;
1490 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1491 struct hci_request req;
1494 BT_DBG("request for %s", hdev->name);
1498 if (pending_eir_or_class(hdev)) {
1499 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: clear every registered UUID. */
1504 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1505 err = hci_uuids_clear(hdev);
1507 if (enable_service_cache(hdev)) {
1508 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1509 0, hdev->dev_class, 3);
1518 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1519 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1522 list_del(&match->list);
1528 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1529 MGMT_STATUS_INVALID_PARAMS);
1534 hci_req_init(&req, hdev);
1539 err = hci_req_run(&req, remove_uuid_complete);
1541 if (err != -ENODATA)
1544 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1545 hdev->dev_class, 3);
1549 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1558 hci_dev_unlock(hdev);
1562 static void set_class_complete(struct hci_dev *hdev, u8 status)
1564 BT_DBG("status 0x%02x", status);
1566 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS: validate and store major/minor
 * class, flush the service cache if it was armed (its deferred EIR
 * update is folded into this request), then write the new CoD.  Reply
 * handling mirrors add_uuid().
 * NOTE(review): extract is gap-ridden — lock acquisition, the
 * re-lock after cancel_delayed_work_sync(), the update_eir()/
 * update_class() calls and the unlock/return tail are not all visible.
 */
1569 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1572 struct mgmt_cp_set_dev_class *cp = data;
1573 struct pending_cmd *cmd;
1574 struct hci_request req;
1577 BT_DBG("request for %s", hdev->name);
1579 if (!lmp_bredr_capable(hdev))
1580 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1581 MGMT_STATUS_NOT_SUPPORTED);
1585 if (pending_eir_or_class(hdev)) {
1586 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Lower two minor bits and upper three major bits are reserved. */
1591 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1592 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1593 MGMT_STATUS_INVALID_PARAMS);
1597 hdev->major_class = cp->major;
1598 hdev->minor_class = cp->minor;
1600 if (!hdev_is_powered(hdev)) {
1601 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1602 hdev->dev_class, 3);
1606 hci_req_init(&req, hdev);
/* Service cache was armed: cancel its work (unlocked, since the work
 * item itself takes the lock) and do its update as part of this req.
 */
1608 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1609 hci_dev_unlock(hdev);
1610 cancel_delayed_work_sync(&hdev->service_cache);
1617 err = hci_req_run(&req, set_class_complete);
1619 if (err != -ENODATA)
1622 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1623 hdev->dev_class, 3);
1627 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1636 hci_dev_unlock(hdev);
1640 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1643 struct mgmt_cp_load_link_keys *cp = data;
1644 u16 key_count, expected_len;
1647 key_count = __le16_to_cpu(cp->key_count);
1649 expected_len = sizeof(*cp) + key_count *
1650 sizeof(struct mgmt_link_key_info);
1651 if (expected_len != len) {
1652 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1654 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1655 MGMT_STATUS_INVALID_PARAMS);
1658 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1659 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1660 MGMT_STATUS_INVALID_PARAMS);
1662 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1665 for (i = 0; i < key_count; i++) {
1666 struct mgmt_link_key_info *key = &cp->keys[i];
1668 if (key->addr.type != BDADDR_BREDR)
1669 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1670 MGMT_STATUS_INVALID_PARAMS);
1675 hci_link_keys_clear(hdev);
1677 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1680 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1682 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1684 for (i = 0; i < key_count; i++) {
1685 struct mgmt_link_key_info *key = &cp->keys[i];
1687 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1688 key->type, key->pin_len);
1691 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1693 hci_dev_unlock(hdev);
1698 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1699 u8 addr_type, struct sock *skip_sk)
1701 struct mgmt_ev_device_unpaired ev;
1703 bacpy(&ev.addr.bdaddr, bdaddr);
1704 ev.addr.type = addr_type;
1706 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1710 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1713 struct mgmt_cp_unpair_device *cp = data;
1714 struct mgmt_rp_unpair_device rp;
1715 struct hci_cp_disconnect dc;
1716 struct pending_cmd *cmd;
1717 struct hci_conn *conn;
1720 memset(&rp, 0, sizeof(rp));
1721 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1722 rp.addr.type = cp->addr.type;
1724 if (!bdaddr_type_is_valid(cp->addr.type))
1725 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1726 MGMT_STATUS_INVALID_PARAMS,
1729 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1730 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1731 MGMT_STATUS_INVALID_PARAMS,
1736 if (!hdev_is_powered(hdev)) {
1737 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1738 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1742 if (cp->addr.type == BDADDR_BREDR)
1743 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1745 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1748 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1749 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1753 if (cp->disconnect) {
1754 if (cp->addr.type == BDADDR_BREDR)
1755 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1758 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1765 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1767 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1771 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1778 dc.handle = cpu_to_le16(conn->handle);
1779 dc.reason = 0x13; /* Remote User Terminated Connection */
1780 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1782 mgmt_pending_remove(cmd);
1785 hci_dev_unlock(hdev);
1789 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1792 struct mgmt_cp_disconnect *cp = data;
1793 struct mgmt_rp_disconnect rp;
1794 struct hci_cp_disconnect dc;
1795 struct pending_cmd *cmd;
1796 struct hci_conn *conn;
1801 memset(&rp, 0, sizeof(rp));
1802 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1803 rp.addr.type = cp->addr.type;
1805 if (!bdaddr_type_is_valid(cp->addr.type))
1806 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1807 MGMT_STATUS_INVALID_PARAMS,
1812 if (!test_bit(HCI_UP, &hdev->flags)) {
1813 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1814 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1818 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1819 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1820 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1824 if (cp->addr.type == BDADDR_BREDR)
1825 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1828 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1830 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1831 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1832 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1836 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1842 dc.handle = cpu_to_le16(conn->handle);
1843 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1845 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1847 mgmt_pending_remove(cmd);
1850 hci_dev_unlock(hdev);
1854 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1856 switch (link_type) {
1858 switch (addr_type) {
1859 case ADDR_LE_DEV_PUBLIC:
1860 return BDADDR_LE_PUBLIC;
1863 /* Fallback to LE Random address type */
1864 return BDADDR_LE_RANDOM;
1868 /* Fallback to BR/EDR type */
1869 return BDADDR_BREDR;
1873 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1876 struct mgmt_rp_get_connections *rp;
1886 if (!hdev_is_powered(hdev)) {
1887 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1888 MGMT_STATUS_NOT_POWERED);
1893 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1894 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1898 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1899 rp = kmalloc(rp_len, GFP_KERNEL);
1906 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1907 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1909 bacpy(&rp->addr[i].bdaddr, &c->dst);
1910 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1911 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1916 rp->conn_count = cpu_to_le16(i);
1918 /* Recalculate length in case of filtered SCO connections, etc */
1919 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1921 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1927 hci_dev_unlock(hdev);
1931 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1932 struct mgmt_cp_pin_code_neg_reply *cp)
1934 struct pending_cmd *cmd;
1937 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1942 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1943 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1945 mgmt_pending_remove(cmd);
1950 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1953 struct hci_conn *conn;
1954 struct mgmt_cp_pin_code_reply *cp = data;
1955 struct hci_cp_pin_code_reply reply;
1956 struct pending_cmd *cmd;
1963 if (!hdev_is_powered(hdev)) {
1964 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1965 MGMT_STATUS_NOT_POWERED);
1969 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1971 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1972 MGMT_STATUS_NOT_CONNECTED);
1976 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1977 struct mgmt_cp_pin_code_neg_reply ncp;
1979 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1981 BT_ERR("PIN code is not 16 bytes long");
1983 err = send_pin_code_neg_reply(sk, hdev, &ncp);
1985 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1986 MGMT_STATUS_INVALID_PARAMS);
1991 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
1997 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
1998 reply.pin_len = cp->pin_len;
1999 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2001 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2003 mgmt_pending_remove(cmd);
2006 hci_dev_unlock(hdev);
2010 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2013 struct mgmt_cp_set_io_capability *cp = data;
2019 hdev->io_capability = cp->io_capability;
2021 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2022 hdev->io_capability);
2024 hci_dev_unlock(hdev);
2026 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2030 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2032 struct hci_dev *hdev = conn->hdev;
2033 struct pending_cmd *cmd;
2035 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2036 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2039 if (cmd->user_data != conn)
2048 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2050 struct mgmt_rp_pair_device rp;
2051 struct hci_conn *conn = cmd->user_data;
2053 bacpy(&rp.addr.bdaddr, &conn->dst);
2054 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2056 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2059 /* So we don't get further callbacks for this connection */
2060 conn->connect_cfm_cb = NULL;
2061 conn->security_cfm_cb = NULL;
2062 conn->disconn_cfm_cb = NULL;
2066 mgmt_pending_remove(cmd);
2069 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2071 struct pending_cmd *cmd;
2073 BT_DBG("status %u", status);
2075 cmd = find_pairing(conn);
2077 BT_DBG("Unable to find a pending command");
2079 pairing_complete(cmd, mgmt_status(status));
2082 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2084 struct pending_cmd *cmd;
2086 BT_DBG("status %u", status);
2091 cmd = find_pairing(conn);
2093 BT_DBG("Unable to find a pending command");
2095 pairing_complete(cmd, mgmt_status(status));
2098 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2101 struct mgmt_cp_pair_device *cp = data;
2102 struct mgmt_rp_pair_device rp;
2103 struct pending_cmd *cmd;
2104 u8 sec_level, auth_type;
2105 struct hci_conn *conn;
2110 memset(&rp, 0, sizeof(rp));
2111 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2112 rp.addr.type = cp->addr.type;
2114 if (!bdaddr_type_is_valid(cp->addr.type))
2115 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2116 MGMT_STATUS_INVALID_PARAMS,
2121 if (!hdev_is_powered(hdev)) {
2122 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2123 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2127 sec_level = BT_SECURITY_MEDIUM;
2128 if (cp->io_cap == 0x03)
2129 auth_type = HCI_AT_DEDICATED_BONDING;
2131 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2133 if (cp->addr.type == BDADDR_BREDR)
2134 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2135 cp->addr.type, sec_level, auth_type);
2137 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2138 cp->addr.type, sec_level, auth_type);
2143 if (PTR_ERR(conn) == -EBUSY)
2144 status = MGMT_STATUS_BUSY;
2146 status = MGMT_STATUS_CONNECT_FAILED;
2148 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2154 if (conn->connect_cfm_cb) {
2156 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2157 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2161 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2168 /* For LE, just connecting isn't a proof that the pairing finished */
2169 if (cp->addr.type == BDADDR_BREDR)
2170 conn->connect_cfm_cb = pairing_complete_cb;
2172 conn->connect_cfm_cb = le_connect_complete_cb;
2174 conn->security_cfm_cb = pairing_complete_cb;
2175 conn->disconn_cfm_cb = pairing_complete_cb;
2176 conn->io_capability = cp->io_cap;
2177 cmd->user_data = conn;
2179 if (conn->state == BT_CONNECTED &&
2180 hci_conn_security(conn, sec_level, auth_type))
2181 pairing_complete(cmd, 0);
2186 hci_dev_unlock(hdev);
2190 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2193 struct mgmt_addr_info *addr = data;
2194 struct pending_cmd *cmd;
2195 struct hci_conn *conn;
2202 if (!hdev_is_powered(hdev)) {
2203 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2204 MGMT_STATUS_NOT_POWERED);
2208 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2210 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2211 MGMT_STATUS_INVALID_PARAMS);
2215 conn = cmd->user_data;
2217 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2218 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2219 MGMT_STATUS_INVALID_PARAMS);
2223 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2225 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2226 addr, sizeof(*addr));
2228 hci_dev_unlock(hdev);
2232 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2233 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2234 u16 hci_op, __le32 passkey)
2236 struct pending_cmd *cmd;
2237 struct hci_conn *conn;
2242 if (!hdev_is_powered(hdev)) {
2243 err = cmd_status(sk, hdev->id, mgmt_op,
2244 MGMT_STATUS_NOT_POWERED);
2248 if (type == BDADDR_BREDR)
2249 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2251 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2254 err = cmd_status(sk, hdev->id, mgmt_op,
2255 MGMT_STATUS_NOT_CONNECTED);
2259 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2260 /* Continue with pairing via SMP */
2261 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2264 err = cmd_status(sk, hdev->id, mgmt_op,
2265 MGMT_STATUS_SUCCESS);
2267 err = cmd_status(sk, hdev->id, mgmt_op,
2268 MGMT_STATUS_FAILED);
2273 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2279 /* Continue with pairing via HCI */
2280 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2281 struct hci_cp_user_passkey_reply cp;
2283 bacpy(&cp.bdaddr, bdaddr);
2284 cp.passkey = passkey;
2285 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2287 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2290 mgmt_pending_remove(cmd);
2293 hci_dev_unlock(hdev);
2297 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2298 void *data, u16 len)
2300 struct mgmt_cp_pin_code_neg_reply *cp = data;
2304 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2305 MGMT_OP_PIN_CODE_NEG_REPLY,
2306 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2309 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2312 struct mgmt_cp_user_confirm_reply *cp = data;
2316 if (len != sizeof(*cp))
2317 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2318 MGMT_STATUS_INVALID_PARAMS);
2320 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2321 MGMT_OP_USER_CONFIRM_REPLY,
2322 HCI_OP_USER_CONFIRM_REPLY, 0);
2325 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2326 void *data, u16 len)
2328 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2332 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2333 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2334 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2337 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2340 struct mgmt_cp_user_passkey_reply *cp = data;
2344 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2345 MGMT_OP_USER_PASSKEY_REPLY,
2346 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2349 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2350 void *data, u16 len)
2352 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2356 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2357 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2358 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2361 static void update_name(struct hci_request *req)
2363 struct hci_dev *hdev = req->hdev;
2364 struct hci_cp_write_local_name cp;
2366 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2368 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2371 static void set_name_complete(struct hci_dev *hdev, u8 status)
2373 struct mgmt_cp_set_local_name *cp;
2374 struct pending_cmd *cmd;
2376 BT_DBG("status 0x%02x", status);
2380 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2387 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2388 mgmt_status(status));
2390 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2393 mgmt_pending_remove(cmd);
2396 hci_dev_unlock(hdev);
2399 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2402 struct mgmt_cp_set_local_name *cp = data;
2403 struct pending_cmd *cmd;
2404 struct hci_request req;
2411 /* If the old values are the same as the new ones just return a
2412 * direct command complete event.
2414 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2415 !memcmp(hdev->short_name, cp->short_name,
2416 sizeof(hdev->short_name))) {
2417 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2422 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2424 if (!hdev_is_powered(hdev)) {
2425 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2427 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2432 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2438 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2444 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2446 hci_req_init(&req, hdev);
2448 if (lmp_bredr_capable(hdev)) {
2453 if (lmp_le_capable(hdev))
2454 hci_update_ad(&req);
2456 err = hci_req_run(&req, set_name_complete);
2458 mgmt_pending_remove(cmd);
2461 hci_dev_unlock(hdev);
2465 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2466 void *data, u16 data_len)
2468 struct pending_cmd *cmd;
2471 BT_DBG("%s", hdev->name);
2475 if (!hdev_is_powered(hdev)) {
2476 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2477 MGMT_STATUS_NOT_POWERED);
2481 if (!lmp_ssp_capable(hdev)) {
2482 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2483 MGMT_STATUS_NOT_SUPPORTED);
2487 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2488 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2493 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2499 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2501 mgmt_pending_remove(cmd);
2504 hci_dev_unlock(hdev);
2508 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2509 void *data, u16 len)
2511 struct mgmt_cp_add_remote_oob_data *cp = data;
2515 BT_DBG("%s ", hdev->name);
2519 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2522 status = MGMT_STATUS_FAILED;
2524 status = MGMT_STATUS_SUCCESS;
2526 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2527 &cp->addr, sizeof(cp->addr));
2529 hci_dev_unlock(hdev);
2533 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2534 void *data, u16 len)
2536 struct mgmt_cp_remove_remote_oob_data *cp = data;
2540 BT_DBG("%s", hdev->name);
2544 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2546 status = MGMT_STATUS_INVALID_PARAMS;
2548 status = MGMT_STATUS_SUCCESS;
2550 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2551 status, &cp->addr, sizeof(cp->addr));
2553 hci_dev_unlock(hdev);
2557 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2561 BT_DBG("%s", hdev->name);
2565 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2567 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2569 hci_dev_unlock(hdev);
2574 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2575 void *data, u16 len)
2577 struct mgmt_cp_start_discovery *cp = data;
2578 struct pending_cmd *cmd;
2581 BT_DBG("%s", hdev->name);
2585 if (!hdev_is_powered(hdev)) {
2586 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2587 MGMT_STATUS_NOT_POWERED);
2591 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2592 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2597 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2598 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2603 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2609 hdev->discovery.type = cp->type;
2611 switch (hdev->discovery.type) {
2612 case DISCOV_TYPE_BREDR:
2613 if (!lmp_bredr_capable(hdev)) {
2614 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2615 MGMT_STATUS_NOT_SUPPORTED);
2616 mgmt_pending_remove(cmd);
2620 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2623 case DISCOV_TYPE_LE:
2624 if (!lmp_host_le_capable(hdev)) {
2625 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2626 MGMT_STATUS_NOT_SUPPORTED);
2627 mgmt_pending_remove(cmd);
2631 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2632 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2635 case DISCOV_TYPE_INTERLEAVED:
2636 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2637 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2638 MGMT_STATUS_NOT_SUPPORTED);
2639 mgmt_pending_remove(cmd);
2643 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2644 LE_SCAN_TIMEOUT_BREDR_LE);
2648 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2649 MGMT_STATUS_INVALID_PARAMS);
2650 mgmt_pending_remove(cmd);
2655 mgmt_pending_remove(cmd);
2657 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2660 hci_dev_unlock(hdev);
2664 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2667 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2668 struct pending_cmd *cmd;
2669 struct hci_cp_remote_name_req_cancel cp;
2670 struct inquiry_entry *e;
2673 BT_DBG("%s", hdev->name);
2677 if (!hci_discovery_active(hdev)) {
2678 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2679 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2680 sizeof(mgmt_cp->type));
2684 if (hdev->discovery.type != mgmt_cp->type) {
2685 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2686 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2687 sizeof(mgmt_cp->type));
2691 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2697 switch (hdev->discovery.state) {
2698 case DISCOVERY_FINDING:
2699 if (test_bit(HCI_INQUIRY, &hdev->flags))
2700 err = hci_cancel_inquiry(hdev);
2702 err = hci_cancel_le_scan(hdev);
2706 case DISCOVERY_RESOLVING:
2707 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2710 mgmt_pending_remove(cmd);
2711 err = cmd_complete(sk, hdev->id,
2712 MGMT_OP_STOP_DISCOVERY, 0,
2714 sizeof(mgmt_cp->type));
2715 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2719 bacpy(&cp.bdaddr, &e->data.bdaddr);
2720 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2726 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2731 mgmt_pending_remove(cmd);
2733 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2736 hci_dev_unlock(hdev);
2740 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2743 struct mgmt_cp_confirm_name *cp = data;
2744 struct inquiry_entry *e;
2747 BT_DBG("%s", hdev->name);
2751 if (!hci_discovery_active(hdev)) {
2752 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2753 MGMT_STATUS_FAILED);
2757 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2759 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2760 MGMT_STATUS_INVALID_PARAMS);
2764 if (cp->name_known) {
2765 e->name_state = NAME_KNOWN;
2768 e->name_state = NAME_NEEDED;
2769 hci_inquiry_cache_update_resolve(hdev, e);
2772 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2776 hci_dev_unlock(hdev);
2780 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2783 struct mgmt_cp_block_device *cp = data;
2787 BT_DBG("%s", hdev->name);
2789 if (!bdaddr_type_is_valid(cp->addr.type))
2790 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2791 MGMT_STATUS_INVALID_PARAMS,
2792 &cp->addr, sizeof(cp->addr));
2796 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2798 status = MGMT_STATUS_FAILED;
2800 status = MGMT_STATUS_SUCCESS;
2802 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2803 &cp->addr, sizeof(cp->addr));
2805 hci_dev_unlock(hdev);
2810 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2813 struct mgmt_cp_unblock_device *cp = data;
2817 BT_DBG("%s", hdev->name);
2819 if (!bdaddr_type_is_valid(cp->addr.type))
2820 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2821 MGMT_STATUS_INVALID_PARAMS,
2822 &cp->addr, sizeof(cp->addr));
2826 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2828 status = MGMT_STATUS_INVALID_PARAMS;
2830 status = MGMT_STATUS_SUCCESS;
2832 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2833 &cp->addr, sizeof(cp->addr));
2835 hci_dev_unlock(hdev);
2840 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2843 struct mgmt_cp_set_device_id *cp = data;
2844 struct hci_request req;
2848 BT_DBG("%s", hdev->name);
2850 source = __le16_to_cpu(cp->source);
2852 if (source > 0x0002)
2853 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2854 MGMT_STATUS_INVALID_PARAMS);
2858 hdev->devid_source = source;
2859 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2860 hdev->devid_product = __le16_to_cpu(cp->product);
2861 hdev->devid_version = __le16_to_cpu(cp->version);
2863 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2865 hci_req_init(&req, hdev);
2867 hci_req_run(&req, NULL);
2869 hci_dev_unlock(hdev);
2874 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2875 void *data, u16 len)
2877 struct mgmt_mode *cp = data;
2878 struct hci_cp_write_page_scan_activity acp;
2882 BT_DBG("%s", hdev->name);
2884 if (!lmp_bredr_capable(hdev))
2885 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2886 MGMT_STATUS_NOT_SUPPORTED);
2888 if (cp->val != 0x00 && cp->val != 0x01)
2889 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2890 MGMT_STATUS_INVALID_PARAMS);
2892 if (!hdev_is_powered(hdev))
2893 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2894 MGMT_STATUS_NOT_POWERED);
2896 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2897 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2898 MGMT_STATUS_REJECTED);
2903 type = PAGE_SCAN_TYPE_INTERLACED;
2905 /* 160 msec page scan interval */
2906 acp.interval = __constant_cpu_to_le16(0x0100);
2908 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2910 /* default 1.28 sec page scan */
2911 acp.interval = __constant_cpu_to_le16(0x0800);
2914 /* default 11.25 msec page scan window */
2915 acp.window = __constant_cpu_to_le16(0x0012);
2917 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2920 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2921 MGMT_STATUS_FAILED);
2925 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2927 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2928 MGMT_STATUS_FAILED);
2932 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2935 hci_dev_unlock(hdev);
2939 static bool ltk_is_valid(struct mgmt_ltk_info *key)
2941 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2943 if (key->master != 0x00 && key->master != 0x01)
2945 if (!bdaddr_type_is_le(key->addr.type))
2950 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2951 void *cp_data, u16 len)
2953 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2954 u16 key_count, expected_len;
2957 key_count = __le16_to_cpu(cp->key_count);
2959 expected_len = sizeof(*cp) + key_count *
2960 sizeof(struct mgmt_ltk_info);
2961 if (expected_len != len) {
2962 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2964 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2965 MGMT_STATUS_INVALID_PARAMS);
2968 BT_DBG("%s key_count %u", hdev->name, key_count);
2970 for (i = 0; i < key_count; i++) {
2971 struct mgmt_ltk_info *key = &cp->keys[i];
2973 if (!ltk_is_valid(key))
2974 return cmd_status(sk, hdev->id,
2975 MGMT_OP_LOAD_LONG_TERM_KEYS,
2976 MGMT_STATUS_INVALID_PARAMS);
2981 hci_smp_ltks_clear(hdev);
2983 for (i = 0; i < key_count; i++) {
2984 struct mgmt_ltk_info *key = &cp->keys[i];
2990 type = HCI_SMP_LTK_SLAVE;
2992 hci_add_ltk(hdev, &key->addr.bdaddr,
2993 bdaddr_to_le(key->addr.type),
2994 type, 0, key->authenticated, key->val,
2995 key->enc_size, key->ediv, key->rand);
2998 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3001 hci_dev_unlock(hdev);
3006 static const struct mgmt_handler {
3007 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3011 } mgmt_handlers[] = {
3012 { NULL }, /* 0x0000 (no command) */
3013 { read_version, false, MGMT_READ_VERSION_SIZE },
3014 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3015 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3016 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3017 { set_powered, false, MGMT_SETTING_SIZE },
3018 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3019 { set_connectable, false, MGMT_SETTING_SIZE },
3020 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3021 { set_pairable, false, MGMT_SETTING_SIZE },
3022 { set_link_security, false, MGMT_SETTING_SIZE },
3023 { set_ssp, false, MGMT_SETTING_SIZE },
3024 { set_hs, false, MGMT_SETTING_SIZE },
3025 { set_le, false, MGMT_SETTING_SIZE },
3026 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3027 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3028 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3029 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3030 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3031 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3032 { disconnect, false, MGMT_DISCONNECT_SIZE },
3033 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3034 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3035 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3036 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3037 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3038 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3039 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3040 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3041 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3042 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3043 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3044 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3045 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3046 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3047 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3048 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3049 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3050 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3051 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3052 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/*
 * mgmt_control() - entry point for a management command received on an
 * HCI control socket.  Copies the message from the iovec, validates the
 * mgmt_hdr (opcode/index/len), resolves the target hci_dev, looks the
 * opcode up in mgmt_handlers[] and dispatches to the handler.
 * Returns a negative errno or the handler's result (elided lines make
 * the exact return path not fully visible here).
 * NOTE(review): original line numbers jump (3057-3059, 3064-3065,
 * 3129-3140, ...), so locals, locking and the cleanup/free path are
 * elided from this view.
 */
3056 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3060 	struct mgmt_hdr *hdr;
3061 	u16 opcode, index, len;
3062 	struct hci_dev *hdev = NULL;
3063 	const struct mgmt_handler *handler;
3066 	BT_DBG("got %zu bytes", msglen);
/* A valid message must at least contain a full mgmt_hdr. */
3068 	if (msglen < sizeof(*hdr))
3071 	buf = kmalloc(msglen, GFP_KERNEL);
3075 	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields are little-endian on the wire. */
3081 	opcode = __le16_to_cpu(hdr->opcode);
3082 	index = __le16_to_cpu(hdr->index);
3083 	len = __le16_to_cpu(hdr->len);
/* Declared payload length must match what was actually received. */
3085 	if (len != msglen - sizeof(*hdr)) {
3090 	if (index != MGMT_INDEX_NONE) {
3091 		hdev = hci_dev_get(index);
3093 			err = cmd_status(sk, index, opcode,
3094 					 MGMT_STATUS_INVALID_INDEX);
/* Reject opcodes outside the handler table or without a handler. */
3099 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3100 	    mgmt_handlers[opcode].func == NULL) {
3101 		BT_DBG("Unknown op %u", opcode);
3102 		err = cmd_status(sk, index, opcode,
3103 				 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below MGMT_OP_READ_INFO are global (no index); the rest
 * require a controller index — reject the mismatching combinations. */
3107 	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3108 	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3109 		err = cmd_status(sk, index, opcode,
3110 				 MGMT_STATUS_INVALID_INDEX);
3114 	handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed-length
 * commands must match exactly. */
3116 	if ((handler->var_len && len < handler->data_len) ||
3117 	    (!handler->var_len && len != handler->data_len)) {
3118 		err = cmd_status(sk, index, opcode,
3119 				 MGMT_STATUS_INVALID_PARAMS);
3124 		mgmt_init_hdev(sk, hdev);
/* Command parameters start right after the header. */
3126 	cp = buf + sizeof(*hdr);
3128 	err = handler->func(sk, hdev, cp, len);
/*
 * cmd_status_rsp() - mgmt_pending_foreach() callback: fail the pending
 * command with the status pointed to by @data and remove it.
 * NOTE(review): the local deriving 'status' from @data is on an elided
 * line (3143-3145) — presumably u8 *status = data.
 */
3142 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3146 	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3147 	mgmt_pending_remove(cmd);
/*
 * mgmt_index_added() - emit MGMT_EV_INDEX_ADDED for a newly registered
 * controller; skipped for controllers mgmt_valid_hdev() rejects.
 */
3150 int mgmt_index_added(struct hci_dev *hdev)
3152 	if (!mgmt_valid_hdev(hdev))
3155 	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/*
 * mgmt_index_removed() - fail every pending command on the departing
 * controller with MGMT_STATUS_INVALID_INDEX, then emit
 * MGMT_EV_INDEX_REMOVED.
 */
3158 int mgmt_index_removed(struct hci_dev *hdev)
3160 	u8 status = MGMT_STATUS_INVALID_INDEX;
3162 	if (!mgmt_valid_hdev(hdev))
/* Opcode 0 means "match all pending commands" for this hdev. */
3165 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3167 	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* NOTE(review): fragment of a struct definition (presumably
 * struct cmd_lookup, used below as { sk, hdev, mgmt_status }) whose
 * opening and remaining members sit on elided lines. */
3172 	struct hci_dev *hdev;
/*
 * settings_rsp() - mgmt_pending_foreach() callback: answer the pending
 * command with the current settings, detach it from the pending list
 * and remember the first socket seen in the cmd_lookup so the caller
 * can skip it when broadcasting new_settings().
 */
3176 static void settings_rsp(struct pending_cmd *cmd, void *data)
3178 	struct cmd_lookup *match = data;
3180 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3182 	list_del(&cmd->list);
/* Keep a reference to the first responder's socket for the caller. */
3184 	if (match->sk == NULL) {
3185 		match->sk = cmd->sk;
3186 		sock_hold(match->sk);
/* Freed directly (not mgmt_pending_remove) since list_del was done. */
3189 	mgmt_pending_free(cmd);
/*
 * set_bredr_scan() - queue a WRITE_SCAN_ENABLE command reflecting the
 * HCI_CONNECTABLE / HCI_DISCOVERABLE flags.
 * NOTE(review): the 'scan' local and the SCAN_PAGE assignment are on
 * elided lines (3195, 3198).
 */
3192 static void set_bredr_scan(struct hci_request *req)
3194 	struct hci_dev *hdev = req->hdev;
3197 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3199 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3200 		scan |= SCAN_INQUIRY;
3203 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/*
 * powered_complete() - hci_request completion handler for the power-on
 * init sequence: respond to pending SET_POWERED commands and broadcast
 * the new settings (skipping the responder's socket via match.sk).
 */
3206 static void powered_complete(struct hci_dev *hdev, u8 status)
3208 	struct cmd_lookup match = { NULL, hdev };
3210 	BT_DBG("status 0x%02x", status);
/* NOTE(review): matching hci_dev_lock() is on an elided line. */
3214 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3216 	new_settings(hdev, match.sk);
3218 	hci_dev_unlock(hdev);
/*
 * powered_update_hci() - after power-on, build and run an HCI request
 * that synchronises the controller with the mgmt-level flags: SSP mode,
 * LE host support, link-level authentication and BR/EDR scan mode.
 * Returns the result of hci_req_run() (0 means commands were queued and
 * powered_complete() will be called).
 */
3224 static int powered_update_hci(struct hci_dev *hdev)
3226 	struct hci_request req;
3229 	hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host feature
 * bit is not yet set. */
3231 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3232 	    !lmp_host_ssp_capable(hdev)) {
3235 		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3238 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3239 		struct hci_cp_write_le_host_supported cp;
3242 		cp.simul = lmp_le_br_capable(hdev);
3244 		/* Check first if we already have the right
3245 		 * host state (host features set)
3247 		if (cp.le != lmp_host_le_capable(hdev) ||
3248 		    cp.simul != lmp_host_le_br_capable(hdev))
3249 			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Only touch AUTH_ENABLE when the mgmt flag and HCI flag disagree. */
3253 	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3254 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3255 		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3256 			    sizeof(link_sec), &link_sec);
3258 	if (lmp_bredr_capable(hdev)) {
3259 		set_bredr_scan(&req);
3265 	return hci_req_run(&req, powered_complete);
/*
 * mgmt_powered() - called when the controller's power state changes.
 * On power-on it kicks powered_update_hci(); on power-off it answers
 * all pending SET_POWERED commands, fails every other pending command
 * with NOT_POWERED, signals a zeroed class-of-device if needed, and
 * finally broadcasts the new settings.
 */
3268 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3270 	struct cmd_lookup match = { NULL, hdev };
3271 	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3272 	u8 zero_cod[] = { 0, 0, 0 };
3275 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Power-on: if commands were queued, powered_complete() will finish
 * the job asynchronously. */
3279 		if (powered_update_hci(hdev) == 0)
3282 		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path: answer SET_POWERED, fail everything else. */
3287 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3288 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
/* If the class of device was non-zero, announce that it is now zero. */
3290 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3291 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3292 			   zero_cod, sizeof(zero_cod), NULL);
3295 	err = new_settings(hdev, match.sk);
/*
 * mgmt_discoverable() - sync the HCI_DISCOVERABLE flag with the
 * controller state, answer pending SET_DISCOVERABLE commands, and
 * broadcast new settings if the flag actually changed.
 */
3303 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3305 	struct cmd_lookup match = { NULL, hdev };
3306 	bool changed = false;
/* test_and_set/clear report whether the flag transitioned. */
3310 		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3313 		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3317 	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3321 		err = new_settings(hdev, match.sk);
/*
 * mgmt_connectable() - sync the HCI_CONNECTABLE flag with the
 * controller state, answer pending SET_CONNECTABLE commands, and
 * broadcast new settings if the flag actually changed.
 */
3329 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3331 	struct cmd_lookup match = { NULL, hdev };
3332 	bool changed = false;
/* test_and_set/clear report whether the flag transitioned. */
3336 		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3339 		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3343 	mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
3347 		err = new_settings(hdev, match.sk);
/*
 * mgmt_write_scan_failed() - a WRITE_SCAN_ENABLE command failed: fail
 * the pending SET_CONNECTABLE and/or SET_DISCOVERABLE commands that
 * correspond to the scan bits that were being written.
 */
3355 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3357 	u8 mgmt_err = mgmt_status(status);
/* SCAN_PAGE maps to connectable, SCAN_INQUIRY to discoverable. */
3359 	if (scan & SCAN_PAGE)
3360 		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3361 				     cmd_status_rsp, &mgmt_err);
3363 	if (scan & SCAN_INQUIRY)
3364 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3365 				     cmd_status_rsp, &mgmt_err);
/*
 * mgmt_new_link_key() - emit MGMT_EV_NEW_LINK_KEY for a freshly stored
 * BR/EDR link key.  @persistent becomes the event's store_hint so
 * userspace knows whether to persist the key.
 */
3370 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3373 	struct mgmt_ev_new_link_key ev;
3375 	memset(&ev, 0, sizeof(ev));
3377 	ev.store_hint = persistent;
3378 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
/* Link keys are BR/EDR only. */
3379 	ev.key.addr.type = BDADDR_BREDR;
3380 	ev.key.type = key->type;
3381 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3382 	ev.key.pin_len = key->pin_len;
3384 	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_new_ltk() - emit MGMT_EV_NEW_LONG_TERM_KEY for a freshly stored
 * SMP long term key (LE pairing).  @persistent becomes store_hint.
 */
3387 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3389 	struct mgmt_ev_new_long_term_key ev;
3391 	memset(&ev, 0, sizeof(ev));
3393 	ev.store_hint = persistent;
3394 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3395 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3396 	ev.key.authenticated = key->authenticated;
3397 	ev.key.enc_size = key->enc_size;
3398 	ev.key.ediv = key->ediv;
/* NOTE(review): the flag set under this HCI_SMP_LTK check is on an
 * elided line (3401-3402) — presumably a "master" marker. */
3400 	if (key->type == HCI_SMP_LTK)
3403 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3404 	memcpy(ev.key.val, key->val, sizeof(key->val));
3406 	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/*
 * mgmt_device_connected() - emit MGMT_EV_DEVICE_CONNECTED with a
 * variable-length EIR blob carrying the remote name (if any) and the
 * class of device (if non-zero).
 * NOTE(review): the buf declaration and the name_len guard around the
 * eir_append_data() call are on elided lines.
 */
3410 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3411 			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
3415 	struct mgmt_ev_device_connected *ev = (void *) buf;
3418 	bacpy(&ev->addr.bdaddr, bdaddr);
3419 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3421 	ev->flags = __cpu_to_le32(flags);
3424 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append class of device when it is present and non-zero. */
3427 	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3428 		eir_len = eir_append_data(ev->eir, eir_len,
3429 					  EIR_CLASS_OF_DEV, dev_class, 3);
3431 	ev->eir_len = cpu_to_le16(eir_len);
3433 	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3434 			  sizeof(*ev) + eir_len, NULL);
/*
 * disconnect_rsp() - mgmt_pending_foreach() callback: complete a
 * pending DISCONNECT command with success and record its socket in
 * *@data (struct sock **) for the caller.
 */
3437 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3439 	struct mgmt_cp_disconnect *cp = cmd->param;
3440 	struct sock **sk = data;
3441 	struct mgmt_rp_disconnect rp;
/* The reply echoes the address from the original command. */
3443 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3444 	rp.addr.type = cp->addr.type;
3446 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
/* NOTE(review): *sk assignment / sock_hold is on elided lines. */
3452 	mgmt_pending_remove(cmd);
/*
 * unpair_device_rsp() - mgmt_pending_foreach() callback: signal
 * DEVICE_UNPAIRED and complete the pending UNPAIR_DEVICE command with
 * success, echoing the address from the original command.
 */
3455 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3457 	struct hci_dev *hdev = data;
3458 	struct mgmt_cp_unpair_device *cp = cmd->param;
3459 	struct mgmt_rp_unpair_device rp;
3461 	memset(&rp, 0, sizeof(rp));
3462 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3463 	rp.addr.type = cp->addr.type;
3465 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3467 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3469 	mgmt_pending_remove(cmd);
/*
 * mgmt_device_disconnected() - complete any pending DISCONNECT
 * commands, emit MGMT_EV_DEVICE_DISCONNECTED (skipping the socket that
 * issued the disconnect), then finish pending UNPAIR_DEVICE commands
 * that were waiting for this disconnection.
 */
3472 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3473 			     u8 link_type, u8 addr_type, u8 reason)
3475 	struct mgmt_ev_device_disconnected ev;
3476 	struct sock *sk = NULL;
3479 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3481 	bacpy(&ev.addr.bdaddr, bdaddr);
3482 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
/* NOTE(review): ev.reason assignment is on an elided line (3483). */
3485 	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3491 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/*
 * mgmt_disconnect_failed() - a disconnect attempt failed: finish
 * pending UNPAIR_DEVICE commands, then complete the pending DISCONNECT
 * command (if any) with the translated HCI status.
 */
3497 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3498 			   u8 link_type, u8 addr_type, u8 status)
3500 	struct mgmt_rp_disconnect rp;
3501 	struct pending_cmd *cmd;
3504 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* NOTE(review): the NULL check after this lookup is elided. */
3507 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3511 	bacpy(&rp.addr.bdaddr, bdaddr);
3512 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3514 	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3515 			   mgmt_status(status), &rp, sizeof(rp));
3517 	mgmt_pending_remove(cmd);
/*
 * mgmt_connect_failed() - emit MGMT_EV_CONNECT_FAILED with the remote
 * address and the translated HCI status.
 */
3522 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3523 			u8 addr_type, u8 status)
3525 	struct mgmt_ev_connect_failed ev;
3527 	bacpy(&ev.addr.bdaddr, bdaddr);
3528 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3529 	ev.status = mgmt_status(status);
3531 	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_pin_code_request() - emit MGMT_EV_PIN_CODE_REQUEST so userspace
 * can supply a PIN for the remote BR/EDR device.
 * NOTE(review): the ev.secure assignment from @secure is on an elided
 * line (3540).
 */
3534 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3536 	struct mgmt_ev_pin_code_request ev;
3538 	bacpy(&ev.addr.bdaddr, bdaddr);
3539 	ev.addr.type = BDADDR_BREDR;
3542 	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/*
 * mgmt_pin_code_reply_complete() - finish the pending PIN_CODE_REPLY
 * command with the translated HCI status, echoing the BR/EDR address.
 */
3546 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3549 	struct pending_cmd *cmd;
3550 	struct mgmt_rp_pin_code_reply rp;
/* NOTE(review): the NULL check after this lookup is elided. */
3553 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3557 	bacpy(&rp.addr.bdaddr, bdaddr);
3558 	rp.addr.type = BDADDR_BREDR;
3560 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3561 			   mgmt_status(status), &rp, sizeof(rp));
3563 	mgmt_pending_remove(cmd);
/*
 * mgmt_pin_code_neg_reply_complete() - finish the pending
 * PIN_CODE_NEG_REPLY command with the translated HCI status, echoing
 * the BR/EDR address.
 */
3568 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3571 	struct pending_cmd *cmd;
3572 	struct mgmt_rp_pin_code_reply rp;
/* NOTE(review): the NULL check after this lookup is elided. */
3575 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3579 	bacpy(&rp.addr.bdaddr, bdaddr);
3580 	rp.addr.type = BDADDR_BREDR;
3582 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3583 			   mgmt_status(status), &rp, sizeof(rp));
3585 	mgmt_pending_remove(cmd);
/*
 * mgmt_user_confirm_request() - emit MGMT_EV_USER_CONFIRM_REQUEST for
 * SSP numeric comparison; @confirm_hint tells userspace whether to show
 * a yes/no dialog or just the value.
 * NOTE(review): the ev.value assignment from @value is on an elided
 * line (3601).
 */
3590 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3591 			      u8 link_type, u8 addr_type, __le32 value,
3594 	struct mgmt_ev_user_confirm_request ev;
3596 	BT_DBG("%s", hdev->name);
3598 	bacpy(&ev.addr.bdaddr, bdaddr);
3599 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3600 	ev.confirm_hint = confirm_hint;
3603 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/*
 * mgmt_user_passkey_request() - emit MGMT_EV_USER_PASSKEY_REQUEST so
 * userspace can prompt for a passkey during SSP pairing.
 */
3607 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3608 			      u8 link_type, u8 addr_type)
3610 	struct mgmt_ev_user_passkey_request ev;
3612 	BT_DBG("%s", hdev->name);
3614 	bacpy(&ev.addr.bdaddr, bdaddr);
3615 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3617 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/*
 * user_pairing_resp_complete() - common completion helper for the four
 * user confirm/passkey (neg-)reply commands: find the pending command
 * for @opcode, complete it with the translated HCI status and the
 * echoed address, and remove it.
 */
3621 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3622 				      u8 link_type, u8 addr_type, u8 status,
3625 	struct pending_cmd *cmd;
3626 	struct mgmt_rp_user_confirm_reply rp;
/* NOTE(review): the NULL check after this lookup is elided. */
3629 	cmd = mgmt_pending_find(opcode, hdev);
3633 	bacpy(&rp.addr.bdaddr, bdaddr);
3634 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3635 	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3638 	mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
3643 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3644 				     u8 link_type, u8 addr_type, u8 status)
3646 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3647 					  status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
3650 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3651 					 u8 link_type, u8 addr_type, u8 status)
3653 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3655 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
3658 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3659 				     u8 link_type, u8 addr_type, u8 status)
3661 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3662 					  status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
3665 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3666 					 u8 link_type, u8 addr_type, u8 status)
3668 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3670 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
/*
 * mgmt_user_passkey_notify() - emit MGMT_EV_PASSKEY_NOTIFY so userspace
 * can display the passkey being entered on the remote side; @entered
 * counts the digits typed so far.
 */
3673 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3674 			     u8 link_type, u8 addr_type, u32 passkey,
3677 	struct mgmt_ev_passkey_notify ev;
3679 	BT_DBG("%s", hdev->name);
3681 	bacpy(&ev.addr.bdaddr, bdaddr);
3682 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3683 	ev.passkey = __cpu_to_le32(passkey);
3684 	ev.entered = entered;
3686 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_auth_failed() - emit MGMT_EV_AUTH_FAILED with the remote address
 * and the translated HCI status.
 */
3689 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3690 		     u8 addr_type, u8 status)
3692 	struct mgmt_ev_auth_failed ev;
3694 	bacpy(&ev.addr.bdaddr, bdaddr);
3695 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3696 	ev.status = mgmt_status(status);
3698 	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_auth_enable_complete() - WRITE_AUTH_ENABLE finished.  On failure
 * fail the pending SET_LINK_SECURITY commands; on success sync the
 * HCI_LINK_SECURITY flag with the HCI_AUTH state, answer pending
 * commands and broadcast new settings if the flag changed.
 */
3701 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3703 	struct cmd_lookup match = { NULL, hdev };
3704 	bool changed = false;
/* Error path: translate the HCI status and fail pending commands. */
3708 		u8 mgmt_err = mgmt_status(status);
3709 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3710 				     cmd_status_rsp, &mgmt_err);
3714 	if (test_bit(HCI_AUTH, &hdev->flags)) {
3715 		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3718 		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3722 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3726 		err = new_settings(hdev, match.sk);
/*
 * clear_eir() - queue a WRITE_EIR command with an all-zero payload to
 * wipe the controller's extended inquiry response; no-op when the
 * controller lacks extended-inquiry support.
 */
3734 static void clear_eir(struct hci_request *req)
3736 	struct hci_dev *hdev = req->hdev;
3737 	struct hci_cp_write_eir cp;
3739 	if (!lmp_ext_inq_capable(hdev))
/* Clear the cached copy as well as the controller's. */
3742 	memset(hdev->eir, 0, sizeof(hdev->eir));
3744 	memset(&cp, 0, sizeof(cp));
3746 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/*
 * mgmt_ssp_enable_complete() - WRITE_SSP_MODE finished.  On failure
 * roll back the HCI_SSP_ENABLED flag (if it was being enabled) and fail
 * pending SET_SSP commands; on success sync the flag with @enable,
 * answer pending commands, broadcast new settings, and update or clear
 * the EIR accordingly.
 */
3749 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3751 	struct cmd_lookup match = { NULL, hdev };
3752 	struct hci_request req;
3753 	bool changed = false;
/* Error path: undo an attempted enable, then fail pending commands. */
3757 		u8 mgmt_err = mgmt_status(status);
3759 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3761 			err = new_settings(hdev, NULL);
3763 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3770 		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3773 		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3777 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3780 		err = new_settings(hdev, match.sk);
/* Re-write (SSP on) or clear (SSP off) the EIR to match the new state.
 * NOTE(review): the update_eir() call and the else branch calling
 * clear_eir() are on elided lines. */
3785 	hci_req_init(&req, hdev);
3787 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3792 	hci_req_run(&req, NULL);
/*
 * sk_lookup() - mgmt_pending_foreach() callback: remember (and hold a
 * reference to) the first pending command's socket in the cmd_lookup.
 */
3797 static void sk_lookup(struct pending_cmd *cmd, void *data)
3799 	struct cmd_lookup *match = data;
3801 	if (match->sk == NULL) {
3802 		match->sk = cmd->sk;
3803 		sock_hold(match->sk);
/*
 * mgmt_set_class_of_dev_complete() - class-of-device write finished:
 * find the socket of whichever command triggered it (SET_DEV_CLASS,
 * ADD_UUID or REMOVE_UUID) and, on success, broadcast
 * MGMT_EV_CLASS_OF_DEV_CHANGED skipping that socket.
 */
3807 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3810 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3813 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3814 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3815 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3818 		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/*
 * mgmt_set_local_name_complete() - the controller's local name write
 * finished: cache the new name, and unless this was part of the
 * power-on sequence, emit MGMT_EV_LOCAL_NAME_CHANGED (skipping the
 * requesting socket if a SET_LOCAL_NAME command was pending).
 */
3827 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3829 	struct mgmt_cp_set_local_name ev;
3830 	struct pending_cmd *cmd;
3835 	memset(&ev, 0, sizeof(ev));
3836 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3837 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3839 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3841 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3843 		/* If this is a HCI command related to powering on the
3844 		 * HCI dev don't send any mgmt signals.
3846 		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3850 	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3851 			  cmd ? cmd->sk : NULL);
/*
 * mgmt_read_local_oob_data_reply_complete() - READ_LOCAL_OOB_DATA
 * finished: on failure send a status reply, on success return the hash
 * and randomizer to the pending command's socket.
 */
3854 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3855 					    u8 *randomizer, u8 status)
3857 	struct pending_cmd *cmd;
3860 	BT_DBG("%s status %u", hdev->name, status);
/* NOTE(review): the NULL check after this lookup is elided. */
3862 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3867 		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3868 				 mgmt_status(status));
3870 		struct mgmt_rp_read_local_oob_data rp;
3872 		memcpy(rp.hash, hash, sizeof(rp.hash));
3873 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3875 		err = cmd_complete(cmd->sk, hdev->id,
3876 				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3880 	mgmt_pending_remove(cmd);
/*
 * mgmt_le_enable_complete() - WRITE_LE_HOST_SUPPORTED finished.  On
 * failure roll back the HCI_LE_ENABLED flag (if it was being enabled)
 * and fail pending SET_LE commands; on success sync the flag with
 * @enable, answer pending commands and broadcast new settings if the
 * flag changed.
 */
3885 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3887 	struct cmd_lookup match = { NULL, hdev };
3888 	bool changed = false;
/* Error path: undo an attempted enable, then fail pending commands. */
3892 		u8 mgmt_err = mgmt_status(status);
3894 		if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3896 			err = new_settings(hdev, NULL);
3898 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3905 		if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3908 		if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3912 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3915 		err = new_settings(hdev, match.sk);
/*
 * mgmt_device_found() - emit MGMT_EV_DEVICE_FOUND for a discovery
 * result, packing the raw EIR data plus (if absent from the EIR and
 * available) the class of device into the event's variable-length
 * tail.  Oversized EIR payloads are rejected.
 * NOTE(review): the buf declaration, rssi assignment and the cfm_name /
 * ssp flag conditions are on elided lines.
 */
3923 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3924 		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3925 		      ssp, u8 *eir, u16 eir_len)
3928 	struct mgmt_ev_device_found *ev = (void *) buf;
3931 	/* Leave 5 bytes for a potential CoD field */
3932 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3935 	memset(buf, 0, sizeof(buf));
3937 	bacpy(&ev->addr.bdaddr, bdaddr);
3938 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3941 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3943 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3946 		memcpy(ev->eir, eir, eir_len);
/* Only append CoD if the EIR does not already carry one. */
3948 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
3949 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3952 	ev->eir_len = cpu_to_le16(eir_len);
3953 	ev_size = sizeof(*ev) + eir_len;
3955 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/*
 * mgmt_remote_name() - emit MGMT_EV_DEVICE_FOUND carrying only the
 * resolved remote name (as an EIR_NAME_COMPLETE field) after a remote
 * name request.
 */
3958 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3959 		     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
3961 	struct mgmt_ev_device_found *ev;
/* +2 for the EIR field's length and type bytes. */
3962 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3965 	ev = (struct mgmt_ev_device_found *) buf;
3967 	memset(buf, 0, sizeof(buf));
3969 	bacpy(&ev->addr.bdaddr, bdaddr);
3970 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3973 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3976 	ev->eir_len = cpu_to_le16(eir_len);
3978 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3979 			  sizeof(*ev) + eir_len, NULL);
/*
 * mgmt_start_discovery_failed() - discovery could not be started: reset
 * the discovery state machine and complete the pending START_DISCOVERY
 * command with the translated HCI status and the requested type.
 */
3982 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3984 	struct pending_cmd *cmd;
3988 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* NOTE(review): the NULL check after this lookup is elided. */
3990 	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3994 	type = hdev->discovery.type;
3996 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3997 			   &type, sizeof(type));
3998 	mgmt_pending_remove(cmd);
/*
 * mgmt_stop_discovery_failed() - stopping discovery failed: complete
 * the pending STOP_DISCOVERY command with the translated HCI status and
 * the current discovery type.
 */
4003 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4005 	struct pending_cmd *cmd;
/* NOTE(review): the NULL check after this lookup is elided. */
4008 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4012 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4013 			   &hdev->discovery.type, sizeof(hdev->discovery.type));
4014 	mgmt_pending_remove(cmd);
/*
 * mgmt_discovering() - the discovery state changed: complete whichever
 * of START_DISCOVERY / STOP_DISCOVERY is pending (returning the
 * discovery type), then broadcast MGMT_EV_DISCOVERING.
 */
4019 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4021 	struct mgmt_ev_discovering ev;
4022 	struct pending_cmd *cmd;
4024 	BT_DBG("%s discovering %u", hdev->name, discovering);
/* Pick the pending command matching the direction of the change.
 * NOTE(review): the discovering/else condition around these two
 * lookups is on elided lines. */
4027 		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4029 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4032 		u8 type = hdev->discovery.type;
4034 		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4036 		mgmt_pending_remove(cmd);
4039 	memset(&ev, 0, sizeof(ev));
4040 	ev.type = hdev->discovery.type;
4041 	ev.discovering = discovering;
4043 	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/*
 * mgmt_device_blocked() - emit MGMT_EV_DEVICE_BLOCKED, skipping the
 * socket whose BLOCK_DEVICE command caused it (if one is pending).
 */
4046 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4048 	struct pending_cmd *cmd;
4049 	struct mgmt_ev_device_blocked ev;
4051 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4053 	bacpy(&ev.addr.bdaddr, bdaddr);
4054 	ev.addr.type = type;
4056 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4057 			  cmd ? cmd->sk : NULL);
/*
 * mgmt_device_unblocked() - emit MGMT_EV_DEVICE_UNBLOCKED, skipping the
 * socket whose UNBLOCK_DEVICE command caused it (if one is pending).
 */
4060 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4062 	struct pending_cmd *cmd;
4063 	struct mgmt_ev_device_unblocked ev;
4065 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4067 	bacpy(&ev.addr.bdaddr, bdaddr);
4068 	ev.addr.type = type;
4070 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4071 			  cmd ? cmd->sk : NULL);
/* Module parameter: allow toggling High Speed support at load time
 * (bool, mode 0644 — also writable via sysfs by root). */
4074 module_param(enable_hs, bool, 0644);
4075 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");