2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
/* Version/revision reported by MGMT_OP_READ_VERSION. */
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Opcodes advertised via MGMT_OP_READ_COMMANDS.
 * NOTE(review): this excerpt is a numbered listing with gaps — several
 * entries (original lines 42-43, 47, 49-51, 54-55, 58, 63, 75-76) and the
 * closing brace are missing; do not treat this list as complete.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Events advertised via MGMT_OP_READ_COMMANDS (event half of the reply).
 * NOTE(review): entries are missing from this excerpt (gaps in the
 * original line numbering), as is the closing brace.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* Powered = HCI_UP set and not in the auto-power-off transient state. */
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): lone fragment of struct pending_cmd — the remaining
 * members (original lines 125-130) are missing from this excerpt.
 */
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; consumed by mgmt_status(). */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
/* Queue an MGMT_EV_CMD_STATUS event for opcode `cmd` onto socket `sk`.
 * NOTE(review): numbered listing with gaps — skb NULL check, ev->status
 * assignment, error cleanup and return are missing from this excerpt.
 */
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE event carrying an rp_len-byte response
 * payload back to the requesting socket.
 */
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with MGMT_VERSION/MGMT_REVISION. */
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the mgmt_commands and
 * mgmt_events tables as little-endian u16 opcode lists.
 */
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: enumerate valid, fully set-up
 * controllers under hci_dev_list_lock. Counting pass (first loop) sizes
 * the reply; second pass fills it, skipping devices still in HCI_SETUP.
 */
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT_SETTING_* bitmask of what this controller CAN do,
 * gated on LMP feature bits (SSP, BR/EDR, LE).
 */
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
/* NOTE(review): the condition guarding MGMT_SETTING_HS (original lines
 * 391-393) is missing from this excerpt.
 */
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
/* Build the MGMT_SETTING_* bitmask of what is CURRENTLY enabled,
 * derived from hdev->dev_flags and the powered state.
 */
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
418 if (lmp_bredr_capable(hdev))
419 settings |= MGMT_SETTING_BREDR;
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
436 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR_UUID16_ALL/SOME field listing registered 16-bit UUIDs.
 * Truncates to EIR_UUID16_SOME when `len` runs out; PnP Information
 * service class is skipped. Returns the advanced write pointer.
 */
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
/* 16-bit UUID is emitted little-endian, byte by byte. */
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
/* Same as create_uuid16_list() but for 32-bit UUIDs
 * (EIR_UUID32_ALL/SOME fields).
 */
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
/* Same again for full 128-bit UUIDs (EIR_UUID128_ALL/SOME fields). */
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
/* Fill `data` with the controller's EIR payload: local name (complete or
 * shortened), inquiry TX power, Device ID record, then the three UUID
 * lists bounded by HCI_MAX_EIR_LENGTH.
 */
546 static void create_eir(struct hci_dev *hdev, u8 *data)
551 name_len = strlen(hdev->dev_name);
557 ptr[1] = EIR_NAME_SHORT;
559 ptr[1] = EIR_NAME_COMPLETE;
561 /* EIR Data length */
562 ptr[0] = name_len + 1;
564 memcpy(ptr + 2, hdev->dev_name, name_len);
566 ptr += (name_len + 2);
569 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
571 ptr[1] = EIR_TX_POWER;
572 ptr[2] = (u8) hdev->inq_tx_power;
577 if (hdev->devid_source > 0) {
579 ptr[1] = EIR_DEVICE_ID;
581 put_unaligned_le16(hdev->devid_source, ptr + 2);
582 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
583 put_unaligned_le16(hdev->devid_product, ptr + 6);
584 put_unaligned_le16(hdev->devid_version, ptr + 8);
589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command on `req` if the EIR data would actually
 * change. Bails out when unpowered, EIR-incapable, SSP disabled, or the
 * service cache is active (guard bodies are missing from this excerpt).
 */
594 static void update_eir(struct hci_request *req)
596 struct hci_dev *hdev = req->hdev;
597 struct hci_cp_write_eir cp;
599 if (!hdev_is_powered(hdev))
602 if (!lmp_ext_inq_capable(hdev))
605 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
608 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
611 memset(&cp, 0, sizeof(cp));
613 create_eir(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
615 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
618 memcpy(hdev->eir, cp.data, sizeof(cp.data));
620 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
623 static u8 get_service_classes(struct hci_dev *hdev)
625 struct bt_uuid *uuid;
628 list_for_each_entry(uuid, &hdev->uuids, list)
629 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command on `req` when the computed
 * CoD (minor, major, service classes) differs from the cached one.
 */
634 static void update_class(struct hci_request *req)
636 struct hci_dev *hdev = req->hdev;
639 BT_DBG("%s", hdev->name);
641 if (!hdev_is_powered(hdev))
644 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
647 cod[0] = hdev->minor_class;
648 cod[1] = hdev->major_class;
649 cod[2] = get_service_classes(hdev);
651 if (memcmp(cod, hdev->dev_class, 3) == 0)
654 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed work: clear HCI_SERVICE_CACHE and push the now-uncached EIR
 * and CoD to the controller (update_* calls are missing from excerpt).
 */
657 static void service_cache_off(struct work_struct *work)
659 struct hci_dev *hdev = container_of(work, struct hci_dev,
661 struct hci_request req;
663 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
666 hci_req_init(&req, hdev);
673 hci_dev_unlock(hdev);
675 hci_req_run(&req, NULL);
/* One-time mgmt takeover of a controller: set HCI_MGMT, wire up the
 * service-cache work item, and clear the implicit pairable flag.
 */
678 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
680 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
683 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)
685 /* Non-mgmt controlled devices get this bit set
686 * implicitly so that pairing works for them, however
687 * for mgmt we require user-space to explicitly enable
690 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: snapshot address, version, settings, CoD
 * and names under hci_dev_lock (lock call missing from this excerpt).
 */
693 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
694 void *data, u16 data_len)
696 struct mgmt_rp_read_info rp;
698 BT_DBG("sock %p %s", sk, hdev->name);
702 memset(&rp, 0, sizeof(rp));
704 bacpy(&rp.bdaddr, &hdev->bdaddr);
706 rp.version = hdev->hci_ver;
707 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
709 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
710 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
712 memcpy(rp.dev_class, hdev->dev_class, 3);
714 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
715 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
717 hci_dev_unlock(hdev);
719 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Free a pending command (body missing from this excerpt — presumably
 * frees cmd->param and cmd; confirm against full source).
 */
723 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd, copy the request parameters, and link it onto
 * hdev->mgmt_pending so the completion path can find it later.
 */
730 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
731 struct hci_dev *hdev, void *data,
734 struct pending_cmd *cmd;
736 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
740 cmd->opcode = opcode;
741 cmd->index = hdev->id;
743 cmd->param = kmalloc(len, GFP_KERNEL);
750 memcpy(cmd->param, data, len);
755 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke `cb` on each pending command; opcode 0 means "all opcodes".
 * Safe iteration: the callback may remove entries.
 */
760 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
761 void (*cb)(struct pending_cmd *cmd,
765 struct pending_cmd *cmd, *tmp;
767 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
768 if (opcode > 0 && cmd->opcode != opcode)
/* Find the first pending command with a matching opcode, or NULL. */
775 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
777 struct pending_cmd *cmd;
779 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
780 if (cmd->opcode == opcode)
/* Unlink a pending command from its list and free it. */
787 static void mgmt_pending_remove(struct pending_cmd *cmd)
789 list_del(&cmd->list);
790 mgmt_pending_free(cmd);
793 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
795 __le32 settings = cpu_to_le32(get_current_settings(hdev));
797 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler. Validates cp->val, cancels a pending
 * auto-off, short-circuits no-op requests, rejects concurrent powered
 * changes, then queues power_on/power_off work.
 */
801 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
804 struct mgmt_mode *cp = data;
805 struct pending_cmd *cmd;
808 BT_DBG("request for %s", hdev->name);
810 if (cp->val != 0x00 && cp->val != 0x01)
811 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
812 MGMT_STATUS_INVALID_PARAMS);
/* Device was about to auto-power-off: cancel that and confirm power. */
816 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
817 cancel_delayed_work(&hdev->power_off);
820 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
822 err = mgmt_powered(hdev, 1);
827 if (!!cp->val == hdev_is_powered(hdev)) {
828 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
832 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
833 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
838 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
845 queue_work(hdev->req_workqueue, &hdev->power_on);
847 queue_work(hdev->req_workqueue, &hdev->power_off.work);
852 hci_dev_unlock(hdev);
/* Broadcast `event` with `data` to all mgmt control sockets except
 * skip_sk; index is the hdev id or MGMT_INDEX_NONE (NULL-hdev branch is
 * missing from this excerpt).
 */
856 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
857 struct sock *skip_sk)
860 struct mgmt_hdr *hdr;
862 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
866 hdr = (void *) skb_put(skb, sizeof(*hdr));
867 hdr->opcode = cpu_to_le16(event);
869 hdr->index = cpu_to_le16(hdev->id);
871 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
872 hdr->len = cpu_to_le16(data_len);
875 memcpy(skb_put(skb, data_len), data, data_len);
878 __net_timestamp(skb);
880 hci_send_to_control(skb, skip_sk);
886 static int new_settings(struct hci_dev *hdev, struct sock *skip)
890 ev = cpu_to_le32(get_current_settings(hdev));
892 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler. Requires BR/EDR and connectable
 * mode; a non-zero timeout only makes sense when enabling (and when
 * powered). When powered, writes the inquiry-scan bit via
 * HCI_OP_WRITE_SCAN_ENABLE and (re)arms the discoverable timeout.
 */
895 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
898 struct mgmt_cp_set_discoverable *cp = data;
899 struct pending_cmd *cmd;
904 BT_DBG("request for %s", hdev->name);
906 if (!lmp_bredr_capable(hdev))
907 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
908 MGMT_STATUS_NOT_SUPPORTED);
910 if (cp->val != 0x00 && cp->val != 0x01)
911 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
912 MGMT_STATUS_INVALID_PARAMS);
914 timeout = __le16_to_cpu(cp->timeout);
915 if (!cp->val && timeout > 0)
916 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
917 MGMT_STATUS_INVALID_PARAMS);
921 if (!hdev_is_powered(hdev) && timeout > 0) {
922 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
923 MGMT_STATUS_NOT_POWERED);
/* Only one scan-mode change may be in flight at a time. */
927 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
928 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
929 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
934 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
935 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
936 MGMT_STATUS_REJECTED);
/* Unpowered: just toggle the flag and report settings. */
940 if (!hdev_is_powered(hdev)) {
941 bool changed = false;
943 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
944 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
948 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
953 err = new_settings(hdev, sk);
/* Already in the requested mode: only the timeout may need updating. */
958 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
959 if (hdev->discov_timeout > 0) {
960 cancel_delayed_work(&hdev->discov_off);
961 hdev->discov_timeout = 0;
964 if (cp->val && timeout > 0) {
965 hdev->discov_timeout = timeout;
966 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
967 msecs_to_jiffies(hdev->discov_timeout * 1000));
970 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
974 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
983 scan |= SCAN_INQUIRY;
985 cancel_delayed_work(&hdev->discov_off);
987 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
989 mgmt_pending_remove(cmd);
992 hdev->discov_timeout = timeout;
995 hci_dev_unlock(hdev);
/* hci_request completion for set_connectable: answer the pending mgmt
 * command with the (now current) settings and drop it.
 */
999 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1001 struct pending_cmd *cmd;
1003 BT_DBG("status 0x%02x", status);
1007 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1011 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1013 mgmt_pending_remove(cmd);
1016 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler. Unpowered: flag-only (disabling also
 * clears discoverable). Powered: writes the page-scan bit through an
 * hci_request completed by set_connectable_complete().
 */
1019 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1022 struct mgmt_mode *cp = data;
1023 struct pending_cmd *cmd;
1024 struct hci_request req;
1028 BT_DBG("request for %s", hdev->name);
1030 if (!lmp_bredr_capable(hdev))
1031 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1032 MGMT_STATUS_NOT_SUPPORTED);
1034 if (cp->val != 0x00 && cp->val != 0x01)
1035 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1036 MGMT_STATUS_INVALID_PARAMS);
1040 if (!hdev_is_powered(hdev)) {
1041 bool changed = false;
1043 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1047 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1049 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1050 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1053 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1058 err = new_settings(hdev, sk);
1063 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1064 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1065 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1070 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1071 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1075 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1086 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1087 hdev->discov_timeout > 0)
1088 cancel_delayed_work(&hdev->discov_off);
1091 hci_req_init(&req, hdev);
1093 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1095 err = hci_req_run(&req, set_connectable_complete);
1097 mgmt_pending_remove(cmd);
1100 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: pure flag toggle, no HCI traffic. */
1104 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1107 struct mgmt_mode *cp = data;
1110 BT_DBG("request for %s", hdev->name);
1112 if (cp->val != 0x00 && cp->val != 0x01)
1113 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1114 MGMT_STATUS_INVALID_PARAMS);
1119 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1121 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1123 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1127 err = new_settings(hdev, sk);
1130 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: unpowered toggles the flag; powered
 * issues HCI_OP_WRITE_AUTH_ENABLE with a pending command tracking it.
 */
1134 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1137 struct mgmt_mode *cp = data;
1138 struct pending_cmd *cmd;
1142 BT_DBG("request for %s", hdev->name);
1144 if (!lmp_bredr_capable(hdev))
1145 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1146 MGMT_STATUS_NOT_SUPPORTED);
1148 if (cp->val != 0x00 && cp->val != 0x01)
1149 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1150 MGMT_STATUS_INVALID_PARAMS);
1154 if (!hdev_is_powered(hdev)) {
1155 bool changed = false;
1157 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1158 &hdev->dev_flags)) {
1159 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1163 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1168 err = new_settings(hdev, sk);
1173 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1174 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth mode: reply immediately. */
1181 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1182 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1186 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1192 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1194 mgmt_pending_remove(cmd);
1199 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: mirrors set_link_security but for Simple
 * Pairing via HCI_OP_WRITE_SSP_MODE.
 */
1203 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1205 struct mgmt_mode *cp = data;
1206 struct pending_cmd *cmd;
1210 BT_DBG("request for %s", hdev->name);
1212 if (!lmp_ssp_capable(hdev))
1213 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1214 MGMT_STATUS_NOT_SUPPORTED);
1216 if (cp->val != 0x00 && cp->val != 0x01)
1217 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1218 MGMT_STATUS_INVALID_PARAMS);
1224 if (!hdev_is_powered(hdev)) {
1225 bool changed = false;
1227 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1228 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1232 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1237 err = new_settings(hdev, sk);
1242 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1243 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1248 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1249 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1253 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1259 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1261 mgmt_pending_remove(cmd);
1266 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: host-side High Speed flag only, no HCI
 * command (the gating condition at original line 1276 is missing here).
 */
1270 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1272 struct mgmt_mode *cp = data;
1274 BT_DBG("request for %s", hdev->name);
1277 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1278 MGMT_STATUS_NOT_SUPPORTED);
1280 if (cp->val != 0x00 && cp->val != 0x01)
1281 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1282 MGMT_STATUS_INVALID_PARAMS);
1285 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1287 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1289 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler: flag-only when unpowered or already in the
 * requested host-LE state; otherwise issues
 * HCI_OP_WRITE_LE_HOST_SUPPORTED with simultaneous-LE from LMP caps.
 */
1292 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1294 struct mgmt_mode *cp = data;
1295 struct hci_cp_write_le_host_supported hci_cp;
1296 struct pending_cmd *cmd;
1300 BT_DBG("request for %s", hdev->name);
1302 if (!lmp_le_capable(hdev))
1303 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1304 MGMT_STATUS_NOT_SUPPORTED);
1306 if (cp->val != 0x00 && cp->val != 0x01)
1307 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1308 MGMT_STATUS_INVALID_PARAMS);
1313 enabled = lmp_host_le_capable(hdev);
1315 if (!hdev_is_powered(hdev) || val == enabled) {
1316 bool changed = false;
1318 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1319 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1323 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1328 err = new_settings(hdev, sk);
1333 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1334 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1339 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1345 memset(&hci_cp, 0, sizeof(hci_cp));
1349 hci_cp.simul = lmp_le_br_capable(hdev);
1352 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1355 mgmt_pending_remove(cmd);
1358 hci_dev_unlock(hdev);
1362 /* This is a helper function to test for pending mgmt commands that can
1363 * cause CoD or EIR HCI commands. We can only allow one such pending
1364 * mgmt command at a time since otherwise we cannot easily track what
1365 * the current values are, will be, and based on that calculate if a new
1366 * HCI command needs to be sent and if yes with what value.
1368 static bool pending_eir_or_class(struct hci_dev *hdev)
1370 struct pending_cmd *cmd;
1372 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1373 switch (cmd->opcode) {
1374 case MGMT_OP_ADD_UUID:
1375 case MGMT_OP_REMOVE_UUID:
1376 case MGMT_OP_SET_DEV_CLASS:
1377 case MGMT_OP_SET_POWERED:
/* Bluetooth base UUID, little-endian byte order; 16/32-bit UUIDs are
 * aliases of this with only bytes 12-15 differing (see get_uuid_size()).
 */
1385 static const u8 bluetooth_base_uuid[] = {
1386 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1387 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1390 static u8 get_uuid_size(const u8 *uuid)
1394 if (memcmp(uuid, bluetooth_base_uuid, 12))
1397 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for UUID/CoD commands: reply to the pending mgmt
 * command with the (possibly updated) 3-byte device class.
 */
1404 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1406 struct pending_cmd *cmd;
1410 cmd = mgmt_pending_find(mgmt_op, hdev);
1414 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1415 hdev->dev_class, 3);
1417 mgmt_pending_remove(cmd);
1420 hci_dev_unlock(hdev);
/* hci_request completion for add_uuid. */
1423 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1425 BT_DBG("status 0x%02x", status);
1427 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append the UUID to hdev->uuids and refresh
 * CoD/EIR via an hci_request. -ENODATA from hci_req_run means no HCI
 * command was needed, so the reply is sent immediately.
 */
1430 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1432 struct mgmt_cp_add_uuid *cp = data;
1433 struct pending_cmd *cmd;
1434 struct hci_request req;
1435 struct bt_uuid *uuid;
1438 BT_DBG("request for %s", hdev->name);
1442 if (pending_eir_or_class(hdev)) {
1443 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1448 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1454 memcpy(uuid->uuid, cp->uuid, 16);
1455 uuid->svc_hint = cp->svc_hint;
1456 uuid->size = get_uuid_size(cp->uuid);
1458 list_add_tail(&uuid->list, &hdev->uuids);
1460 hci_req_init(&req, hdev);
1465 err = hci_req_run(&req, add_uuid_complete);
1467 if (err != -ENODATA)
1470 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1471 hdev->dev_class, 3);
1475 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1484 hci_dev_unlock(hdev);
/* Re-arm the service cache timer when powered; returns whether the
 * cache transitioned to enabled (return statements missing here).
 */
1488 static bool enable_service_cache(struct hci_dev *hdev)
1490 if (!hdev_is_powered(hdev))
1493 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1494 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_request completion for remove_uuid. */
1502 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1504 BT_DBG("status 0x%02x", status);
1506 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: the all-zero UUID wildcard clears every
 * UUID (possibly deferring the HCI update behind the service cache);
 * otherwise removes matching entries and refreshes CoD/EIR.
 */
1509 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1512 struct mgmt_cp_remove_uuid *cp = data;
1513 struct pending_cmd *cmd;
1514 struct bt_uuid *match, *tmp;
1515 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1516 struct hci_request req;
1519 BT_DBG("request for %s", hdev->name);
1523 if (pending_eir_or_class(hdev)) {
1524 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1529 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1530 err = hci_uuids_clear(hdev);
1532 if (enable_service_cache(hdev)) {
1533 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1534 0, hdev->dev_class, 3);
1543 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1544 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1547 list_del(&match->list);
/* No entry matched the requested UUID. */
1553 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1554 MGMT_STATUS_INVALID_PARAMS);
1559 hci_req_init(&req, hdev);
1564 err = hci_req_run(&req, remove_uuid_complete);
1566 if (err != -ENODATA)
1569 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1570 hdev->dev_class, 3);
1574 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1583 hci_dev_unlock(hdev);
/* hci_request completion for set_dev_class. */
1587 static void set_class_complete(struct hci_dev *hdev, u8 status)
1589 BT_DBG("status 0x%02x", status);
1591 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler. Rejects reserved bits (minor low two,
 * major top three); flushes the service cache synchronously before
 * writing the new class so stale UUID data is not advertised.
 */
1594 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1597 struct mgmt_cp_set_dev_class *cp = data;
1598 struct pending_cmd *cmd;
1599 struct hci_request req;
1602 BT_DBG("request for %s", hdev->name);
1604 if (!lmp_bredr_capable(hdev))
1605 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1606 MGMT_STATUS_NOT_SUPPORTED);
1610 if (pending_eir_or_class(hdev)) {
1611 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1616 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1617 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1618 MGMT_STATUS_INVALID_PARAMS);
1622 hdev->major_class = cp->major;
1623 hdev->minor_class = cp->minor;
1625 if (!hdev_is_powered(hdev)) {
1626 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1627 hdev->dev_class, 3);
1631 hci_req_init(&req, hdev);
1633 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1634 hci_dev_unlock(hdev);
1635 cancel_delayed_work_sync(&hdev->service_cache);
1642 err = hci_req_run(&req, set_class_complete);
1644 if (err != -ENODATA)
1647 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1648 hdev->dev_class, 3);
1652 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1661 hci_dev_unlock(hdev);
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the stored BR/EDR link keys
 * with the list supplied by userspace and update the debug-keys flag.
 * This is a variable-length command; len is validated against the
 * declared key_count before any key is touched. */
1665 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1668 struct mgmt_cp_load_link_keys *cp = data;
1669 u16 key_count, expected_len;
1672 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16 and key_count comes from
 * userspace; key_count * sizeof(struct mgmt_link_key_info) can wrap.
 * Verify an upper bound on key_count exists (or add one). */
1674 expected_len = sizeof(*cp) + key_count *
1675 sizeof(struct mgmt_link_key_info);
1676 if (expected_len != len) {
1677 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1679 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1680 MGMT_STATUS_INVALID_PARAMS);
1683 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1684 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1685 MGMT_STATUS_INVALID_PARAMS);
1687 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating state: link keys are BR/EDR. */
1690 for (i = 0; i < key_count; i++) {
1691 struct mgmt_link_key_info *key = &cp->keys[i];
1693 if (key->addr.type != BDADDR_BREDR)
1694 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1695 MGMT_STATUS_INVALID_PARAMS);
/* All entries valid: wipe the old key store and load the new set. */
1700 hci_link_keys_clear(hdev);
1702 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1705 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1707 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1709 for (i = 0; i < key_count; i++) {
1710 struct mgmt_link_key_info *key = &cp->keys[i];
1712 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1713 key->type, key->pin_len);
1716 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1718 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given address to all mgmt
 * sockets except skip_sk (the socket that issued the unpair command,
 * which gets a command-complete instead). */
1723 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1724 u8 addr_type, struct sock *skip_sk)
1726 struct mgmt_ev_device_unpaired ev;
1728 bacpy(&ev.addr.bdaddr, bdaddr);
1729 ev.addr.type = addr_type;
1731 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handle MGMT_OP_UNPAIR_DEVICE: delete the stored link key (BR/EDR) or
 * LTK (LE) for the address and, if requested and a connection exists,
 * disconnect it.  The reply is deferred until the disconnect finishes
 * when a disconnection was initiated. */
1735 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1738 struct mgmt_cp_unpair_device *cp = data;
1739 struct mgmt_rp_unpair_device rp;
1740 struct hci_cp_disconnect dc;
1741 struct pending_cmd *cmd;
1742 struct hci_conn *conn;
1745 memset(&rp, 0, sizeof(rp));
1746 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1747 rp.addr.type = cp->addr.type;
1749 if (!bdaddr_type_is_valid(cp->addr.type))
1750 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1751 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a strict boolean in the wire protocol. */
1754 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1755 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1756 MGMT_STATUS_INVALID_PARAMS,
1761 if (!hdev_is_powered(hdev)) {
1762 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1763 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Remove the pairing material appropriate for the transport. */
1767 if (cp->addr.type == BDADDR_BREDR)
1768 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1770 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1773 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1774 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
/* Look up an active connection only when a disconnect was asked for. */
1778 if (cp->disconnect) {
1779 if (cp->addr.type == BDADDR_BREDR)
1780 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1783 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No connection (or no disconnect requested): finish immediately and
 * notify other mgmt sockets of the unpair. */
1790 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1792 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Defer the reply until the HCI disconnect completes. */
1796 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1803 dc.handle = cpu_to_le16(conn->handle);
1804 dc.reason = 0x13; /* Remote User Terminated Connection */
1805 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1807 mgmt_pending_remove(cmd);
1810 hci_dev_unlock(hdev);
/* Handle MGMT_OP_DISCONNECT: terminate the ACL (BR/EDR) or LE
 * connection to the given address.  Reply is deferred to the
 * disconnect-complete event via the pending-command list. */
1814 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1817 struct mgmt_cp_disconnect *cp = data;
1818 struct mgmt_rp_disconnect rp;
1819 struct hci_cp_disconnect dc;
1820 struct pending_cmd *cmd;
1821 struct hci_conn *conn;
1826 memset(&rp, 0, sizeof(rp));
1827 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1828 rp.addr.type = cp->addr.type;
1830 if (!bdaddr_type_is_valid(cp->addr.type))
1831 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1832 MGMT_STATUS_INVALID_PARAMS,
1837 if (!test_bit(HCI_UP, &hdev->flags)) {
1838 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1839 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect command may be outstanding per controller. */
1843 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1844 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1845 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1849 if (cp->addr.type == BDADDR_BREDR)
1850 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1853 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections are not usable for a disconnect. */
1855 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1856 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1857 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1861 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1867 dc.handle = cpu_to_le16(conn->handle);
1868 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1870 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1872 mgmt_pending_remove(cmd);
1875 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type to the corresponding mgmt
 * BDADDR_* address-type constant used on the management interface. */
1879 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1881 switch (link_type) {
1883 switch (addr_type) {
1884 case ADDR_LE_DEV_PUBLIC:
1885 return BDADDR_LE_PUBLIC;
1888 /* Fallback to LE Random address type */
1889 return BDADDR_LE_RANDOM;
1893 /* Fallback to BR/EDR type */
1894 return BDADDR_BREDR;
/* Handle MGMT_OP_GET_CONNECTIONS: report the addresses of all
 * connections that have reached the mgmt-connected state.  SCO/ESCO
 * links are skipped (only the underlying ACL is reported). */
1898 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1901 struct mgmt_rp_get_connections *rp;
1911 if (!hdev_is_powered(hdev)) {
1912 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1913 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply buffer. */
1918 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1919 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1923 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1924 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address list. */
1931 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1932 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1934 bacpy(&rp->addr[i].bdaddr, &c->dst);
1935 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1936 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1941 rp->conn_count = cpu_to_le16(i);
1943 /* Recalculate length in case of filtered SCO connections, etc */
1944 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1946 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1952 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the
 * corresponding HCI negative reply; the pending entry is removed if
 * the HCI send fails (elided error path). */
1956 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1957 struct mgmt_cp_pin_code_neg_reply *cp)
1959 struct pending_cmd *cmd;
1962 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1967 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1968 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1970 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller for the pending legacy-pairing request.  If the link
 * requires high security, only a full 16-digit PIN is acceptable;
 * anything shorter is automatically converted into a negative reply. */
1975 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1978 struct hci_conn *conn;
1979 struct mgmt_cp_pin_code_reply *cp = data;
1980 struct hci_cp_pin_code_reply reply;
1981 struct pending_cmd *cmd;
1988 if (!hdev_is_powered(hdev)) {
1989 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1990 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only happens over a BR/EDR ACL link. */
1994 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1996 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1997 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a 16-digit PIN; reject shorter ones by
 * sending a negative reply on the user's behalf. */
2001 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2002 struct mgmt_cp_pin_code_neg_reply ncp;
2004 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2006 BT_ERR("PIN code is not 16 bytes long");
2008 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2010 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2011 MGMT_STATUS_INVALID_PARAMS);
2016 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2022 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2023 reply.pin_len = cp->pin_len;
2024 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2026 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2028 mgmt_pending_remove(cmd);
2031 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * future pairing attempts.  Always succeeds. */
2035 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2038 struct mgmt_cp_set_io_capability *cp = data;
2044 hdev->io_capability = cp->io_capability;
2046 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2047 hdev->io_capability);
2049 hci_dev_unlock(hdev);
2051 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at this connection; returns NULL (elided) when none matches. */
2055 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2057 struct hci_dev *hdev = conn->hdev;
2058 struct pending_cmd *cmd;
2060 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2061 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2064 if (cmd->user_data != conn)
/* Finish a MGMT_OP_PAIR_DEVICE command with the given mgmt status:
 * send the reply, detach all connection callbacks so no further events
 * reach this command, and drop the pending entry. */
2073 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2075 struct mgmt_rp_pair_device rp;
2076 struct hci_conn *conn = cmd->user_data;
2078 bacpy(&rp.addr.bdaddr, &conn->dst);
2079 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2081 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2084 /* So we don't get further callbacks for this connection */
2085 conn->connect_cfm_cb = NULL;
2086 conn->security_cfm_cb = NULL;
2087 conn->disconn_cfm_cb = NULL;
2091 mgmt_pending_remove(cmd);
/* Connection/security callback for BR/EDR pairing: translate the HCI
 * status and complete the matching pending pair command, if any. */
2094 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2096 struct pending_cmd *cmd;
2098 BT_DBG("status %u", status);
2100 cmd = find_pairing(conn);
2102 BT_DBG("Unable to find a pending command");
2104 pairing_complete(cmd, mgmt_status(status));
/* LE connect callback for pairing: for LE a successful connection does
 * not mean pairing succeeded, so (per the elided guard) only failures
 * are reported here; success completion comes via SMP. */
2107 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2109 struct pending_cmd *cmd;
2111 BT_DBG("status %u", status);
2116 cmd = find_pairing(conn);
2118 BT_DBG("Unable to find a pending command");
2120 pairing_complete(cmd, mgmt_status(status));
/* Handle MGMT_OP_PAIR_DEVICE: initiate a BR/EDR or LE connection with
 * dedicated-bonding authentication and track completion through the
 * connection's cfm callbacks.  The mgmt reply is deferred until
 * pairing_complete() fires. */
2123 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2126 struct mgmt_cp_pair_device *cp = data;
2127 struct mgmt_rp_pair_device rp;
2128 struct pending_cmd *cmd;
2129 u8 sec_level, auth_type;
2130 struct hci_conn *conn;
2135 memset(&rp, 0, sizeof(rp));
2136 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2137 rp.addr.type = cp->addr.type;
2139 if (!bdaddr_type_is_valid(cp->addr.type))
2140 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2141 MGMT_STATUS_INVALID_PARAMS,
2146 if (!hdev_is_powered(hdev)) {
2147 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2148 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot do MITM protection; use plain
 * dedicated bonding, otherwise request MITM. */
2152 sec_level = BT_SECURITY_MEDIUM;
2153 if (cp->io_cap == 0x03)
2154 auth_type = HCI_AT_DEDICATED_BONDING;
2156 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2158 if (cp->addr.type == BDADDR_BREDR)
2159 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2160 cp->addr.type, sec_level, auth_type);
2162 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2163 cp->addr.type, sec_level, auth_type);
/* hci_connect() returns ERR_PTR on failure. */
2168 if (PTR_ERR(conn) == -EBUSY)
2169 status = MGMT_STATUS_BUSY;
2171 status = MGMT_STATUS_CONNECT_FAILED;
2173 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already installed means another pairing is in
 * progress on this connection. */
2179 if (conn->connect_cfm_cb) {
2181 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2182 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2186 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2193 /* For LE, just connecting isn't a proof that the pairing finished */
2194 if (cp->addr.type == BDADDR_BREDR)
2195 conn->connect_cfm_cb = pairing_complete_cb;
2197 conn->connect_cfm_cb = le_connect_complete_cb;
2199 conn->security_cfm_cb = pairing_complete_cb;
2200 conn->disconn_cfm_cb = pairing_complete_cb;
2201 conn->io_capability = cp->io_cap;
2202 cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2204 if (conn->state == BT_CONNECTED &&
2205 hci_conn_security(conn, sec_level, auth_type))
2206 pairing_complete(cmd, 0);
2211 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending pair-device
 * command for the given address, completing it with STATUS_CANCELLED. */
2215 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2218 struct mgmt_addr_info *addr = data;
2219 struct pending_cmd *cmd;
2220 struct hci_conn *conn;
2227 if (!hdev_is_powered(hdev)) {
2228 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2229 MGMT_STATUS_NOT_POWERED);
2233 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2235 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2236 MGMT_STATUS_INVALID_PARAMS);
/* The address must match the pairing actually in progress. */
2240 conn = cmd->user_data;
2242 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2243 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2244 MGMT_STATUS_INVALID_PARAMS);
2248 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2250 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2251 addr, sizeof(*addr));
2253 hci_dev_unlock(hdev);
/* Common backend for all user confirm/passkey (negative) replies:
 * route the user's response either to SMP (LE address types) or to the
 * controller via the given HCI opcode (BR/EDR).  passkey is only used
 * by HCI_OP_USER_PASSKEY_REPLY. */
2257 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2258 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2259 u16 hci_op, __le32 passkey)
2261 struct pending_cmd *cmd;
2262 struct hci_conn *conn;
2267 if (!hdev_is_powered(hdev)) {
2268 err = cmd_status(sk, hdev->id, mgmt_op,
2269 MGMT_STATUS_NOT_POWERED);
2273 if (type == BDADDR_BREDR)
2274 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2276 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2279 err = cmd_status(sk, hdev->id, mgmt_op,
2280 MGMT_STATUS_NOT_CONNECTED);
2284 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2285 /* Continue with pairing via SMP */
2286 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2289 err = cmd_status(sk, hdev->id, mgmt_op,
2290 MGMT_STATUS_SUCCESS);
2292 err = cmd_status(sk, hdev->id, mgmt_op,
2293 MGMT_STATUS_FAILED);
2298 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2304 /* Continue with pairing via HCI */
/* Passkey replies carry an extra parameter; all other replies send
 * just the address. */
2305 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2306 struct hci_cp_user_passkey_reply cp;
2308 bacpy(&cp.bdaddr, bdaddr);
2309 cp.passkey = passkey;
2310 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2312 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2315 mgmt_pending_remove(cmd);
2318 hci_dev_unlock(hdev);
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY via the common user-pairing
 * response helper (no passkey). */
2322 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2323 void *data, u16 len)
2325 struct mgmt_cp_pin_code_neg_reply *cp = data;
2329 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2330 MGMT_OP_PIN_CODE_NEG_REPLY,
2331 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_REPLY.  The explicit length check guards
 * against trailing garbage before delegating to user_pairing_resp(). */
2334 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2337 struct mgmt_cp_user_confirm_reply *cp = data;
2341 if (len != sizeof(*cp))
2342 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2343 MGMT_STATUS_INVALID_PARAMS);
2345 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2346 MGMT_OP_USER_CONFIRM_REPLY,
2347 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common helper. */
2350 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2351 void *data, u16 len)
2353 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2357 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2358 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2359 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_PASSKEY_REPLY: forwards the user's passkey via
 * the common helper. */
2362 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2365 struct mgmt_cp_user_passkey_reply *cp = data;
2369 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2370 MGMT_OP_USER_PASSKEY_REPLY,
2371 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common helper. */
2374 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2375 void *data, u16 len)
2377 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2381 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2382 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2383 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (with the current dev_name)
 * to the given request. */
2386 static void update_name(struct hci_request *req)
2388 struct hci_dev *hdev = req->hdev;
2389 struct hci_cp_write_local_name cp;
2391 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2393 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion hook for MGMT_OP_SET_LOCAL_NAME: report
 * success or the translated HCI error to the waiting mgmt socket and
 * drop the pending command. */
2396 static void set_name_complete(struct hci_dev *hdev, u8 status)
2398 struct mgmt_cp_set_local_name *cp;
2399 struct pending_cmd *cmd;
2401 BT_DBG("status 0x%02x", status);
2405 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2412 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2413 mgmt_status(status));
2415 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2418 mgmt_pending_remove(cmd);
2421 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: store the new (short) name and, when
 * powered, push it to the controller (and into the LE advertising data
 * when LE-capable).  When unpowered, only the stored copy changes and
 * a LOCAL_NAME_CHANGED event is broadcast. */
2424 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2427 struct mgmt_cp_set_local_name *cp = data;
2428 struct pending_cmd *cmd;
2429 struct hci_request req;
2436 /* If the old values are the same as the new ones just return a
2437 * direct command complete event.
2439 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2440 !memcmp(hdev->short_name, cp->short_name,
2441 sizeof(hdev->short_name))) {
2442 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2447 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2449 if (!hdev_is_powered(hdev)) {
2450 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2452 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Inform other mgmt sockets about the name change. */
2457 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2463 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2469 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2471 hci_req_init(&req, hdev);
2473 if (lmp_bredr_capable(hdev)) {
/* LE controllers also carry the name in the advertising data. */
2478 if (lmp_le_capable(hdev))
2479 hci_update_ad(&req);
2481 err = hci_req_run(&req, set_name_complete);
2483 mgmt_pending_remove(cmd);
2486 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask an SSP-capable, powered
 * controller for its local OOB hash/randomizer.  Only one such request
 * may be outstanding; the reply is delivered on HCI completion. */
2490 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2491 void *data, u16 data_len)
2493 struct pending_cmd *cmd;
2496 BT_DBG("%s", hdev->name);
2500 if (!hdev_is_powered(hdev)) {
2501 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2502 MGMT_STATUS_NOT_POWERED);
2506 if (!lmp_ssp_capable(hdev)) {
2507 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2508 MGMT_STATUS_NOT_SUPPORTED);
2512 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2513 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2518 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2524 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2526 mgmt_pending_remove(cmd);
2529 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store the remote device's OOB
 * hash/randomizer for later SSP pairing; synchronous reply. */
2533 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2534 void *data, u16 len)
2536 struct mgmt_cp_add_remote_oob_data *cp = data;
2540 BT_DBG("%s ", hdev->name);
2544 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2547 status = MGMT_STATUS_FAILED;
2549 status = MGMT_STATUS_SUCCESS;
2551 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2552 &cp->addr, sizeof(cp->addr));
2554 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop previously stored remote
 * OOB data; a lookup failure maps to INVALID_PARAMS. */
2558 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2559 void *data, u16 len)
2561 struct mgmt_cp_remove_remote_oob_data *cp = data;
2565 BT_DBG("%s", hdev->name);
2569 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2571 status = MGMT_STATUS_INVALID_PARAMS;
2573 status = MGMT_STATUS_SUCCESS;
2575 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2576 status, &cp->addr, sizeof(cp->addr));
2578 hci_dev_unlock(hdev);
/* Kick off the BR/EDR inquiry phase of an interleaved (BR/EDR + LE)
 * discovery; on failure the discovery state is reset to STOPPED. */
2582 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2586 BT_DBG("%s", hdev->name);
2590 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2592 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2594 hci_dev_unlock(hdev);
/* Handle MGMT_OP_START_DISCOVERY: start BR/EDR inquiry, LE scan, or an
 * interleaved combination depending on cp->type, after checking power,
 * periodic-inquiry, and current discovery state.  On success the state
 * moves to DISCOVERY_STARTING and the reply is deferred. */
2599 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2600 void *data, u16 len)
2602 struct mgmt_cp_start_discovery *cp = data;
2603 struct pending_cmd *cmd;
2606 BT_DBG("%s", hdev->name);
2610 if (!hdev_is_powered(hdev)) {
2611 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2612 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and discovery are mutually exclusive. */
2616 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2617 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2622 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2623 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2628 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2634 hdev->discovery.type = cp->type;
2636 switch (hdev->discovery.type) {
2637 case DISCOV_TYPE_BREDR:
2638 if (!lmp_bredr_capable(hdev)) {
2639 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2640 MGMT_STATUS_NOT_SUPPORTED);
2641 mgmt_pending_remove(cmd);
2645 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2648 case DISCOV_TYPE_LE:
2649 if (!lmp_host_le_capable(hdev)) {
2650 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2651 MGMT_STATUS_NOT_SUPPORTED);
2652 mgmt_pending_remove(cmd);
2656 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2657 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
/* Interleaved: LE scan first; BR/EDR inquiry follows via
 * mgmt_interleaved_discovery() after the LE phase. */
2660 case DISCOV_TYPE_INTERLEAVED:
2661 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2662 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2663 MGMT_STATUS_NOT_SUPPORTED);
2664 mgmt_pending_remove(cmd);
2668 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2669 LE_SCAN_TIMEOUT_BREDR_LE);
2673 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2674 MGMT_STATUS_INVALID_PARAMS);
2675 mgmt_pending_remove(cmd);
2680 mgmt_pending_remove(cmd);
2682 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2685 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: abort the in-progress discovery of
 * the matching type — cancel inquiry/LE scan while finding, or cancel
 * the outstanding remote-name request while resolving.  State moves to
 * DISCOVERY_STOPPING and the reply is deferred. */
2689 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2692 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2693 struct pending_cmd *cmd;
2694 struct hci_cp_remote_name_req_cancel cp;
2695 struct inquiry_entry *e;
2698 BT_DBG("%s", hdev->name);
2702 if (!hci_discovery_active(hdev)) {
2703 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2704 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2705 sizeof(mgmt_cp->type));
/* The requested type must match the discovery that is running. */
2709 if (hdev->discovery.type != mgmt_cp->type) {
2710 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2711 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2712 sizeof(mgmt_cp->type));
2716 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2722 switch (hdev->discovery.state) {
2723 case DISCOVERY_FINDING:
2724 if (test_bit(HCI_INQUIRY, &hdev->flags))
2725 err = hci_cancel_inquiry(hdev);
2727 err = hci_cancel_le_scan(hdev);
/* Resolving: if no name request is pending, finish right away;
 * otherwise cancel the outstanding remote-name request. */
2731 case DISCOVERY_RESOLVING:
2732 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2735 mgmt_pending_remove(cmd);
2736 err = cmd_complete(sk, hdev->id,
2737 MGMT_OP_STOP_DISCOVERY, 0,
2739 sizeof(mgmt_cp->type));
2740 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2744 bacpy(&cp.bdaddr, &e->data.bdaddr);
2745 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2751 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2756 mgmt_pending_remove(cmd);
2758 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2761 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: userspace tells us whether it already
 * knows the name of a discovered device; unknown names are queued for
 * resolution in the inquiry cache. */
2765 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2768 struct mgmt_cp_confirm_name *cp = data;
2769 struct inquiry_entry *e;
2772 BT_DBG("%s", hdev->name);
2776 if (!hci_discovery_active(hdev)) {
2777 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2778 MGMT_STATUS_FAILED);
2782 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2784 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2785 MGMT_STATUS_INVALID_PARAMS);
2789 if (cp->name_known) {
2790 e->name_state = NAME_KNOWN;
2793 e->name_state = NAME_NEEDED;
2794 hci_inquiry_cache_update_resolve(hdev, e);
2797 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2801 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the address to the controller's
 * blacklist; synchronous reply echoing the address. */
2805 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2808 struct mgmt_cp_block_device *cp = data;
2812 BT_DBG("%s", hdev->name);
2814 if (!bdaddr_type_is_valid(cp->addr.type))
2815 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2816 MGMT_STATUS_INVALID_PARAMS,
2817 &cp->addr, sizeof(cp->addr));
2821 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2823 status = MGMT_STATUS_FAILED;
2825 status = MGMT_STATUS_SUCCESS;
2827 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2828 &cp->addr, sizeof(cp->addr));
2830 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the address from the
 * controller's blacklist; a lookup failure maps to INVALID_PARAMS. */
2835 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2838 struct mgmt_cp_unblock_device *cp = data;
2842 BT_DBG("%s", hdev->name);
2844 if (!bdaddr_type_is_valid(cp->addr.type))
2845 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2846 MGMT_STATUS_INVALID_PARAMS,
2847 &cp->addr, sizeof(cp->addr));
2851 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2853 status = MGMT_STATUS_INVALID_PARAMS;
2855 status = MGMT_STATUS_SUCCESS;
2857 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2858 &cp->addr, sizeof(cp->addr));
2860 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record fields and
 * trigger an (empty or EIR-updating — interior elided) HCI request.
 * Valid sources are 0x0000-0x0002 per the mgmt API. */
2865 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2868 struct mgmt_cp_set_device_id *cp = data;
2869 struct hci_request req;
2873 BT_DBG("%s", hdev->name);
2875 source = __le16_to_cpu(cp->source);
2877 if (source > 0x0002)
2878 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2879 MGMT_STATUS_INVALID_PARAMS);
2883 hdev->devid_source = source;
2884 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2885 hdev->devid_product = __le16_to_cpu(cp->product);
2886 hdev->devid_version = __le16_to_cpu(cp->version);
2888 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2890 hci_req_init(&req, hdev);
2892 hci_req_run(&req, NULL);
2894 hci_dev_unlock(hdev);
/* HCI request completion hook for MGMT_OP_SET_FAST_CONNECTABLE: on
 * success, send the updated settings and broadcast new_settings; on
 * failure, report the translated HCI error. */
2899 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2901 struct pending_cmd *cmd;
2903 BT_DBG("status 0x%02x", status);
2907 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2912 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2913 mgmt_status(status));
2915 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2916 new_settings(hdev, cmd->sk);
2919 mgmt_pending_remove(cmd);
2922 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: switch between interlaced page
 * scan with a short interval (fast) and the standard 1.28 s page scan
 * (default).  Requires BR/EDR support, power, and the connectable
 * setting; reply is deferred to fast_connectable_complete(). */
2925 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2926 void *data, u16 len)
2928 struct mgmt_mode *cp = data;
2929 struct hci_cp_write_page_scan_activity acp;
2930 struct pending_cmd *cmd;
2931 struct hci_request req;
2935 BT_DBG("%s", hdev->name);
2937 if (!lmp_bredr_capable(hdev))
2938 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2939 MGMT_STATUS_NOT_SUPPORTED);
2941 if (cp->val != 0x00 && cp->val != 0x01)
2942 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2943 MGMT_STATUS_INVALID_PARAMS);
2945 if (!hdev_is_powered(hdev))
2946 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2947 MGMT_STATUS_NOT_POWERED);
/* Fast-connectable only makes sense while page scan is enabled. */
2949 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2950 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2951 MGMT_STATUS_REJECTED);
2956 type = PAGE_SCAN_TYPE_INTERLACED;
2958 /* 160 msec page scan interval */
2959 acp.interval = __constant_cpu_to_le16(0x0100);
2961 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2963 /* default 1.28 sec page scan */
2964 acp.interval = __constant_cpu_to_le16(0x0800);
2967 /* default 11.25 msec page scan window */
2968 acp.window = __constant_cpu_to_le16(0x0012);
2970 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
2977 hci_req_init(&req, hdev);
2979 hci_req_add(&req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), &acp);
2980 hci_req_add(&req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2982 err = hci_req_run(&req, fast_connectable_complete);
2984 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2985 MGMT_STATUS_FAILED);
2986 mgmt_pending_remove(cmd);
2990 hci_dev_unlock(hdev);
/* Validate a single LTK entry from userspace: authenticated and master
 * must be strict booleans and the address type must be an LE type. */
2995 static bool ltk_is_valid(struct mgmt_ltk_info *key)
2997 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2999 if (key->master != 0x00 && key->master != 0x01)
3001 if (!bdaddr_type_is_le(key->addr.type))
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: validate and replace the stored
 * SMP long term keys with the list supplied by userspace.  Variable
 * length command; len is checked against the declared key_count. */
3006 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3007 void *cp_data, u16 len)
3009 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3010 u16 key_count, expected_len;
3013 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16 and key_count is user-controlled;
 * the multiplication can wrap -- verify a key_count upper bound. */
3015 expected_len = sizeof(*cp) + key_count *
3016 sizeof(struct mgmt_ltk_info);
3017 if (expected_len != len) {
3018 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3020 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3021 MGMT_STATUS_INVALID_PARAMS);
3024 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before touching the key store. */
3026 for (i = 0; i < key_count; i++) {
3027 struct mgmt_ltk_info *key = &cp->keys[i];
3029 if (!ltk_is_valid(key))
3030 return cmd_status(sk, hdev->id,
3031 MGMT_OP_LOAD_LONG_TERM_KEYS,
3032 MGMT_STATUS_INVALID_PARAMS);
3037 hci_smp_ltks_clear(hdev);
3039 for (i = 0; i < key_count; i++) {
3040 struct mgmt_ltk_info *key = &cp->keys[i];
/* Master/slave role selects the LTK type (master branch elided). */
3046 type = HCI_SMP_LTK_SLAVE;
3048 hci_add_ltk(hdev, &key->addr.bdaddr,
3049 bdaddr_to_le(key->addr.type),
3050 type, 0, key->authenticated, key->val,
3051 key->enc_size, key->ediv, key->rand);
3054 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3057 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed directly by opcode.  Each
 * entry carries the handler, whether the command is variable-length
 * (len may exceed data_len), and the minimum/exact parameter size
 * enforced by mgmt_control(). */
3062 static const struct mgmt_handler {
3063 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3067 } mgmt_handlers[] = {
3068 { NULL }, /* 0x0000 (no command) */
3069 { read_version, false, MGMT_READ_VERSION_SIZE },
3070 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3071 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3072 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3073 { set_powered, false, MGMT_SETTING_SIZE },
3074 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3075 { set_connectable, false, MGMT_SETTING_SIZE },
3076 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3077 { set_pairable, false, MGMT_SETTING_SIZE },
3078 { set_link_security, false, MGMT_SETTING_SIZE },
3079 { set_ssp, false, MGMT_SETTING_SIZE },
3080 { set_hs, false, MGMT_SETTING_SIZE },
3081 { set_le, false, MGMT_SETTING_SIZE },
3082 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3083 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3084 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3085 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3086 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3087 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3088 { disconnect, false, MGMT_DISCONNECT_SIZE },
3089 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3090 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3091 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3092 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3093 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3094 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3095 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3096 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3097 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3098 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3099 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3100 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3101 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3102 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3103 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3104 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3105 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3106 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3107 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3108 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
3112 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3116 struct mgmt_hdr *hdr;
3117 u16 opcode, index, len;
3118 struct hci_dev *hdev = NULL;
3119 const struct mgmt_handler *handler;
3122 BT_DBG("got %zu bytes", msglen);
3124 if (msglen < sizeof(*hdr))
3127 buf = kmalloc(msglen, GFP_KERNEL);
3131 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3137 opcode = __le16_to_cpu(hdr->opcode);
3138 index = __le16_to_cpu(hdr->index);
3139 len = __le16_to_cpu(hdr->len);
3141 if (len != msglen - sizeof(*hdr)) {
3146 if (index != MGMT_INDEX_NONE) {
3147 hdev = hci_dev_get(index);
3149 err = cmd_status(sk, index, opcode,
3150 MGMT_STATUS_INVALID_INDEX);
3155 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3156 mgmt_handlers[opcode].func == NULL) {
3157 BT_DBG("Unknown op %u", opcode);
3158 err = cmd_status(sk, index, opcode,
3159 MGMT_STATUS_UNKNOWN_COMMAND);
3163 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3164 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3165 err = cmd_status(sk, index, opcode,
3166 MGMT_STATUS_INVALID_INDEX);
3170 handler = &mgmt_handlers[opcode];
3172 if ((handler->var_len && len < handler->data_len) ||
3173 (!handler->var_len && len != handler->data_len)) {
3174 err = cmd_status(sk, index, opcode,
3175 MGMT_STATUS_INVALID_PARAMS);
3180 mgmt_init_hdev(sk, hdev);
3182 cp = buf + sizeof(*hdr);
3184 err = handler->func(sk, hdev, cp, len);
3198 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3202 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3203 mgmt_pending_remove(cmd);
3206 int mgmt_index_added(struct hci_dev *hdev)
3208 if (!mgmt_valid_hdev(hdev))
3211 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3214 int mgmt_index_removed(struct hci_dev *hdev)
3216 u8 status = MGMT_STATUS_INVALID_INDEX;
3218 if (!mgmt_valid_hdev(hdev))
3221 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3223 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3228 struct hci_dev *hdev;
3232 static void settings_rsp(struct pending_cmd *cmd, void *data)
3234 struct cmd_lookup *match = data;
3236 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3238 list_del(&cmd->list);
3240 if (match->sk == NULL) {
3241 match->sk = cmd->sk;
3242 sock_hold(match->sk);
3245 mgmt_pending_free(cmd);
3248 static void set_bredr_scan(struct hci_request *req)
3250 struct hci_dev *hdev = req->hdev;
3253 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3255 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3256 scan |= SCAN_INQUIRY;
3259 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3262 static void powered_complete(struct hci_dev *hdev, u8 status)
3264 struct cmd_lookup match = { NULL, hdev };
3266 BT_DBG("status 0x%02x", status);
3270 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3272 new_settings(hdev, match.sk);
3274 hci_dev_unlock(hdev);
3280 static int powered_update_hci(struct hci_dev *hdev)
3282 struct hci_request req;
3285 hci_req_init(&req, hdev);
3287 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3288 !lmp_host_ssp_capable(hdev)) {
3291 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3294 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3295 struct hci_cp_write_le_host_supported cp;
3298 cp.simul = lmp_le_br_capable(hdev);
3300 /* Check first if we already have the right
3301 * host state (host features set)
3303 if (cp.le != lmp_host_le_capable(hdev) ||
3304 cp.simul != lmp_host_le_br_capable(hdev))
3305 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3309 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3310 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3311 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3312 sizeof(link_sec), &link_sec);
3314 if (lmp_bredr_capable(hdev)) {
3315 set_bredr_scan(&req);
3321 return hci_req_run(&req, powered_complete);
3324 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3326 struct cmd_lookup match = { NULL, hdev };
3327 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3328 u8 zero_cod[] = { 0, 0, 0 };
3331 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3335 if (powered_update_hci(hdev) == 0)
3338 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3343 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3344 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3346 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3347 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3348 zero_cod, sizeof(zero_cod), NULL);
3351 err = new_settings(hdev, match.sk);
3359 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3361 struct cmd_lookup match = { NULL, hdev };
3362 bool changed = false;
3366 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3369 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3373 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3377 err = new_settings(hdev, match.sk);
3385 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3387 struct pending_cmd *cmd;
3388 bool changed = false;
3392 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3395 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3399 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3402 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3407 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3409 u8 mgmt_err = mgmt_status(status);
3411 if (scan & SCAN_PAGE)
3412 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3413 cmd_status_rsp, &mgmt_err);
3415 if (scan & SCAN_INQUIRY)
3416 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3417 cmd_status_rsp, &mgmt_err);
3422 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3425 struct mgmt_ev_new_link_key ev;
3427 memset(&ev, 0, sizeof(ev));
3429 ev.store_hint = persistent;
3430 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3431 ev.key.addr.type = BDADDR_BREDR;
3432 ev.key.type = key->type;
3433 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3434 ev.key.pin_len = key->pin_len;
3436 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3439 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3441 struct mgmt_ev_new_long_term_key ev;
3443 memset(&ev, 0, sizeof(ev));
3445 ev.store_hint = persistent;
3446 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3447 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3448 ev.key.authenticated = key->authenticated;
3449 ev.key.enc_size = key->enc_size;
3450 ev.key.ediv = key->ediv;
3452 if (key->type == HCI_SMP_LTK)
3455 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3456 memcpy(ev.key.val, key->val, sizeof(key->val));
3458 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3462 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3463 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3467 struct mgmt_ev_device_connected *ev = (void *) buf;
3470 bacpy(&ev->addr.bdaddr, bdaddr);
3471 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3473 ev->flags = __cpu_to_le32(flags);
3476 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3479 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3480 eir_len = eir_append_data(ev->eir, eir_len,
3481 EIR_CLASS_OF_DEV, dev_class, 3);
3483 ev->eir_len = cpu_to_le16(eir_len);
3485 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3486 sizeof(*ev) + eir_len, NULL);
3489 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3491 struct mgmt_cp_disconnect *cp = cmd->param;
3492 struct sock **sk = data;
3493 struct mgmt_rp_disconnect rp;
3495 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3496 rp.addr.type = cp->addr.type;
3498 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3504 mgmt_pending_remove(cmd);
3507 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3509 struct hci_dev *hdev = data;
3510 struct mgmt_cp_unpair_device *cp = cmd->param;
3511 struct mgmt_rp_unpair_device rp;
3513 memset(&rp, 0, sizeof(rp));
3514 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3515 rp.addr.type = cp->addr.type;
3517 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3519 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3521 mgmt_pending_remove(cmd);
3524 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3525 u8 link_type, u8 addr_type, u8 reason)
3527 struct mgmt_ev_device_disconnected ev;
3528 struct sock *sk = NULL;
3531 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3533 bacpy(&ev.addr.bdaddr, bdaddr);
3534 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3537 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3543 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3549 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3550 u8 link_type, u8 addr_type, u8 status)
3552 struct mgmt_rp_disconnect rp;
3553 struct pending_cmd *cmd;
3556 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3559 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3563 bacpy(&rp.addr.bdaddr, bdaddr);
3564 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3566 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3567 mgmt_status(status), &rp, sizeof(rp));
3569 mgmt_pending_remove(cmd);
3574 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3575 u8 addr_type, u8 status)
3577 struct mgmt_ev_connect_failed ev;
3579 bacpy(&ev.addr.bdaddr, bdaddr);
3580 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3581 ev.status = mgmt_status(status);
3583 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3586 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3588 struct mgmt_ev_pin_code_request ev;
3590 bacpy(&ev.addr.bdaddr, bdaddr);
3591 ev.addr.type = BDADDR_BREDR;
3594 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3598 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3601 struct pending_cmd *cmd;
3602 struct mgmt_rp_pin_code_reply rp;
3605 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3609 bacpy(&rp.addr.bdaddr, bdaddr);
3610 rp.addr.type = BDADDR_BREDR;
3612 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3613 mgmt_status(status), &rp, sizeof(rp));
3615 mgmt_pending_remove(cmd);
3620 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3623 struct pending_cmd *cmd;
3624 struct mgmt_rp_pin_code_reply rp;
3627 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3631 bacpy(&rp.addr.bdaddr, bdaddr);
3632 rp.addr.type = BDADDR_BREDR;
3634 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3635 mgmt_status(status), &rp, sizeof(rp));
3637 mgmt_pending_remove(cmd);
3642 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3643 u8 link_type, u8 addr_type, __le32 value,
3646 struct mgmt_ev_user_confirm_request ev;
3648 BT_DBG("%s", hdev->name);
3650 bacpy(&ev.addr.bdaddr, bdaddr);
3651 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3652 ev.confirm_hint = confirm_hint;
3655 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3659 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3660 u8 link_type, u8 addr_type)
3662 struct mgmt_ev_user_passkey_request ev;
3664 BT_DBG("%s", hdev->name);
3666 bacpy(&ev.addr.bdaddr, bdaddr);
3667 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3669 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3673 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3674 u8 link_type, u8 addr_type, u8 status,
3677 struct pending_cmd *cmd;
3678 struct mgmt_rp_user_confirm_reply rp;
3681 cmd = mgmt_pending_find(opcode, hdev);
3685 bacpy(&rp.addr.bdaddr, bdaddr);
3686 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3687 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3690 mgmt_pending_remove(cmd);
3695 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3696 u8 link_type, u8 addr_type, u8 status)
3698 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3699 status, MGMT_OP_USER_CONFIRM_REPLY);
3702 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3703 u8 link_type, u8 addr_type, u8 status)
3705 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3707 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3710 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3711 u8 link_type, u8 addr_type, u8 status)
3713 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3714 status, MGMT_OP_USER_PASSKEY_REPLY);
3717 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3718 u8 link_type, u8 addr_type, u8 status)
3720 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3722 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3725 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3726 u8 link_type, u8 addr_type, u32 passkey,
3729 struct mgmt_ev_passkey_notify ev;
3731 BT_DBG("%s", hdev->name);
3733 bacpy(&ev.addr.bdaddr, bdaddr);
3734 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3735 ev.passkey = __cpu_to_le32(passkey);
3736 ev.entered = entered;
3738 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3741 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3742 u8 addr_type, u8 status)
3744 struct mgmt_ev_auth_failed ev;
3746 bacpy(&ev.addr.bdaddr, bdaddr);
3747 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3748 ev.status = mgmt_status(status);
3750 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3753 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3755 struct cmd_lookup match = { NULL, hdev };
3756 bool changed = false;
3760 u8 mgmt_err = mgmt_status(status);
3761 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3762 cmd_status_rsp, &mgmt_err);
3766 if (test_bit(HCI_AUTH, &hdev->flags)) {
3767 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3770 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3774 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3778 err = new_settings(hdev, match.sk);
3786 static void clear_eir(struct hci_request *req)
3788 struct hci_dev *hdev = req->hdev;
3789 struct hci_cp_write_eir cp;
3791 if (!lmp_ext_inq_capable(hdev))
3794 memset(hdev->eir, 0, sizeof(hdev->eir));
3796 memset(&cp, 0, sizeof(cp));
3798 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
3801 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3803 struct cmd_lookup match = { NULL, hdev };
3804 struct hci_request req;
3805 bool changed = false;
3809 u8 mgmt_err = mgmt_status(status);
3811 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3813 err = new_settings(hdev, NULL);
3815 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3822 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3825 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3829 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3832 err = new_settings(hdev, match.sk);
3837 hci_req_init(&req, hdev);
3839 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3844 hci_req_run(&req, NULL);
3849 static void sk_lookup(struct pending_cmd *cmd, void *data)
3851 struct cmd_lookup *match = data;
3853 if (match->sk == NULL) {
3854 match->sk = cmd->sk;
3855 sock_hold(match->sk);
3859 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3862 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3865 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3866 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3867 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3870 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
3879 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3881 struct mgmt_cp_set_local_name ev;
3882 struct pending_cmd *cmd;
3887 memset(&ev, 0, sizeof(ev));
3888 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3889 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3891 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3893 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3895 /* If this is a HCI command related to powering on the
3896 * HCI dev don't send any mgmt signals.
3898 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3902 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3903 cmd ? cmd->sk : NULL);
3906 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3907 u8 *randomizer, u8 status)
3909 struct pending_cmd *cmd;
3912 BT_DBG("%s status %u", hdev->name, status);
3914 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3919 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3920 mgmt_status(status));
3922 struct mgmt_rp_read_local_oob_data rp;
3924 memcpy(rp.hash, hash, sizeof(rp.hash));
3925 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3927 err = cmd_complete(cmd->sk, hdev->id,
3928 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3932 mgmt_pending_remove(cmd);
3937 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3939 struct cmd_lookup match = { NULL, hdev };
3940 bool changed = false;
3944 u8 mgmt_err = mgmt_status(status);
3946 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3948 err = new_settings(hdev, NULL);
3950 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3957 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3960 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3964 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3967 err = new_settings(hdev, match.sk);
3975 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3976 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3977 ssp, u8 *eir, u16 eir_len)
3980 struct mgmt_ev_device_found *ev = (void *) buf;
3983 /* Leave 5 bytes for a potential CoD field */
3984 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3987 memset(buf, 0, sizeof(buf));
3989 bacpy(&ev->addr.bdaddr, bdaddr);
3990 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3993 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3995 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3998 memcpy(ev->eir, eir, eir_len);
4000 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4001 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4004 ev->eir_len = cpu_to_le16(eir_len);
4005 ev_size = sizeof(*ev) + eir_len;
4007 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4010 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4011 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4013 struct mgmt_ev_device_found *ev;
4014 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4017 ev = (struct mgmt_ev_device_found *) buf;
4019 memset(buf, 0, sizeof(buf));
4021 bacpy(&ev->addr.bdaddr, bdaddr);
4022 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4025 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4028 ev->eir_len = cpu_to_le16(eir_len);
4030 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4031 sizeof(*ev) + eir_len, NULL);
4034 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4036 struct pending_cmd *cmd;
4040 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4042 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4046 type = hdev->discovery.type;
4048 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4049 &type, sizeof(type));
4050 mgmt_pending_remove(cmd);
4055 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4057 struct pending_cmd *cmd;
4060 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4064 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4065 &hdev->discovery.type, sizeof(hdev->discovery.type));
4066 mgmt_pending_remove(cmd);
4071 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4073 struct mgmt_ev_discovering ev;
4074 struct pending_cmd *cmd;
4076 BT_DBG("%s discovering %u", hdev->name, discovering);
4079 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4081 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4084 u8 type = hdev->discovery.type;
4086 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4088 mgmt_pending_remove(cmd);
4091 memset(&ev, 0, sizeof(ev));
4092 ev.type = hdev->discovery.type;
4093 ev.discovering = discovering;
4095 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4098 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4100 struct pending_cmd *cmd;
4101 struct mgmt_ev_device_blocked ev;
4103 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4105 bacpy(&ev.addr.bdaddr, bdaddr);
4106 ev.addr.type = type;
4108 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4109 cmd ? cmd->sk : NULL);
4112 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4114 struct pending_cmd *cmd;
4115 struct mgmt_ev_device_unblocked ev;
4117 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4119 bacpy(&ev.addr.bdaddr, bdaddr);
4120 ev.addr.type = type;
4122 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4123 cmd ? cmd->sk : NULL);
4126 module_param(enable_hs, bool, 0644);
4127 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");