2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
388 settings |= MGMT_SETTING_FAST_CONNECTABLE;
389 settings |= MGMT_SETTING_DISCOVERABLE;
390 settings |= MGMT_SETTING_BREDR;
391 settings |= MGMT_SETTING_LINK_SECURITY;
395 settings |= MGMT_SETTING_HS;
397 if (lmp_le_capable(hdev))
398 settings |= MGMT_SETTING_LE;
403 static u32 get_current_settings(struct hci_dev *hdev)
407 if (hdev_is_powered(hdev))
408 settings |= MGMT_SETTING_POWERED;
410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_CONNECTABLE;
413 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_FAST_CONNECTABLE;
416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_DISCOVERABLE;
419 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_PAIRABLE;
422 if (lmp_bredr_capable(hdev))
423 settings |= MGMT_SETTING_BREDR;
425 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LE;
428 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
429 settings |= MGMT_SETTING_LINK_SECURITY;
431 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_SSP;
434 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_HS;
440 #define PNP_INFO_SVCLASS_ID 0x1200
442 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
444 u8 *ptr = data, *uuids_start = NULL;
445 struct bt_uuid *uuid;
450 list_for_each_entry(uuid, &hdev->uuids, list) {
453 if (uuid->size != 16)
456 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
460 if (uuid16 == PNP_INFO_SVCLASS_ID)
466 uuids_start[1] = EIR_UUID16_ALL;
470 /* Stop if not enough space to put next UUID */
471 if ((ptr - data) + sizeof(u16) > len) {
472 uuids_start[1] = EIR_UUID16_SOME;
476 *ptr++ = (uuid16 & 0x00ff);
477 *ptr++ = (uuid16 & 0xff00) >> 8;
478 uuids_start[0] += sizeof(uuid16);
484 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
486 u8 *ptr = data, *uuids_start = NULL;
487 struct bt_uuid *uuid;
492 list_for_each_entry(uuid, &hdev->uuids, list) {
493 if (uuid->size != 32)
499 uuids_start[1] = EIR_UUID32_ALL;
503 /* Stop if not enough space to put next UUID */
504 if ((ptr - data) + sizeof(u32) > len) {
505 uuids_start[1] = EIR_UUID32_SOME;
509 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
511 uuids_start[0] += sizeof(u32);
517 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
519 u8 *ptr = data, *uuids_start = NULL;
520 struct bt_uuid *uuid;
525 list_for_each_entry(uuid, &hdev->uuids, list) {
526 if (uuid->size != 128)
532 uuids_start[1] = EIR_UUID128_ALL;
536 /* Stop if not enough space to put next UUID */
537 if ((ptr - data) + 16 > len) {
538 uuids_start[1] = EIR_UUID128_SOME;
542 memcpy(ptr, uuid->uuid, 16);
544 uuids_start[0] += 16;
550 static void create_eir(struct hci_dev *hdev, u8 *data)
555 name_len = strlen(hdev->dev_name);
561 ptr[1] = EIR_NAME_SHORT;
563 ptr[1] = EIR_NAME_COMPLETE;
565 /* EIR Data length */
566 ptr[0] = name_len + 1;
568 memcpy(ptr + 2, hdev->dev_name, name_len);
570 ptr += (name_len + 2);
573 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
575 ptr[1] = EIR_TX_POWER;
576 ptr[2] = (u8) hdev->inq_tx_power;
581 if (hdev->devid_source > 0) {
583 ptr[1] = EIR_DEVICE_ID;
585 put_unaligned_le16(hdev->devid_source, ptr + 2);
586 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
587 put_unaligned_le16(hdev->devid_product, ptr + 6);
588 put_unaligned_le16(hdev->devid_version, ptr + 8);
593 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
595 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
598 static void update_eir(struct hci_request *req)
600 struct hci_dev *hdev = req->hdev;
601 struct hci_cp_write_eir cp;
603 if (!hdev_is_powered(hdev))
606 if (!lmp_ext_inq_capable(hdev))
609 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
612 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
615 memset(&cp, 0, sizeof(cp));
617 create_eir(hdev, cp.data);
619 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
622 memcpy(hdev->eir, cp.data, sizeof(cp.data));
624 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
627 static u8 get_service_classes(struct hci_dev *hdev)
629 struct bt_uuid *uuid;
632 list_for_each_entry(uuid, &hdev->uuids, list)
633 val |= uuid->svc_hint;
638 static void update_class(struct hci_request *req)
640 struct hci_dev *hdev = req->hdev;
643 BT_DBG("%s", hdev->name);
645 if (!hdev_is_powered(hdev))
648 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
651 cod[0] = hdev->minor_class;
652 cod[1] = hdev->major_class;
653 cod[2] = get_service_classes(hdev);
655 if (memcmp(cod, hdev->dev_class, 3) == 0)
658 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
661 static void service_cache_off(struct work_struct *work)
663 struct hci_dev *hdev = container_of(work, struct hci_dev,
665 struct hci_request req;
667 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
670 hci_req_init(&req, hdev);
677 hci_dev_unlock(hdev);
679 hci_req_run(&req, NULL);
682 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
684 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
687 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
689 /* Non-mgmt controlled devices get this bit set
690 * implicitly so that pairing works for them, however
691 * for mgmt we require user-space to explicitly enable
694 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
697 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
698 void *data, u16 data_len)
700 struct mgmt_rp_read_info rp;
702 BT_DBG("sock %p %s", sk, hdev->name);
706 memset(&rp, 0, sizeof(rp));
708 bacpy(&rp.bdaddr, &hdev->bdaddr);
710 rp.version = hdev->hci_ver;
711 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
713 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
714 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
716 memcpy(rp.dev_class, hdev->dev_class, 3);
718 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
719 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
721 hci_dev_unlock(hdev);
723 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
727 static void mgmt_pending_free(struct pending_cmd *cmd)
734 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
735 struct hci_dev *hdev, void *data,
738 struct pending_cmd *cmd;
740 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
744 cmd->opcode = opcode;
745 cmd->index = hdev->id;
747 cmd->param = kmalloc(len, GFP_KERNEL);
754 memcpy(cmd->param, data, len);
759 list_add(&cmd->list, &hdev->mgmt_pending);
764 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
765 void (*cb)(struct pending_cmd *cmd,
769 struct pending_cmd *cmd, *tmp;
771 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
772 if (opcode > 0 && cmd->opcode != opcode)
779 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
781 struct pending_cmd *cmd;
783 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
784 if (cmd->opcode == opcode)
791 static void mgmt_pending_remove(struct pending_cmd *cmd)
793 list_del(&cmd->list);
794 mgmt_pending_free(cmd);
797 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
799 __le32 settings = cpu_to_le32(get_current_settings(hdev));
801 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
805 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
808 struct mgmt_mode *cp = data;
809 struct pending_cmd *cmd;
812 BT_DBG("request for %s", hdev->name);
814 if (cp->val != 0x00 && cp->val != 0x01)
815 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
816 MGMT_STATUS_INVALID_PARAMS);
820 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
821 cancel_delayed_work(&hdev->power_off);
824 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
826 err = mgmt_powered(hdev, 1);
831 if (!!cp->val == hdev_is_powered(hdev)) {
832 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
836 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
837 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
842 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
849 queue_work(hdev->req_workqueue, &hdev->power_on);
851 queue_work(hdev->req_workqueue, &hdev->power_off.work);
856 hci_dev_unlock(hdev);
860 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
861 struct sock *skip_sk)
864 struct mgmt_hdr *hdr;
866 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
870 hdr = (void *) skb_put(skb, sizeof(*hdr));
871 hdr->opcode = cpu_to_le16(event);
873 hdr->index = cpu_to_le16(hdev->id);
875 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
876 hdr->len = cpu_to_le16(data_len);
879 memcpy(skb_put(skb, data_len), data, data_len);
882 __net_timestamp(skb);
884 hci_send_to_control(skb, skip_sk);
890 static int new_settings(struct hci_dev *hdev, struct sock *skip)
894 ev = cpu_to_le32(get_current_settings(hdev));
896 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
899 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
902 struct mgmt_cp_set_discoverable *cp = data;
903 struct pending_cmd *cmd;
908 BT_DBG("request for %s", hdev->name);
910 if (!lmp_bredr_capable(hdev))
911 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
912 MGMT_STATUS_NOT_SUPPORTED);
914 if (cp->val != 0x00 && cp->val != 0x01)
915 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
916 MGMT_STATUS_INVALID_PARAMS);
918 timeout = __le16_to_cpu(cp->timeout);
919 if (!cp->val && timeout > 0)
920 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
921 MGMT_STATUS_INVALID_PARAMS);
925 if (!hdev_is_powered(hdev) && timeout > 0) {
926 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
927 MGMT_STATUS_NOT_POWERED);
931 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
932 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
933 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
938 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
939 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
940 MGMT_STATUS_REJECTED);
944 if (!hdev_is_powered(hdev)) {
945 bool changed = false;
947 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
948 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
952 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
957 err = new_settings(hdev, sk);
962 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
963 if (hdev->discov_timeout > 0) {
964 cancel_delayed_work(&hdev->discov_off);
965 hdev->discov_timeout = 0;
968 if (cp->val && timeout > 0) {
969 hdev->discov_timeout = timeout;
970 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
971 msecs_to_jiffies(hdev->discov_timeout * 1000));
974 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
978 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
987 scan |= SCAN_INQUIRY;
989 cancel_delayed_work(&hdev->discov_off);
991 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
993 mgmt_pending_remove(cmd);
996 hdev->discov_timeout = timeout;
999 hci_dev_unlock(hdev);
1003 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1005 struct pending_cmd *cmd;
1007 BT_DBG("status 0x%02x", status);
1011 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1015 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1017 mgmt_pending_remove(cmd);
1020 hci_dev_unlock(hdev);
1023 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1026 struct mgmt_mode *cp = data;
1027 struct pending_cmd *cmd;
1028 struct hci_request req;
1032 BT_DBG("request for %s", hdev->name);
1034 if (!lmp_bredr_capable(hdev))
1035 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1036 MGMT_STATUS_NOT_SUPPORTED);
1038 if (cp->val != 0x00 && cp->val != 0x01)
1039 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1040 MGMT_STATUS_INVALID_PARAMS);
1044 if (!hdev_is_powered(hdev)) {
1045 bool changed = false;
1047 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1051 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1053 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1054 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1057 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1062 err = new_settings(hdev, sk);
1067 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1068 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1069 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1074 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1075 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1079 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1090 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1091 hdev->discov_timeout > 0)
1092 cancel_delayed_work(&hdev->discov_off);
1095 hci_req_init(&req, hdev);
1097 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1099 err = hci_req_run(&req, set_connectable_complete);
1101 mgmt_pending_remove(cmd);
1104 hci_dev_unlock(hdev);
1108 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1111 struct mgmt_mode *cp = data;
1114 BT_DBG("request for %s", hdev->name);
1116 if (cp->val != 0x00 && cp->val != 0x01)
1117 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1118 MGMT_STATUS_INVALID_PARAMS);
1123 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1125 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1127 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1131 err = new_settings(hdev, sk);
1134 hci_dev_unlock(hdev);
1138 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1141 struct mgmt_mode *cp = data;
1142 struct pending_cmd *cmd;
1146 BT_DBG("request for %s", hdev->name);
1148 if (!lmp_bredr_capable(hdev))
1149 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1150 MGMT_STATUS_NOT_SUPPORTED);
1152 if (cp->val != 0x00 && cp->val != 0x01)
1153 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1154 MGMT_STATUS_INVALID_PARAMS);
1158 if (!hdev_is_powered(hdev)) {
1159 bool changed = false;
1161 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1162 &hdev->dev_flags)) {
1163 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1167 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1172 err = new_settings(hdev, sk);
1177 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1178 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1185 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1186 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1190 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1196 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1198 mgmt_pending_remove(cmd);
1203 hci_dev_unlock(hdev);
1207 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1209 struct mgmt_mode *cp = data;
1210 struct pending_cmd *cmd;
1214 BT_DBG("request for %s", hdev->name);
1216 if (!lmp_ssp_capable(hdev))
1217 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1218 MGMT_STATUS_NOT_SUPPORTED);
1220 if (cp->val != 0x00 && cp->val != 0x01)
1221 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1222 MGMT_STATUS_INVALID_PARAMS);
1228 if (!hdev_is_powered(hdev)) {
1229 bool changed = false;
1231 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1232 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1236 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1241 err = new_settings(hdev, sk);
1246 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1247 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1252 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1253 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1257 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1263 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1265 mgmt_pending_remove(cmd);
1270 hci_dev_unlock(hdev);
1274 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1276 struct mgmt_mode *cp = data;
1278 BT_DBG("request for %s", hdev->name);
1281 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1282 MGMT_STATUS_NOT_SUPPORTED);
1284 if (cp->val != 0x00 && cp->val != 0x01)
1285 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1286 MGMT_STATUS_INVALID_PARAMS);
1289 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1291 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1293 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1296 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1298 struct mgmt_mode *cp = data;
1299 struct hci_cp_write_le_host_supported hci_cp;
1300 struct pending_cmd *cmd;
1304 BT_DBG("request for %s", hdev->name);
1306 if (!lmp_le_capable(hdev))
1307 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1308 MGMT_STATUS_NOT_SUPPORTED);
1310 if (cp->val != 0x00 && cp->val != 0x01)
1311 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1312 MGMT_STATUS_INVALID_PARAMS);
1317 enabled = lmp_host_le_capable(hdev);
1319 if (!hdev_is_powered(hdev) || val == enabled) {
1320 bool changed = false;
1322 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1323 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1327 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1332 err = new_settings(hdev, sk);
1337 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1338 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1343 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1349 memset(&hci_cp, 0, sizeof(hci_cp));
1353 hci_cp.simul = lmp_le_br_capable(hdev);
1356 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1359 mgmt_pending_remove(cmd);
1362 hci_dev_unlock(hdev);
1366 /* This is a helper function to test for pending mgmt commands that can
1367 * cause CoD or EIR HCI commands. We can only allow one such pending
1368 * mgmt command at a time since otherwise we cannot easily track what
1369 * the current values are, will be, and based on that calculate if a new
1370 * HCI command needs to be sent and if yes with what value.
1372 static bool pending_eir_or_class(struct hci_dev *hdev)
1374 struct pending_cmd *cmd;
1376 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1377 switch (cmd->opcode) {
1378 case MGMT_OP_ADD_UUID:
1379 case MGMT_OP_REMOVE_UUID:
1380 case MGMT_OP_SET_DEV_CLASS:
1381 case MGMT_OP_SET_POWERED:
1389 static const u8 bluetooth_base_uuid[] = {
1390 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1391 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1394 static u8 get_uuid_size(const u8 *uuid)
1398 if (memcmp(uuid, bluetooth_base_uuid, 12))
1401 val = get_unaligned_le32(&uuid[12]);
1408 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1410 struct pending_cmd *cmd;
1414 cmd = mgmt_pending_find(mgmt_op, hdev);
1418 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1419 hdev->dev_class, 3);
1421 mgmt_pending_remove(cmd);
1424 hci_dev_unlock(hdev);
1427 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1429 BT_DBG("status 0x%02x", status);
1431 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1434 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1436 struct mgmt_cp_add_uuid *cp = data;
1437 struct pending_cmd *cmd;
1438 struct hci_request req;
1439 struct bt_uuid *uuid;
1442 BT_DBG("request for %s", hdev->name);
1446 if (pending_eir_or_class(hdev)) {
1447 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1452 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1458 memcpy(uuid->uuid, cp->uuid, 16);
1459 uuid->svc_hint = cp->svc_hint;
1460 uuid->size = get_uuid_size(cp->uuid);
1462 list_add_tail(&uuid->list, &hdev->uuids);
1464 hci_req_init(&req, hdev);
1469 err = hci_req_run(&req, add_uuid_complete);
1471 if (err != -ENODATA)
1474 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1475 hdev->dev_class, 3);
1479 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1488 hci_dev_unlock(hdev);
1492 static bool enable_service_cache(struct hci_dev *hdev)
1494 if (!hdev_is_powered(hdev))
1497 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1498 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1506 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1508 BT_DBG("status 0x%02x", status);
1510 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1513 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1516 struct mgmt_cp_remove_uuid *cp = data;
1517 struct pending_cmd *cmd;
1518 struct bt_uuid *match, *tmp;
1519 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1520 struct hci_request req;
1523 BT_DBG("request for %s", hdev->name);
1527 if (pending_eir_or_class(hdev)) {
1528 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1533 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1534 err = hci_uuids_clear(hdev);
1536 if (enable_service_cache(hdev)) {
1537 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1538 0, hdev->dev_class, 3);
1547 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1548 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1551 list_del(&match->list);
1557 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1558 MGMT_STATUS_INVALID_PARAMS);
1563 hci_req_init(&req, hdev);
1568 err = hci_req_run(&req, remove_uuid_complete);
1570 if (err != -ENODATA)
1573 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1574 hdev->dev_class, 3);
1578 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1587 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Device Class: forward the HCI
 * status to the shared class-update completion handler. */
1591 static void set_class_complete(struct hci_dev *hdev, u8 status)
1593 BT_DBG("status 0x%02x", status);
1595 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor device
 * class, then (when powered) push the new class to the controller via an
 * HCI request.  BR/EDR-only: rejected on non-BR/EDR controllers. */
1598 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1601 struct mgmt_cp_set_dev_class *cp = data;
1602 struct pending_cmd *cmd;
1603 struct hci_request req;
1606 BT_DBG("request for %s", hdev->name);
1608 if (!lmp_bredr_capable(hdev))
1609 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1610 MGMT_STATUS_NOT_SUPPORTED);
/* Reject while another EIR/class operation is pending (busy). */
1614 if (pending_eir_or_class(hdev)) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved and must be
 * zero per the Class-of-Device encoding. */
1620 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1621 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1622 MGMT_STATUS_INVALID_PARAMS);
1626 hdev->major_class = cp->major;
1627 hdev->minor_class = cp->minor;
/* Powered off: nothing to write to hardware, complete immediately with
 * the stored class. */
1629 if (!hdev_is_powered(hdev)) {
1630 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1631 hdev->dev_class, 3);
1635 hci_req_init(&req, hdev);
/* A pending service-cache flush would overwrite our update; cancel it
 * first.  The lock must be dropped around cancel_delayed_work_sync()
 * because the work item itself takes hdev's lock. */
1637 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1638 hci_dev_unlock(hdev);
1639 cancel_delayed_work_sync(&hdev->service_cache);
1646 err = hci_req_run(&req, set_class_complete);
/* -ENODATA: request queued no HCI commands — reply immediately. */
1648 if (err != -ENODATA)
1651 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1652 hdev->dev_class, 3);
/* Otherwise defer the reply until set_class_complete() runs. */
1656 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1665 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR link
 * keys with the user-supplied list.  Validates the variable-length payload
 * against key_count before touching any state, then clears and repopulates
 * the key store. */
1669 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1672 struct mgmt_cp_load_link_keys *cp = data;
1673 u16 key_count, expected_len;
1676 key_count = __le16_to_cpu(cp->key_count);
/* The command is variable-length: header plus key_count fixed-size key
 * records.  An exact-length match is required. */
1678 expected_len = sizeof(*cp) + key_count *
1679 sizeof(struct mgmt_link_key_info);
1680 if (expected_len != len) {
1681 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1683 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1684 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a strict boolean flag. */
1687 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1688 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1689 MGMT_STATUS_INVALID_PARAMS);
1691 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every address type before modifying the store, so
 * a bad entry leaves the existing keys untouched. */
1694 for (i = 0; i < key_count; i++) {
1695 struct mgmt_link_key_info *key = &cp->keys[i];
1697 if (key->addr.type != BDADDR_BREDR)
1698 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1699 MGMT_STATUS_INVALID_PARAMS);
/* Loading keys is a full replacement: drop everything stored so far. */
1704 hci_link_keys_clear(hdev);
1706 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1709 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1711 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: store the validated keys. */
1713 for (i = 0; i < key_count; i++) {
1714 struct mgmt_link_key_info *key = &cp->keys[i];
1716 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1717 key->type, key->pin_len);
1720 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1722 hci_dev_unlock(hdev);
/* Broadcast a Device Unpaired management event for the given address,
 * skipping the socket that initiated the unpair (skip_sk) so the
 * requester doesn't see its own action echoed back. */
1727 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1728 u8 addr_type, struct sock *skip_sk)
1730 struct mgmt_ev_device_unpaired ev;
1732 bacpy(&ev.addr.bdaddr, bdaddr);
1733 ev.addr.type = addr_type;
1735 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing keys for a device
 * (link key for BR/EDR, LTK for LE) and optionally disconnect it.  When a
 * disconnect is issued the reply is deferred through a pending command;
 * otherwise the command completes immediately and a Device Unpaired event
 * is broadcast. */
1739 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1742 struct mgmt_cp_unpair_device *cp = data;
1743 struct mgmt_rp_unpair_device rp;
1744 struct hci_cp_disconnect dc;
1745 struct pending_cmd *cmd;
1746 struct hci_conn *conn;
/* The reply always echoes back the target address, even on error. */
1749 memset(&rp, 0, sizeof(rp));
1750 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1751 rp.addr.type = cp->addr.type;
1753 if (!bdaddr_type_is_valid(cp->addr.type))
1754 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1755 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a strict boolean flag. */
1758 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1759 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1760 MGMT_STATUS_INVALID_PARAMS,
1765 if (!hdev_is_powered(hdev)) {
1766 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1767 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Remove the stored key appropriate to the transport. */
1771 if (cp->addr.type == BDADDR_BREDR)
1772 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1774 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
/* No key existed for this address — the device wasn't paired. */
1777 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1778 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
/* Look up an active connection on the matching transport if the caller
 * also asked us to disconnect. */
1782 if (cp->disconnect) {
1783 if (cp->addr.type == BDADDR_BREDR)
1784 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1787 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection to tear down: complete now and notify others. */
1794 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1796 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect path: defer the reply until the disconnect completes. */
1800 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1807 dc.handle = cpu_to_le16(conn->handle);
1808 dc.reason = 0x13; /* Remote User Terminated Connection */
1809 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1811 mgmt_pending_remove(cmd);
1814 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the ACL (BR/EDR) or LE connection
 * to the given address.  The reply is deferred through a pending command
 * until the HCI disconnect completes; only one disconnect may be pending
 * per adapter at a time. */
1818 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1821 struct mgmt_cp_disconnect *cp = data;
1822 struct mgmt_rp_disconnect rp;
1823 struct hci_cp_disconnect dc;
1824 struct pending_cmd *cmd;
1825 struct hci_conn *conn;
/* The reply echoes back the target address, even on error. */
1830 memset(&rp, 0, sizeof(rp));
1831 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1832 rp.addr.type = cp->addr.type;
1834 if (!bdaddr_type_is_valid(cp->addr.type))
1835 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1836 MGMT_STATUS_INVALID_PARAMS,
1841 if (!test_bit(HCI_UP, &hdev->flags)) {
1842 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1843 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one in-flight disconnect per adapter. */
1847 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1848 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1849 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1853 if (cp->addr.type == BDADDR_BREDR)
1854 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1857 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED mean there is no established link to tear down. */
1859 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1860 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1861 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1865 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1871 dc.handle = cpu_to_le16(conn->handle);
1872 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1874 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Sending failed: drop the pending command so the slot is freed. */
1876 mgmt_pending_remove(cmd);
1879 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type pair to the mgmt-API BDADDR_*
 * address-type constant used in management messages.  (The switch's case
 * labels for link_type sit on elided lines.) */
1883 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1885 switch (link_type) {
1887 switch (addr_type) {
1888 case ADDR_LE_DEV_PUBLIC:
1889 return BDADDR_LE_PUBLIC;
1892 /* Fallback to LE Random address type */
1893 return BDADDR_LE_RANDOM;
1897 /* Fallback to BR/EDR type */
1898 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: reply with the list of addresses of all
 * connections marked HCI_CONN_MGMT_CONNECTED.  Counts first to size the
 * allocation, then fills; SCO/eSCO links are filtered out of the reply and
 * the length is recomputed accordingly. */
1902 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1905 struct mgmt_rp_get_connections *rp;
1915 if (!hdev_is_powered(hdev)) {
1916 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1917 MGMT_STATUS_NOT_POWERED);
/* First pass: count mgmt-visible connections to size the reply buffer. */
1922 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1923 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1927 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1928 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the addresses, skipping SCO/eSCO audio links
 * which are not exposed through the mgmt interface. */
1935 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1936 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1938 bacpy(&rp->addr[i].bdaddr, &c->dst);
1939 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1940 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1945 rp->conn_count = cpu_to_le16(i);
1947 /* Recalculate length in case of filtered SCO connections, etc */
1948 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1950 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1956 hci_dev_unlock(hdev);
/* Queue a Pin Code Negative Reply toward the controller on behalf of a
 * mgmt client: register a pending command, then send the HCI command
 * (which only carries the target bdaddr).  On send failure the pending
 * command is removed again. */
1960 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1961 struct mgmt_cp_pin_code_neg_reply *cp)
1963 struct pending_cmd *cmd;
1966 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1971 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1972 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1974 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to the
 * controller for an ongoing BR/EDR pairing.  If the link requires a
 * 16-digit PIN (high security) and the supplied one is shorter, the reply
 * is converted into a negative reply instead. */
1979 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1982 struct hci_conn *conn;
1983 struct mgmt_cp_pin_code_reply *cp = data;
1984 struct hci_cp_pin_code_reply reply;
1985 struct pending_cmd *cmd;
1992 if (!hdev_is_powered(hdev)) {
1993 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1994 MGMT_STATUS_NOT_POWERED);
/* PIN replies only make sense for an existing ACL connection. */
1998 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2000 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2001 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a full 16-byte PIN; anything shorter is
 * rejected toward the remote via a negative reply. */
2005 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2006 struct mgmt_cp_pin_code_neg_reply ncp;
2008 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2010 BT_ERR("PIN code is not 16 bytes long");
2012 err = send_pin_code_neg_reply(sk, hdev, &ncp);
/* The mgmt caller still gets INVALID_PARAMS for the short PIN
 * (exact condition guarding this sits on an elided line). */
2014 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2015 MGMT_STATUS_INVALID_PARAMS);
2020 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2026 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2027 reply.pin_len = cp->pin_len;
2028 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2030 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2032 mgmt_pending_remove(cmd);
2035 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the requested IO capability on
 * the adapter (used for subsequent SSP/SMP pairing) and complete
 * immediately — no hardware interaction required. */
2039 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2042 struct mgmt_cp_set_io_capability *cp = data;
2048 hdev->io_capability = cp->io_capability;
2050 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2051 hdev->io_capability);
2053 hci_dev_unlock(hdev);
2055 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points at
 * the given connection, or NULL-equivalent if none (the return statements
 * sit on elided lines). */
2059 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2061 struct hci_dev *hdev = conn->hdev;
2062 struct pending_cmd *cmd;
2064 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2065 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
/* The pairing command stores its hci_conn in user_data (see
 * pair_device()), which is the match key here. */
2068 if (cmd->user_data != conn)
/* Finish a Pair Device command: send the cmd_complete reply carrying the
 * peer address and the given status, detach all pairing callbacks from the
 * connection so no further events fire, and drop the pending command. */
2077 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2079 struct mgmt_rp_pair_device rp;
2080 struct hci_conn *conn = cmd->user_data;
2082 bacpy(&rp.addr.bdaddr, &conn->dst);
2083 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2085 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2088 /* So we don't get further callbacks for this connection */
2089 conn->connect_cfm_cb = NULL;
2090 conn->security_cfm_cb = NULL;
2091 conn->disconn_cfm_cb = NULL;
2095 mgmt_pending_remove(cmd);
/* hci_conn callback fired when a pairing-related event completes on the
 * connection: locate the matching pending Pair Device command and finish
 * it with the translated HCI status. */
2098 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2100 struct pending_cmd *cmd;
2102 BT_DBG("status %u", status);
2104 cmd = find_pairing(conn);
2106 BT_DBG("Unable to find a pending command");
2108 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing: unlike BR/EDR, a successful
 * LE connection alone does not mean pairing finished, so this path only
 * finalizes the Pair Device command in specific cases (the guarding
 * condition sits on elided lines — presumably only on failure; verify). */
2111 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2113 struct pending_cmd *cmd;
2115 BT_DBG("status %u", status);
2120 cmd = find_pairing(conn);
2122 BT_DBG("Unable to find a pending command");
2124 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate a connection (ACL or LE by address
 * type) with dedicated-bonding authentication and wire up pairing-progress
 * callbacks.  The reply is deferred through a pending command and sent by
 * pairing_complete() once the pairing outcome is known. */
2127 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2130 struct mgmt_cp_pair_device *cp = data;
2131 struct mgmt_rp_pair_device rp;
2132 struct pending_cmd *cmd;
2133 u8 sec_level, auth_type;
2134 struct hci_conn *conn;
/* The reply echoes back the target address, even on error. */
2139 memset(&rp, 0, sizeof(rp));
2140 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2141 rp.addr.type = cp->addr.type;
2143 if (!bdaddr_type_is_valid(cp->addr.type))
2144 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2145 MGMT_STATUS_INVALID_PARAMS,
2150 if (!hdev_is_powered(hdev)) {
2151 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2152 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 = NoInputNoOutput: MITM protection is impossible, so
 * plain dedicated bonding is requested; otherwise demand MITM. */
2156 sec_level = BT_SECURITY_MEDIUM;
2157 if (cp->io_cap == 0x03)
2158 auth_type = HCI_AT_DEDICATED_BONDING;
2160 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2162 if (cp->addr.type == BDADDR_BREDR)
2163 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2164 cp->addr.type, sec_level, auth_type);
2166 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2167 cp->addr.type, sec_level, auth_type);
/* hci_connect() returned an error pointer: map it to a mgmt status. */
2172 if (PTR_ERR(conn) == -EBUSY)
2173 status = MGMT_STATUS_BUSY;
2175 status = MGMT_STATUS_CONNECT_FAILED;
2177 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already installed means another pairing is using
 * this connection. */
2183 if (conn->connect_cfm_cb) {
2185 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2186 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2190 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2197 /* For LE, just connecting isn't a proof that the pairing finished */
2198 if (cp->addr.type == BDADDR_BREDR)
2199 conn->connect_cfm_cb = pairing_complete_cb;
2201 conn->connect_cfm_cb = le_connect_complete_cb;
2203 conn->security_cfm_cb = pairing_complete_cb;
2204 conn->disconn_cfm_cb = pairing_complete_cb;
2205 conn->io_capability = cp->io_cap;
/* find_pairing() relies on user_data holding the connection pointer. */
2206 cmd->user_data = conn;
/* Already connected and security can be raised synchronously: the
 * pairing is effectively done — complete with success now. */
2208 if (conn->state == BT_CONNECTED &&
2209 hci_conn_security(conn, sec_level, auth_type))
2210 pairing_complete(cmd, 0);
2215 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-flight Pair Device
 * command for the given address.  The pending pairing is completed with
 * MGMT_STATUS_CANCELLED and the cancel request itself completes with the
 * echoed address. */
2219 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2222 struct mgmt_addr_info *addr = data;
2223 struct pending_cmd *cmd;
2224 struct hci_conn *conn;
2231 if (!hdev_is_powered(hdev)) {
2232 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2233 MGMT_STATUS_NOT_POWERED);
/* There must be a pairing in progress to cancel. */
2237 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2239 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2240 MGMT_STATUS_INVALID_PARAMS);
2244 conn = cmd->user_data;
/* The supplied address must match the peer of the pending pairing. */
2246 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2247 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2248 MGMT_STATUS_INVALID_PARAMS);
2252 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2254 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2255 addr, sizeof(*addr));
2257 hci_dev_unlock(hdev);
/* Common helper for the user confirm/passkey (neg-)reply mgmt commands:
 * locate the connection for bdaddr on the right transport, route LE
 * replies through SMP, and BR/EDR replies through the given HCI opcode
 * (with the passkey payload only for HCI_OP_USER_PASSKEY_REPLY).  A
 * pending command defers the reply for the HCI path. */
2261 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2262 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2263 u16 hci_op, __le32 passkey)
2265 struct pending_cmd *cmd;
2266 struct hci_conn *conn;
2271 if (!hdev_is_powered(hdev)) {
2272 err = cmd_status(sk, hdev->id, mgmt_op,
2273 MGMT_STATUS_NOT_POWERED);
2277 if (type == BDADDR_BREDR)
2278 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2280 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2283 err = cmd_status(sk, hdev->id, mgmt_op,
2284 MGMT_STATUS_NOT_CONNECTED);
/* LE pairing is handled entirely by the SMP layer; report the outcome
 * of handing over the reply and skip the HCI path below. */
2288 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2289 /* Continue with pairing via SMP */
2290 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2293 err = cmd_status(sk, hdev->id, mgmt_op,
2294 MGMT_STATUS_SUCCESS);
2296 err = cmd_status(sk, hdev->id, mgmt_op,
2297 MGMT_STATUS_FAILED);
2302 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2308 /* Continue with pairing via HCI */
/* Only the passkey reply carries a payload beyond the bdaddr. */
2309 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2310 struct hci_cp_user_passkey_reply cp;
2312 bacpy(&cp.bdaddr, bdaddr);
2313 cp.passkey = passkey;
2314 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2316 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2319 mgmt_pending_remove(cmd);
2322 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the PIN-code negative-reply opcodes and no
 * passkey. */
2326 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2327 void *data, u16 len)
2329 struct mgmt_cp_pin_code_neg_reply *cp = data;
2333 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2334 MGMT_OP_PIN_CODE_NEG_REPLY,
2335 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the fixed-size payload,
 * then delegate to user_pairing_resp() with the confirm-reply opcodes. */
2338 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2341 struct mgmt_cp_user_confirm_reply *cp = data;
/* Unlike the other wrappers, this one re-checks the exact length here. */
2345 if (len != sizeof(*cp))
2346 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2347 MGMT_STATUS_INVALID_PARAMS);
2349 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2350 MGMT_OP_USER_CONFIRM_REPLY,
2351 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the confirm negative-reply opcodes. */
2354 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2355 void *data, u16 len)
2357 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2361 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2362 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2363 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper delegating to
 * user_pairing_resp(), forwarding the user-entered passkey. */
2366 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2369 struct mgmt_cp_user_passkey_reply *cp = data;
2373 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2374 MGMT_OP_USER_PASSKEY_REPLY,
2375 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the passkey negative-reply opcodes. */
2378 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2379 void *data, u16 len)
2381 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2385 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2386 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2387 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto the
 * given request (run later by the caller via hci_req_run()). */
2390 static void update_name(struct hci_request *req)
2392 struct hci_dev *hdev = req->hdev;
2393 struct hci_cp_write_local_name cp;
2395 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2397 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: find the pending
 * mgmt command and reply with cmd_status on HCI failure or cmd_complete
 * on success, then drop the pending entry. */
2400 static void set_name_complete(struct hci_dev *hdev, u8 status)
2402 struct mgmt_cp_set_local_name *cp;
2403 struct pending_cmd *cmd;
2405 BT_DBG("status 0x%02x", status);
2409 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Failure path: translate the HCI status for the mgmt caller. */
2416 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2417 mgmt_status(status));
2419 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2422 mgmt_pending_remove(cmd);
2425 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: store the new device/short name and,
 * when powered, write it to the controller (and refresh LE advertising
 * data) via an HCI request.  Unchanged names and powered-off adapters
 * complete immediately; otherwise the reply comes from
 * set_name_complete(). */
2428 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2431 struct mgmt_cp_set_local_name *cp = data;
2432 struct pending_cmd *cmd;
2433 struct hci_request req;
2440 /* If the old values are the same as the new ones just return a
2441 * direct command complete event.
2443 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2444 !memcmp(hdev->short_name, cp->short_name,
2445 sizeof(hdev->short_name))) {
2446 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name has no hardware representation: always store it. */
2451 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: just store the name and notify other mgmt sockets. */
2453 if (!hdev_is_powered(hdev)) {
2454 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2456 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2461 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2467 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2473 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2475 hci_req_init(&req, hdev);
/* BR/EDR controllers get the HCI name write (queued on elided lines);
 * LE-capable ones also refresh advertising data with the new name. */
2477 if (lmp_bredr_capable(hdev)) {
2482 if (lmp_le_capable(hdev))
2483 hci_update_ad(&req);
2485 err = hci_req_run(&req, set_name_complete);
2487 mgmt_pending_remove(cmd);
2490 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the local SSP out-of-band
 * data (hash/randomizer) from the controller.  Requires power and SSP
 * support; only one request may be pending.  The reply is deferred until
 * the HCI command completes. */
2494 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2495 void *data, u16 data_len)
2497 struct pending_cmd *cmd;
2500 BT_DBG("%s", hdev->name);
2504 if (!hdev_is_powered(hdev)) {
2505 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2506 MGMT_STATUS_NOT_POWERED);
/* OOB data comes from Secure Simple Pairing support in the controller. */
2510 if (!lmp_ssp_capable(hdev)) {
2511 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2512 MGMT_STATUS_NOT_SUPPORTED);
2516 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2517 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2522 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2528 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2530 mgmt_pending_remove(cmd);
2533 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * (hash/randomizer) received out-of-band for a remote device, and reply
 * with success/failed plus the echoed address. */
2537 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2538 void *data, u16 len)
2540 struct mgmt_cp_add_remote_oob_data *cp = data;
2544 BT_DBG("%s ", hdev->name);
2548 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2551 status = MGMT_STATUS_FAILED;
2553 status = MGMT_STATUS_SUCCESS;
2555 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2556 &cp->addr, sizeof(cp->addr));
2558 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for a
 * remote device.  A failed removal (no entry) maps to INVALID_PARAMS in
 * the reply, which echoes the address back. */
2562 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2563 void *data, u16 len)
2565 struct mgmt_cp_remove_remote_oob_data *cp = data;
2569 BT_DBG("%s", hdev->name);
2573 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2575 status = MGMT_STATUS_INVALID_PARAMS;
2577 status = MGMT_STATUS_SUCCESS;
2579 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2580 status, &cp->addr, sizeof(cp->addr));
2582 hci_dev_unlock(hdev);
/* Kick off the BR/EDR inquiry phase of an interleaved (BR/EDR + LE)
 * discovery.  On failure the discovery state machine is reset to
 * STOPPED.  Exported (non-static) for use by the HCI core. */
2586 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2590 BT_DBG("%s", hdev->name);
2594 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2596 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2598 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: begin device discovery of the requested
 * type (BR/EDR inquiry, LE scan, or interleaved).  Rejected when powered
 * off, during periodic inquiry, or when discovery is already active.  On
 * success the discovery state machine moves to STARTING and the reply is
 * deferred through a pending command. */
2603 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2604 void *data, u16 len)
2606 struct mgmt_cp_start_discovery *cp = data;
2607 struct pending_cmd *cmd;
2610 BT_DBG("%s", hdev->name);
2614 if (!hdev_is_powered(hdev)) {
2615 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2616 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry owns the inquiry hardware — refuse (busy). */
2620 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2621 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2626 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2627 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2632 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2638 hdev->discovery.type = cp->type;
/* Dispatch on the requested transport mix; each arm re-checks that the
 * controller actually supports it. */
2640 switch (hdev->discovery.type) {
2641 case DISCOV_TYPE_BREDR:
2642 if (!lmp_bredr_capable(hdev)) {
2643 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2644 MGMT_STATUS_NOT_SUPPORTED);
2645 mgmt_pending_remove(cmd);
2649 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2652 case DISCOV_TYPE_LE:
2653 if (!lmp_host_le_capable(hdev)) {
2654 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2655 MGMT_STATUS_NOT_SUPPORTED);
2656 mgmt_pending_remove(cmd);
2660 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2661 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2664 case DISCOV_TYPE_INTERLEAVED:
/* Interleaved needs both transports; LE scan runs first, BR/EDR
 * inquiry follows (see mgmt_interleaved_discovery()). */
2665 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2666 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2667 MGMT_STATUS_NOT_SUPPORTED);
2668 mgmt_pending_remove(cmd);
2672 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2673 LE_SCAN_TIMEOUT_BREDR_LE);
2677 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2678 MGMT_STATUS_INVALID_PARAMS);
2679 mgmt_pending_remove(cmd);
2684 mgmt_pending_remove(cmd);
2686 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2689 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: cancel an active discovery of the given
 * type.  Depending on the state machine this cancels an inquiry/LE scan
 * (FINDING) or an in-progress remote name resolution (RESOLVING).  On
 * success the state moves to STOPPING and the reply is deferred. */
2693 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2696 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2697 struct pending_cmd *cmd;
2698 struct hci_cp_remote_name_req_cancel cp;
2699 struct inquiry_entry *e;
2702 BT_DBG("%s", hdev->name);
2706 if (!hci_discovery_active(hdev)) {
2707 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2708 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2709 sizeof(mgmt_cp->type));
/* The stop request must name the same discovery type that is running. */
2713 if (hdev->discovery.type != mgmt_cp->type) {
2714 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2715 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2716 sizeof(mgmt_cp->type));
2720 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2726 switch (hdev->discovery.state) {
2727 case DISCOVERY_FINDING:
/* HCI_INQUIRY set means BR/EDR inquiry is running; otherwise the
 * active phase is the LE scan. */
2728 if (test_bit(HCI_INQUIRY, &hdev->flags))
2729 err = hci_cancel_inquiry(hdev);
2731 err = hci_cancel_le_scan(hdev);
2735 case DISCOVERY_RESOLVING:
2736 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* No name request in flight: nothing to cancel, finish now. */
2739 mgmt_pending_remove(cmd);
2740 err = cmd_complete(sk, hdev->id,
2741 MGMT_OP_STOP_DISCOVERY, 0,
2743 sizeof(mgmt_cp->type));
2744 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Otherwise cancel the outstanding remote name request. */
2748 bacpy(&cp.bdaddr, &e->data.bdaddr);
2749 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2755 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2760 mgmt_pending_remove(cmd);
2762 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2765 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, tell the kernel whether
 * the user space already knows the name of a found device.  Known names
 * skip resolution (NAME_KNOWN); unknown ones are queued for resolving
 * (NAME_NEEDED). */
2769 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2772 struct mgmt_cp_confirm_name *cp = data;
2773 struct inquiry_entry *e;
2776 BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery is in progress. */
2780 if (!hci_discovery_active(hdev)) {
2781 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2782 MGMT_STATUS_FAILED);
2786 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2788 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2789 MGMT_STATUS_INVALID_PARAMS);
2793 if (cp->name_known) {
2794 e->name_state = NAME_KNOWN;
2797 e->name_state = NAME_NEEDED;
2798 hci_inquiry_cache_update_resolve(hdev, e);
2801 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2805 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the given address to the adapter
 * blacklist and reply with success/failed plus the echoed address. */
2809 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2812 struct mgmt_cp_block_device *cp = data;
2816 BT_DBG("%s", hdev->name);
2818 if (!bdaddr_type_is_valid(cp->addr.type))
2819 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2820 MGMT_STATUS_INVALID_PARAMS,
2821 &cp->addr, sizeof(cp->addr));
2825 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2827 status = MGMT_STATUS_FAILED;
2829 status = MGMT_STATUS_SUCCESS;
2831 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2832 &cp->addr, sizeof(cp->addr));
2834 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the given address from the
 * adapter blacklist.  A failed removal (not blocked) is reported as
 * INVALID_PARAMS; the reply echoes the address back. */
2839 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2842 struct mgmt_cp_unblock_device *cp = data;
2846 BT_DBG("%s", hdev->name);
2848 if (!bdaddr_type_is_valid(cp->addr.type))
2849 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2850 MGMT_STATUS_INVALID_PARAMS,
2851 &cp->addr, sizeof(cp->addr));
2855 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2857 status = MGMT_STATUS_INVALID_PARAMS;
2859 status = MGMT_STATUS_SUCCESS;
2861 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2862 &cp->addr, sizeof(cp->addr));
2864 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version) on the adapter and refresh EIR data via an
 * HCI request (queued on elided lines).  source 0x0000 disables the
 * record; values above 0x0002 are invalid per the DI profile. */
2869 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2872 struct mgmt_cp_set_device_id *cp = data;
2873 struct hci_request req;
2877 BT_DBG("%s", hdev->name);
2879 source = __le16_to_cpu(cp->source);
2881 if (source > 0x0002)
2882 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2883 MGMT_STATUS_INVALID_PARAMS);
2887 hdev->devid_source = source;
2888 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2889 hdev->devid_product = __le16_to_cpu(cp->product);
2890 hdev->devid_version = __le16_to_cpu(cp->version);
/* The mgmt reply is sent immediately; the EIR update below is
 * fire-and-forget (no completion callback). */
2892 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2894 hci_req_init(&req, hdev);
2896 hci_req_run(&req, NULL);
2898 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable: on failure
 * reply with cmd_status; on success toggle HCI_FAST_CONNECTABLE to match
 * the requested mode, reply with the current settings and broadcast a
 * New Settings event to other mgmt sockets. */
2903 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2905 struct pending_cmd *cmd;
2907 BT_DBG("status 0x%02x", status);
2911 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2916 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2917 mgmt_status(status));
/* Success: mirror the requested mode (stored in the pending command's
 * parameter buffer) into the device flag. */
2919 struct mgmt_mode *cp = cmd->param;
2922 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2924 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2926 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2927 new_settings(hdev, cmd->sk);
2930 mgmt_pending_remove(cmd);
2933 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: switch page scan between fast
 * (interlaced, 160 ms interval) and standard (1.28 s interval) modes.
 * Requires a powered, connectable BR/EDR >= 1.2 controller; no-op when
 * the requested mode already matches.  The reply is deferred to
 * fast_connectable_complete(). */
2936 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2937 void *data, u16 len)
2939 struct mgmt_mode *cp = data;
2940 struct hci_cp_write_page_scan_activity acp;
2941 struct pending_cmd *cmd;
2942 struct hci_request req;
2946 BT_DBG("%s", hdev->name);
/* Page scan type/activity commands need HCI >= 1.2 and BR/EDR. */
2948 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
2949 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2950 MGMT_STATUS_NOT_SUPPORTED);
2952 if (cp->val != 0x00 && cp->val != 0x01)
2953 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2954 MGMT_STATUS_INVALID_PARAMS);
2956 if (!hdev_is_powered(hdev))
2957 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2958 MGMT_STATUS_NOT_POWERED);
/* Page scan is only running while connectable; otherwise reject. */
2960 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2961 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2962 MGMT_STATUS_REJECTED);
2966 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
2967 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested mode: short-circuit with current settings. */
2972 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
2973 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
2979 type = PAGE_SCAN_TYPE_INTERLACED;
2981 /* 160 msec page scan interval */
2982 acp.interval = __constant_cpu_to_le16(0x0100);
2984 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2986 /* default 1.28 sec page scan */
2987 acp.interval = __constant_cpu_to_le16(0x0800);
2990 /* default 11.25 msec page scan window */
2991 acp.window = __constant_cpu_to_le16(0x0012);
2993 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3000 hci_req_init(&req, hdev);
3002 hci_req_add(&req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), &acp);
3003 hci_req_add(&req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
3005 err = hci_req_run(&req, fast_connectable_complete);
/* Request could not be started: report failure and free the pending
 * command slot. */
3007 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3008 MGMT_STATUS_FAILED);
3009 mgmt_pending_remove(cmd);
3013 hci_dev_unlock(hdev);
/* Validate one LTK record from a Load Long Term Keys command: the
 * authenticated and master fields must be strict booleans and the address
 * type must be an LE type (return statements on elided lines). */
3018 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3020 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3022 if (key->master != 0x00 && key->master != 0x01)
3024 if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the adapter's stored SMP
 * long term keys with the user-supplied list.  Mirrors load_link_keys():
 * validate the variable-length payload and every record first, then clear
 * and repopulate the LTK store. */
3029 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3030 void *cp_data, u16 len)
3032 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3033 u16 key_count, expected_len;
3036 key_count = __le16_to_cpu(cp->key_count);
/* Variable-length command: header plus key_count LTK records, exact
 * length required. */
3038 expected_len = sizeof(*cp) + key_count *
3039 sizeof(struct mgmt_ltk_info);
3040 if (expected_len != len) {
3041 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3043 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3044 MGMT_STATUS_INVALID_PARAMS);
3047 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all records before mutating any state. */
3049 for (i = 0; i < key_count; i++) {
3050 struct mgmt_ltk_info *key = &cp->keys[i];
3052 if (!ltk_is_valid(key))
3053 return cmd_status(sk, hdev->id,
3054 MGMT_OP_LOAD_LONG_TERM_KEYS,
3055 MGMT_STATUS_INVALID_PARAMS);
/* Loading is a full replacement of the stored LTKs. */
3060 hci_smp_ltks_clear(hdev);
3062 for (i = 0; i < key_count; i++) {
3063 struct mgmt_ltk_info *key = &cp->keys[i];
/* type selection for the master case sits on elided lines. */
3069 type = HCI_SMP_LTK_SLAVE;
3071 hci_add_ltk(hdev, &key->addr.bdaddr,
3072 bdaddr_to_le(key->addr.type),
3073 type, 0, key->authenticated, key->val,
3074 key->enc_size, key->ediv, key->rand);
3077 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3080 hci_dev_unlock(hdev);
/* Dispatch table for management commands, indexed by MGMT_OP_* opcode
 * (entry 0 is unused).  Each entry holds the handler, whether the command
 * is variable-length (true = data_len is a minimum, false = exact), and
 * the expected parameter size.  Order must match the opcode numbering in
 * <net/bluetooth/mgmt.h>; mgmt_control() bounds-checks against this
 * array's size. */
3085 static const struct mgmt_handler {
3086 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3090 } mgmt_handlers[] = {
3091 { NULL }, /* 0x0000 (no command) */
3092 { read_version, false, MGMT_READ_VERSION_SIZE },
3093 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3094 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3095 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3096 { set_powered, false, MGMT_SETTING_SIZE },
3097 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3098 { set_connectable, false, MGMT_SETTING_SIZE },
3099 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3100 { set_pairable, false, MGMT_SETTING_SIZE },
3101 { set_link_security, false, MGMT_SETTING_SIZE },
3102 { set_ssp, false, MGMT_SETTING_SIZE },
3103 { set_hs, false, MGMT_SETTING_SIZE },
3104 { set_le, false, MGMT_SETTING_SIZE },
3105 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3106 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3107 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3108 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* The two key-load commands are variable-length: their size fields are
 * minimums, with the actual length validated inside the handlers. */
3109 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3110 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3111 { disconnect, false, MGMT_DISCONNECT_SIZE },
3112 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3113 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3114 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3115 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3116 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3117 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3118 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3119 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3120 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3121 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3122 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3123 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3124 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3125 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3126 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3127 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3128 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3129 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3130 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3131 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for a control message written to an HCI mgmt socket.
 * Copies the message from the user iovec, validates the mgmt_hdr
 * (opcode/index/len), resolves the controller index to an hdev,
 * then dispatches to the matching mgmt_handlers[] entry.
 * NOTE(review): excerpt — several error-return and cleanup lines
 * (kfree of buf, hci_dev_put, goto labels) fall between the numbered
 * lines below and are not visible here.
 */
3135 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3139 struct mgmt_hdr *hdr;
3140 u16 opcode, index, len;
3141 struct hci_dev *hdev = NULL;
3142 const struct mgmt_handler *handler;
3145 BT_DBG("got %zu bytes", msglen);
3147 if (msglen < sizeof(*hdr))
3150 buf = kmalloc(msglen, GFP_KERNEL);
3154 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* All header fields arrive little-endian on the wire. */
3160 opcode = __le16_to_cpu(hdr->opcode);
3161 index = __le16_to_cpu(hdr->index);
3162 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must exactly match the remaining bytes. */
3164 if (len != msglen - sizeof(*hdr)) {
3169 if (index != MGMT_INDEX_NONE) {
3170 hdev = hci_dev_get(index);
3172 err = cmd_status(sk, index, opcode,
3173 MGMT_STATUS_INVALID_INDEX);
3178 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3179 mgmt_handlers[opcode].func == NULL) {
3180 BT_DBG("Unknown op %u", opcode);
3181 err = cmd_status(sk, index, opcode,
3182 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below MGMT_OP_READ_INFO are global (no controller index);
 * everything from READ_INFO up requires a valid hdev. */
3186 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3187 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3188 err = cmd_status(sk, index, opcode,
3189 MGMT_STATUS_INVALID_INDEX);
3193 handler = &mgmt_handlers[opcode];
/* Variable-length commands set a minimum size; fixed-length ones
 * must match data_len exactly. */
3195 if ((handler->var_len && len < handler->data_len) ||
3196 (!handler->var_len && len != handler->data_len)) {
3197 err = cmd_status(sk, index, opcode,
3198 MGMT_STATUS_INVALID_PARAMS);
3203 mgmt_init_hdev(sk, hdev);
/* Handler parameters start right after the fixed header. */
3205 cp = buf + sizeof(*hdr);
3207 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status code pointed to by @data, then remove it from the pending
 * list. */
3221 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3225 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3226 mgmt_pending_remove(cmd);
/* Announce a newly registered controller index to mgmt listeners.
 * Controllers rejected by mgmt_valid_hdev() are skipped (return path
 * not visible in this excerpt). */
3229 int mgmt_index_added(struct hci_dev *hdev)
3231 if (!mgmt_valid_hdev(hdev))
3234 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Controller is going away: fail every pending command (opcode 0 =
 * all opcodes) with INVALID_INDEX, then emit the INDEX_REMOVED
 * event. */
3237 int mgmt_index_removed(struct hci_dev *hdev)
3239 u8 status = MGMT_STATUS_INVALID_INDEX;
3241 if (!mgmt_valid_hdev(hdev))
3244 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3246 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3251 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer the pending command with
 * the current settings and record the first socket seen in the
 * cmd_lookup so the caller can later skip it when broadcasting
 * new_settings(). */
3255 static void settings_rsp(struct pending_cmd *cmd, void *data)
3257 struct cmd_lookup *match = data;
3259 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3261 list_del(&cmd->list);
3263 if (match->sk == NULL) {
3264 match->sk = cmd->sk;
/* Hold the socket; presumably released by the caller after the
 * settings event is sent — release path not visible here. */
3265 sock_hold(match->sk);
3268 mgmt_pending_free(cmd);
/* Queue a WRITE_SCAN_ENABLE command reflecting the CONNECTABLE /
 * DISCOVERABLE dev_flags.  NOTE(review): excerpt — the 'scan'
 * declaration and the SCAN_PAGE assignment line are missing. */
3271 static void set_bredr_scan(struct hci_request *req)
3273 struct hci_dev *hdev = req->hdev;
3276 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3278 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3279 scan |= SCAN_INQUIRY;
3282 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): answer all pending SET_POWERED commands and
 * broadcast the new settings (hci_dev_lock presumably taken on a
 * missing line before the foreach — confirm against full source). */
3285 static void powered_complete(struct hci_dev *hdev, u8 status)
3287 struct cmd_lookup match = { NULL, hdev };
3289 BT_DBG("status 0x%02x", status);
3293 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3295 new_settings(hdev, match.sk);
3297 hci_dev_unlock(hdev);
/* After power-on, sync controller state with the mgmt-level dev_flags:
 * enable SSP, LE host support, link-level authentication and BR/EDR
 * scanning as needed, all batched into one hci_request that completes
 * via powered_complete().
 * NOTE(review): excerpt — declarations (ssp, link_sec), cp.le
 * assignment, and closing braces are missing between lines. */
3303 static int powered_update_hci(struct hci_dev *hdev)
3305 struct hci_request req;
3308 hci_req_init(&req, hdev);
/* SSP wanted at mgmt level but not yet enabled on the controller. */
3310 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3311 !lmp_host_ssp_capable(hdev)) {
3314 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3317 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3318 struct hci_cp_write_le_host_supported cp;
3321 cp.simul = lmp_le_br_capable(hdev);
3323 /* Check first if we already have the right
3324 * host state (host features set)
3326 if (cp.le != lmp_host_le_capable(hdev) ||
3327 cp.simul != lmp_host_le_br_capable(hdev))
3328 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Only touch AUTH_ENABLE when the desired state differs from the
 * controller's current HCI_AUTH flag. */
3332 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3333 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3334 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3335 sizeof(link_sec), &link_sec);
3337 if (lmp_bredr_capable(hdev)) {
3338 set_bredr_scan(&req);
3344 return hci_req_run(&req, powered_complete);
/* Power state changed.  On power-up, kick off powered_update_hci()
 * (whose completion handles the pending responses); on power-down,
 * answer SET_POWERED commands, fail everything else with NOT_POWERED,
 * and signal a zeroed class of device if one was set.
 * NOTE(review): excerpt — the powered/else branch structure and some
 * returns are only partially visible. */
3347 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3349 struct cmd_lookup match = { NULL, hdev };
3350 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3351 u8 zero_cod[] = { 0, 0, 0 };
3354 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* If the HCI request was queued successfully, responses are deferred
 * to powered_complete(); only on failure answer here directly. */
3358 if (powered_update_hci(hdev) == 0)
3361 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3366 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3367 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3369 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3370 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3371 zero_cod, sizeof(zero_cod), NULL);
3374 err = new_settings(hdev, match.sk);
/* Discoverable state changed at the HCI level: update the
 * HCI_DISCOVERABLE flag, answer pending SET_DISCOVERABLE commands,
 * and (when the flag actually changed) broadcast new settings. */
3382 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3384 struct cmd_lookup match = { NULL, hdev };
3385 bool changed = false;
3389 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3392 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3396 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3400 err = new_settings(hdev, match.sk)
/* Connectable state changed: update HCI_CONNECTABLE and, if a
 * SET_CONNECTABLE command is pending, attribute the settings event
 * to its socket so the sender is skipped. */
3408 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3410 struct pending_cmd *cmd;
3411 bool changed = false;
3415 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3418 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3422 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3425 err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* A WRITE_SCAN_ENABLE command failed: fail the pending mgmt commands
 * that requested each scan mode (page scan -> SET_CONNECTABLE,
 * inquiry scan -> SET_DISCOVERABLE) with the translated status. */
3430 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3432 u8 mgmt_err = mgmt_status(status);
3434 if (scan & SCAN_PAGE)
3435 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3436 cmd_status_rsp, &mgmt_err);
3438 if (scan & SCAN_INQUIRY)
3439 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3440 cmd_status_rsp, &mgmt_err);
/* A new BR/EDR link key was created: emit NEW_LINK_KEY so userspace
 * can decide whether to store it (store_hint = persistent). */
3445 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3448 struct mgmt_ev_new_link_key ev;
3450 memset(&ev, 0, sizeof(ev));
3452 ev.store_hint = persistent;
3453 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3454 ev.key.addr.type = BDADDR_BREDR;
3455 ev.key.type = key->type;
3456 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3457 ev.key.pin_len = key->pin_len;
3459 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* A new LE Long Term Key was distributed: emit NEW_LONG_TERM_KEY.
 * NOTE(review): excerpt — the body of the HCI_SMP_LTK branch (ev.key
 * master assignment, presumably) is missing. */
3462 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3464 struct mgmt_ev_new_long_term_key ev;
3466 memset(&ev, 0, sizeof(ev));
3468 ev.store_hint = persistent;
3469 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3470 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3471 ev.key.authenticated = key->authenticated;
3472 ev.key.enc_size = key->enc_size;
3473 ev.key.ediv = key->ediv;
3475 if (key->type == HCI_SMP_LTK)
3478 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3479 memcpy(ev.key.val, key->val, sizeof(key->val));
3481 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* A remote device connected: build a DEVICE_CONNECTED event carrying
 * the address, flags, and an EIR blob with the name (and class of
 * device when non-zero).  NOTE(review): excerpt — 'buf' declaration
 * and the name_len guard around eir_append_data are missing. */
3485 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3486 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3490 struct mgmt_ev_device_connected *ev = (void *) buf;
3493 bacpy(&ev->addr.bdaddr, bdaddr);
3494 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3496 ev->flags = __cpu_to_le32(flags);
3499 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append CoD when it is set ("\0\0\0" means unset). */
3502 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3503 eir_len = eir_append_data(ev->eir, eir_len,
3504 EIR_CLASS_OF_DEV, dev_class, 3);
3506 ev->eir_len = cpu_to_le16(eir_len);
3508 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3509 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command successfully, echoing the address from its parameters, and
 * hand back its socket via *sk for the subsequent event (socket
 * hold/assignment lines not visible in this excerpt). */
3512 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3514 struct mgmt_cp_disconnect *cp = cmd->param;
3515 struct sock **sk = data;
3516 struct mgmt_rp_disconnect rp;
3518 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3519 rp.addr.type = cp->addr.type;
3521 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3527 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command — notify about the unpaired device, complete the command
 * with success, and drop it from the pending list. */
3530 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3532 struct hci_dev *hdev = data;
3533 struct mgmt_cp_unpair_device *cp = cmd->param;
3534 struct mgmt_rp_unpair_device rp;
3536 memset(&rp, 0, sizeof(rp));
3537 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3538 rp.addr.type = cp->addr.type;
3540 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3542 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3544 mgmt_pending_remove(cmd);
/* A connection dropped: complete any pending DISCONNECT commands
 * (collecting the requester's socket so it is skipped by the event),
 * emit DEVICE_DISCONNECTED, then flush pending UNPAIR_DEVICE
 * commands.  NOTE(review): excerpt — ev.reason assignment and the
 * sock_put/return tail are missing. */
3547 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3548 u8 link_type, u8 addr_type, u8 reason)
3550 struct mgmt_ev_device_disconnected ev;
3551 struct sock *sk = NULL;
3554 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3556 bacpy(&ev.addr.bdaddr, bdaddr);
3557 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3560 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3566 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* An HCI disconnect attempt failed: still flush pending
 * UNPAIR_DEVICE commands, then complete the pending DISCONNECT
 * command (if any) with the translated failure status. */
3572 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3573 u8 link_type, u8 addr_type, u8 status)
3575 struct mgmt_rp_disconnect rp;
3576 struct pending_cmd *cmd;
3579 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3582 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3586 bacpy(&rp.addr.bdaddr, bdaddr);
3587 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3589 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3590 mgmt_status(status), &rp, sizeof(rp));
3592 mgmt_pending_remove(cmd);
/* An outgoing connection attempt failed: emit CONNECT_FAILED with
 * the remote address and translated HCI status. */
3597 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3598 u8 addr_type, u8 status)
3600 struct mgmt_ev_connect_failed ev;
3602 bacpy(&ev.addr.bdaddr, bdaddr);
3603 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3604 ev.status = mgmt_status(status);
3606 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Controller asked for a PIN code: forward as PIN_CODE_REQUEST
 * (ev.secure assignment from @secure not visible in this excerpt). */
3609 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3611 struct mgmt_ev_pin_code_request ev;
3613 bacpy(&ev.addr.bdaddr, bdaddr);
3614 ev.addr.type = BDADDR_BREDR;
3617 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* HCI completed our PIN code reply: finish the pending
 * PIN_CODE_REPLY command with the translated status. */
3621 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3624 struct pending_cmd *cmd;
3625 struct mgmt_rp_pin_code_reply rp;
3628 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3632 bacpy(&rp.addr.bdaddr, bdaddr);
3633 rp.addr.type = BDADDR_BREDR;
3635 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3636 mgmt_status(status), &rp, sizeof(rp));
3638 mgmt_pending_remove(cmd);
/* HCI completed our PIN code negative reply: finish the pending
 * PIN_CODE_NEG_REPLY command with the translated status. */
3643 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3646 struct pending_cmd *cmd;
3647 struct mgmt_rp_pin_code_reply rp;
3650 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3654 bacpy(&rp.addr.bdaddr, bdaddr);
3655 rp.addr.type = BDADDR_BREDR;
3657 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3658 mgmt_status(status), &rp, sizeof(rp));
3660 mgmt_pending_remove(cmd);
/* SSP user-confirmation requested by the controller: forward as a
 * USER_CONFIRM_REQUEST event (ev.value assignment from @value not
 * visible in this excerpt). */
3665 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3666 u8 link_type, u8 addr_type, __le32 value,
3669 struct mgmt_ev_user_confirm_request ev;
3671 BT_DBG("%s", hdev->name);
3673 bacpy(&ev.addr.bdaddr, bdaddr);
3674 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3675 ev.confirm_hint = confirm_hint;
3678 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Controller asked the user to enter a passkey: forward as a
 * USER_PASSKEY_REQUEST event. */
3682 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3683 u8 link_type, u8 addr_type)
3685 struct mgmt_ev_user_passkey_request ev;
3687 BT_DBG("%s", hdev->name);
3689 bacpy(&ev.addr.bdaddr, bdaddr);
3690 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3692 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion path for the four user confirm/passkey
 * (neg-)reply commands: find the pending command for @opcode,
 * complete it with the translated status and the echoed address. */
3696 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3697 u8 link_type, u8 addr_type, u8 status,
3700 struct pending_cmd *cmd;
3701 struct mgmt_rp_user_confirm_reply rp;
3704 cmd = mgmt_pending_find(opcode, hdev);
3708 bacpy(&rp.addr.bdaddr, bdaddr);
3709 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3710 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3713 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
3718 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3719 u8 link_type, u8 addr_type, u8 status)
3721 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3722 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY (the
 * 'status' argument line is not visible in this excerpt). */
3725 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3726 u8 link_type, u8 addr_type, u8 status)
3728 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3730 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
3733 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3734 u8 link_type, u8 addr_type, u8 status)
3736 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3737 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY (the
 * 'status' argument line is not visible in this excerpt). */
3740 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3741 u8 link_type, u8 addr_type, u8 status)
3743 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3745 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Display-passkey notification from the controller: forward as
 * PASSKEY_NOTIFY so the UI can show the passkey and how many digits
 * the remote side has entered. */
3748 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3749 u8 link_type, u8 addr_type, u32 passkey,
3752 struct mgmt_ev_passkey_notify ev;
3754 BT_DBG("%s", hdev->name);
3756 bacpy(&ev.addr.bdaddr, bdaddr);
3757 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3758 ev.passkey = __cpu_to_le32(passkey);
3759 ev.entered = entered;
3761 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication with a remote device failed: emit AUTH_FAILED with
 * the translated status. */
3764 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3765 u8 addr_type, u8 status)
3767 struct mgmt_ev_auth_failed ev;
3769 bacpy(&ev.addr.bdaddr, bdaddr);
3770 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3771 ev.status = mgmt_status(status);
3773 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* WRITE_AUTH_ENABLE completed: on failure, fail all pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY with
 * the controller's HCI_AUTH flag, answer the pending commands, and
 * broadcast new settings when the flag changed.
 * NOTE(review): excerpt — the if(status)/else structure and changed
 * assignments are only partially visible. */
3776 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3778 struct cmd_lookup match = { NULL, hdev };
3779 bool changed = false;
3783 u8 mgmt_err = mgmt_status(status);
3784 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3785 cmd_status_rsp, &mgmt_err);
3789 if (test_bit(HCI_AUTH, &hdev->flags)) {
3790 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3793 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3797 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3801 err = new_settings(hdev, match.sk);
/* Queue a WRITE_EIR command with an all-zero record, clearing the
 * controller's extended inquiry response; no-op when the controller
 * lacks extended-inquiry support. */
3809 static void clear_eir(struct hci_request *req)
3811 struct hci_dev *hdev = req->hdev;
3812 struct hci_cp_write_eir cp;
3814 if (!lmp_ext_inq_capable(hdev))
3817 memset(hdev->eir, 0, sizeof(hdev->eir));
3819 memset(&cp, 0, sizeof(cp));
3821 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* WRITE_SSP_MODE completed: on failure, roll back HCI_SSP_ENABLED if
 * we had optimistically set it and fail pending SET_SSP commands; on
 * success, sync the flag, answer pending commands, broadcast settings
 * if changed, and refresh/clear the EIR accordingly.
 * NOTE(review): excerpt — the if(status)/else scaffolding, the
 * update_eir branch, and closing braces are missing. */
3824 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3826 struct cmd_lookup match = { NULL, hdev };
3827 struct hci_request req;
3828 bool changed = false;
3832 u8 mgmt_err = mgmt_status(status);
/* Failed enable: undo the optimistic flag set and tell listeners. */
3834 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3836 err = new_settings(hdev, NULL);
3838 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3845 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3848 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3852 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3855 err = new_settings(hdev, match.sk);
3860 hci_req_init(&req, hdev);
3862 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3867 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: grab (and hold) the first pending
 * command's socket into the cmd_lookup, without completing or
 * removing the command. */
3872 static void sk_lookup(struct pending_cmd *cmd, void *data)
3874 struct cmd_lookup *match = data;
3876 if (match->sk == NULL) {
3877 match->sk = cmd->sk;
3878 sock_hold(match->sk);
/* Class-of-device update completed: find the socket of whichever
 * command triggered it (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) so
 * the CLASS_OF_DEV_CHANGED event skips the originator. */
3882 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3885 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3888 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3889 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3890 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3893 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Local name write completed: store the name, and emit
 * LOCAL_NAME_CHANGED unless the write was part of the power-on
 * sequence (a SET_POWERED command still pending).
 * NOTE(review): excerpt — the status handling and the condition
 * guarding the dev_name copy are only partially visible. */
3902 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3904 struct mgmt_cp_set_local_name ev;
3905 struct pending_cmd *cmd;
3910 memset(&ev, 0, sizeof(ev));
3911 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3912 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3914 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3916 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3918 /* If this is a HCI command related to powering on the
3919 * HCI dev don't send any mgmt signals.
3921 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3925 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3926 cmd ? cmd->sk : NULL);
/* READ_LOCAL_OOB_DATA completed: on failure return a status reply;
 * on success return the hash + randomizer to the pending command's
 * socket.  NOTE(review): excerpt — the if(status)/else scaffolding
 * is only partially visible. */
3929 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3930 u8 *randomizer, u8 status)
3932 struct pending_cmd *cmd;
3935 BT_DBG("%s status %u", hdev->name, status);
3937 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3942 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3943 mgmt_status(status));
3945 struct mgmt_rp_read_local_oob_data rp;
3947 memcpy(rp.hash, hash, sizeof(rp.hash));
3948 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3950 err = cmd_complete(cmd->sk, hdev->id,
3951 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3955 mgmt_pending_remove(cmd);
/* WRITE_LE_HOST_SUPPORTED completed: mirrors
 * mgmt_ssp_enable_complete() — roll back HCI_LE_ENABLED on a failed
 * enable, otherwise sync the flag, answer pending SET_LE commands,
 * and broadcast settings if changed.
 * NOTE(review): excerpt — branch scaffolding and 'changed'
 * assignments are missing between lines. */
3960 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3962 struct cmd_lookup match = { NULL, hdev };
3963 bool changed = false;
3967 u8 mgmt_err = mgmt_status(status);
3969 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3971 err = new_settings(hdev, NULL);
3973 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3980 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3983 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3987 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3990 err = new_settings(hdev, match.sk);
/* Discovery result: build a DEVICE_FOUND event with address, rssi,
 * flags (confirm-name / legacy-pairing hints) and the EIR data,
 * appending the class of device if the EIR does not already carry
 * one.  NOTE(review): excerpt — 'buf' declaration, rssi assignment
 * and the cfm_name/ssp conditionals are only partially visible. */
3998 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3999 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4000 ssp, u8 *eir, u16 eir_len)
4003 struct mgmt_ev_device_found *ev = (void *) buf;
4006 /* Leave 5 bytes for a potential CoD field */
4007 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4010 memset(buf, 0, sizeof(buf));
4012 bacpy(&ev->addr.bdaddr, bdaddr);
4013 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4016 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4018 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4021 memcpy(ev->eir, eir, eir_len);
/* Append CoD only when the incoming EIR lacks one. */
4023 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4024 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4027 ev->eir_len = cpu_to_le16(eir_len);
4028 ev_size = sizeof(*ev) + eir_len;
4030 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Remote name resolved during discovery: emit a DEVICE_FOUND event
 * whose EIR carries only the complete name (buffer sized for the
 * name TLV: 2 bytes of EIR header + name). */
4033 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4034 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4036 struct mgmt_ev_device_found *ev;
4037 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4040 ev = (struct mgmt_ev_device_found *) buf;
4042 memset(buf, 0, sizeof(buf));
4044 bacpy(&ev->addr.bdaddr, bdaddr);
4045 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4048 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4051 ev->eir_len = cpu_to_le16(eir_len);
4053 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4054 sizeof(*ev) + eir_len, NULL);
/* Starting discovery failed at the HCI level: reset discovery state
 * to STOPPED and complete the pending START_DISCOVERY command with
 * the translated status, echoing the requested discovery type. */
4057 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4059 struct pending_cmd *cmd;
4063 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4065 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4069 type = hdev->discovery.type;
4071 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4072 &type, sizeof(type));
4073 mgmt_pending_remove(cmd);
/* Stopping discovery failed: complete the pending STOP_DISCOVERY
 * command with the translated status and the current discovery
 * type. */
4078 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4080 struct pending_cmd *cmd;
4083 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4087 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4088 &hdev->discovery.type, sizeof(hdev->discovery.type));
4089 mgmt_pending_remove(cmd);
/* Discovery state toggled: complete whichever START_/STOP_DISCOVERY
 * command is pending (looked up depending on the new state — the
 * selecting conditionals are not visible in this excerpt), then
 * broadcast a DISCOVERING event. */
4094 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4096 struct mgmt_ev_discovering ev;
4097 struct pending_cmd *cmd;
4099 BT_DBG("%s discovering %u", hdev->name, discovering);
4102 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4104 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4107 u8 type = hdev->discovery.type;
4109 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4111 mgmt_pending_remove(cmd);
4114 memset(&ev, 0, sizeof(ev));
4115 ev.type = hdev->discovery.type;
4116 ev.discovering = discovering;
4118 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* A device was added to the block list: emit DEVICE_BLOCKED,
 * skipping the socket of any pending BLOCK_DEVICE command that
 * triggered it. */
4121 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4123 struct pending_cmd *cmd;
4124 struct mgmt_ev_device_blocked ev;
4126 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4128 bacpy(&ev.addr.bdaddr, bdaddr);
4129 ev.addr.type = type;
4131 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4132 cmd ? cmd->sk : NULL);
/* A device was removed from the block list: emit DEVICE_UNBLOCKED,
 * skipping the socket of any pending UNBLOCK_DEVICE command. */
4135 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4137 struct pending_cmd *cmd;
4138 struct mgmt_ev_device_unblocked ev;
4140 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4142 bacpy(&ev.addr.bdaddr, bdaddr);
4143 ev.addr.type = type;
4145 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4146 cmd ? cmd->sk : NULL);
/* Module parameter: toggles High Speed (AMP) support at load time;
 * 0644 makes it root-writable/world-readable via sysfs. */
4149 module_param(enable_hs, bool, 0644);
4150 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");