2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Table of management opcodes this kernel advertises via Read Management
 * Commands. NOTE(review): gaps in the embedded numbering (42-43, 47, 49-51,
 * 54-55, 58, 63, 75-76) show entries and the closing brace were lost in
 * extraction — restore from upstream before relying on this table.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Table of management events this kernel can emit (reported via Read
 * Management Commands). NOTE(review): numbering gaps (83, 85, 88, 96-98)
 * indicate missing entries and the closing brace — verify against upstream.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* LE scan / inquiry timing constants (values in 0.625ms slots for the scan
 * window/interval, milliseconds for the timeouts). hdev_is_powered() treats a
 * device that is up but pending auto-power-off as not powered from mgmt's
 * point of view.
 */
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; each entry is the MGMT status to
 * report to user space for that HCI error. Entry 0 (Success) and the closing
 * brace are missing from this extraction — verify against upstream.
 */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
/* Queue a MGMT_EV_CMD_STATUS event on the requesting socket. Used for every
 * error reply in this file. NOTE(review): extraction dropped the skb
 * declaration, the alloc_skb() NULL check, the ev->status assignment and the
 * error/return path — restore from upstream.
 */
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
/* Queue a MGMT_EV_CMD_COMPLETE event carrying rp/rp_len as the response
 * payload. NOTE(review): skb declaration, NULL check, ev->status assignment
 * and error path were lost in extraction — restore from upstream.
 */
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the static MGMT_VERSION /
 * MGMT_REVISION pair. Index-independent (MGMT_INDEX_NONE).
 */
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported opcode and event
 * tables, each as a little-endian u16 list. NOTE(review): local declarations
 * (i, opcode, rp_size, err), the kmalloc NULL check and the kfree/return tail
 * were lost in extraction.
 */
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
/* Events are appended directly after the command opcodes in the same array. */
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all valid (BR/EDR)
 * controllers that have finished setup. Walks hci_dev_list twice under the
 * read lock: once to count (counting loop body lost in extraction), once to
 * fill. NOTE(review): local declarations, the first loop's count increment,
 * the kmalloc NULL check and the kfree/return tail are missing.
 */
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
/* GFP_ATOMIC: allocation happens while holding hci_dev_list_lock. */
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
/* Recompute length: devices may have appeared/vanished between the walks. */
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the bitmask of settings this controller could support, derived from
 * its LMP feature bits. Powered/pairable are always possible; the rest depend
 * on BR/EDR, SSP and LE capability. NOTE(review): the `u32 settings = 0;`
 * declaration, the HS capability condition and the final return were lost in
 * extraction.
 */
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
/* Interlaced/fast page scan needs at least Bluetooth 1.2. */
387 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
388 settings |= MGMT_SETTING_FAST_CONNECTABLE;
389 settings |= MGMT_SETTING_DISCOVERABLE;
390 settings |= MGMT_SETTING_BREDR;
391 settings |= MGMT_SETTING_LINK_SECURITY;
395 settings |= MGMT_SETTING_HS;
397 if (lmp_le_capable(hdev))
398 settings |= MGMT_SETTING_LE;
/* Build the bitmask of settings currently active, read from hdev flags.
 * NOTE(review): the `u32 settings = 0;` declaration and the final return were
 * lost in extraction.
 */
403 static u32 get_current_settings(struct hci_dev *hdev)
407 if (hdev_is_powered(hdev))
408 settings |= MGMT_SETTING_POWERED;
410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_CONNECTABLE;
413 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_FAST_CONNECTABLE;
416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_DISCOVERABLE;
419 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_PAIRABLE;
422 if (lmp_bredr_capable(hdev))
423 settings |= MGMT_SETTING_BREDR;
425 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LE;
428 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
429 settings |= MGMT_SETTING_LINK_SECURITY;
431 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_SSP;
434 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_HS;
440 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing all registered 16-bit UUIDs to `data`,
 * returning the advanced write pointer. The PnP Information UUID is skipped
 * (handled by the separate Device ID EIR field). When space runs out the
 * field type is downgraded from UUID16_ALL to UUID16_SOME. NOTE(review):
 * the uuids_start initialisation block, break statements and final return
 * were lost in extraction.
 */
442 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
444 u8 *ptr = data, *uuids_start = NULL;
445 struct bt_uuid *uuid;
450 list_for_each_entry(uuid, &hdev->uuids, list) {
453 if (uuid->size != 16)
/* 16-bit alias lives at bytes 12..13 of the 128-bit base-form UUID. */
456 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
460 if (uuid16 == PNP_INFO_SVCLASS_ID)
466 uuids_start[1] = EIR_UUID16_ALL;
470 /* Stop if not enough space to put next UUID */
471 if ((ptr - data) + sizeof(u16) > len) {
472 uuids_start[1] = EIR_UUID16_SOME;
/* Emit little-endian, low byte first. */
476 *ptr++ = (uuid16 & 0x00ff);
477 *ptr++ = (uuid16 & 0xff00) >> 8;
478 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing all registered 32-bit UUIDs, mirroring
 * create_uuid16_list(). NOTE(review): field-header initialisation, break
 * statements and the final return were lost in extraction.
 */
486 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
487 u8 *ptr = data, *uuids_start = NULL;
488 struct bt_uuid *uuid;
492 list_for_each_entry(uuid, &hdev->uuids, list) {
493 if (uuid->size != 32)
499 uuids_start[1] = EIR_UUID32_ALL;
503 /* Stop if not enough space to put next UUID */
504 if ((ptr - data) + sizeof(u32) > len) {
505 uuids_start[1] = EIR_UUID32_SOME;
/* The 32-bit alias occupies bytes 12..15 of the base-form UUID. */
509 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
511 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing all registered full 128-bit UUIDs, mirroring
 * the 16/32-bit variants. NOTE(review): field-header initialisation, break
 * statements and the final return were lost in extraction.
 */
519 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
520 u8 *ptr = data, *uuids_start = NULL;
521 struct bt_uuid *uuid;
525 list_for_each_entry(uuid, &hdev->uuids, list) {
526 if (uuid->size != 128)
532 uuids_start[1] = EIR_UUID128_ALL;
536 /* Stop if not enough space to put next UUID */
537 if ((ptr - data) + 16 > len) {
538 uuids_start[1] = EIR_UUID128_SOME;
542 memcpy(ptr, uuid->uuid, 16);
544 uuids_start[0] += 16;
/* Fill `data` (the 240-byte EIR block) with: local name (short or complete),
 * inquiry TX power, Device ID, then the three UUID lists. NOTE(review): the
 * ptr/name_len declarations, the short-name truncation branch, the field
 * length/type bytes for TX power and Device ID, and several ptr advances were
 * lost in extraction.
 */
550 static void create_eir(struct hci_dev *hdev, u8 *data)
555 name_len = strlen(hdev->dev_name);
/* EIR_NAME_SHORT is used when the name had to be truncated to fit. */
561 ptr[1] = EIR_NAME_SHORT;
563 ptr[1] = EIR_NAME_COMPLETE;
565 /* EIR Data length */
566 ptr[0] = name_len + 1;
568 memcpy(ptr + 2, hdev->dev_name, name_len);
570 ptr += (name_len + 2);
573 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
575 ptr[1] = EIR_TX_POWER;
576 ptr[2] = (u8) hdev->inq_tx_power;
/* devid_source > 0 means a Device ID record has been configured. */
581 if (hdev->devid_source > 0) {
583 ptr[1] = EIR_DEVICE_ID;
585 put_unaligned_le16(hdev->devid_source, ptr + 2);
586 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
587 put_unaligned_le16(hdev->devid_product, ptr + 6);
588 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Each list helper consumes from the remaining EIR budget. */
593 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
595 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Add an HCI Write EIR command to `req` if the freshly generated EIR data
 * differs from what the controller currently has. Skipped while powered off,
 * without extended-inquiry support, with SSP disabled, or while the service
 * cache is active. NOTE(review): the bare `return;` lines of the guard
 * clauses were lost in extraction.
 */
598 static void update_eir(struct hci_request *req)
600 struct hci_dev *hdev = req->hdev;
601 struct hci_cp_write_eir cp;
603 if (!hdev_is_powered(hdev))
606 if (!lmp_ext_inq_capable(hdev))
609 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
612 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
615 memset(&cp, 0, sizeof(cp));
617 create_eir(hdev, cp.data);
/* No-op when the EIR contents did not change. */
619 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
622 memcpy(hdev->eir, cp.data, sizeof(cp.data));
624 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
627 static u8 get_service_classes(struct hci_dev *hdev)
629 struct bt_uuid *uuid;
632 list_for_each_entry(uuid, &hdev->uuids, list)
633 val |= uuid->svc_hint;
/* Add an HCI Write Class of Device command to `req` when the computed CoD
 * (minor, major, service classes) differs from the controller's current one.
 * NOTE(review): the `u8 cod[3];` declaration and the guard-clause `return;`
 * lines were lost in extraction.
 */
638 static void update_class(struct hci_request *req)
640 struct hci_dev *hdev = req->hdev;
643 BT_DBG("%s", hdev->name);
645 if (!hdev_is_powered(hdev))
648 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
651 cod[0] = hdev->minor_class;
652 cod[1] = hdev->major_class;
653 cod[2] = get_service_classes(hdev);
655 if (memcmp(cod, hdev->dev_class, 3) == 0)
658 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed-work handler that expires the service cache: once HCI_SERVICE_CACHE
 * is cleared it rebuilds CoD and EIR and runs the request. NOTE(review): the
 * container_of argument (service_cache.work), the hci_dev_lock() call and the
 * update_class()/update_eir() calls were lost in extraction.
 */
661 static void service_cache_off(struct work_struct *work)
663 struct hci_dev *hdev = container_of(work, struct hci_dev,
665 struct hci_request req;
667 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
670 hci_req_init(&req, hdev);
677 hci_dev_unlock(hdev);
679 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation, performed the first time a mgmt
 * command addresses this controller (guarded by the HCI_MGMT flag).
 */
682 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
684 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
687 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)
689 /* Non-mgmt controlled devices get this bit set
690 * implicitly so that pairing works for them, however
691 * for mgmt we require user-space to explicitly enable
694 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: snapshot address, version, settings, class and
 * names under hci_dev_lock and return them. NOTE(review): the hci_dev_lock()
 * call and the trailing sizeof(rp) argument of cmd_complete were lost in
 * extraction.
 */
697 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
698 void *data, u16 data_len)
700 struct mgmt_rp_read_info rp;
702 BT_DBG("sock %p %s", sk, hdev->name);
706 memset(&rp, 0, sizeof(rp));
708 bacpy(&rp.bdaddr, &hdev->bdaddr);
710 rp.version = hdev->hci_ver;
711 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
713 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
714 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
716 memcpy(rp.dev_class, hdev->dev_class, 3);
718 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
719 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
721 hci_dev_unlock(hdev);
723 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command's resources. NOTE(review): the body (sock_put on
 * cmd->sk, kfree of cmd->param and cmd) was lost in extraction — restore from
 * upstream.
 */
727 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd tracking an in-flight mgmt command, copy the request
 * parameters and link it onto hdev->mgmt_pending. NOTE(review): the NULL
 * checks after both kmalloc calls, the cleanup on param-allocation failure,
 * the sock_hold(sk)/cmd->sk assignment and the return were lost in
 * extraction.
 */
734 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
735 struct hci_dev *hdev, void *data,
738 struct pending_cmd *cmd;
740 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
744 cmd->opcode = opcode;
745 cmd->index = hdev->id;
747 cmd->param = kmalloc(len, GFP_KERNEL);
754 memcpy(cmd->param, data, len);
759 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke `cb` on every pending command matching `opcode` (or on all of them
 * when opcode == 0). Uses the _safe iterator so the callback may remove
 * entries. NOTE(review): the callback parameter tail, the `continue;` and the
 * cb(cmd, data) invocation were lost in extraction.
 */
764 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
765 void (*cb)(struct pending_cmd *cmd,
769 struct pending_cmd *cmd, *tmp;
771 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
772 if (opcode > 0 && cmd->opcode != opcode)
/* Find the first pending command with the given opcode, or NULL.
 * NOTE(review): the `return cmd;` and trailing `return NULL;` were lost in
 * extraction.
 */
779 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
781 struct pending_cmd *cmd;
783 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
784 if (cmd->opcode == opcode)
791 static void mgmt_pending_remove(struct pending_cmd *cmd)
793 list_del(&cmd->list);
794 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the full current settings bitmap
 * (the standard success response for all MGMT_OP_SET_* handlers).
 * NOTE(review): the trailing sizeof(settings) argument was lost in
 * extraction.
 */
797 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
799 __le32 settings = cpu_to_le32(get_current_settings(hdev));
801 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler. Validates the on/off value, handles the
 * pending-auto-off fast path, short-circuits when the state already matches,
 * rejects concurrent power commands, then queues the power_on/power_off work.
 * NOTE(review): several lines (err declaration, hci_dev_lock, goto labels,
 * the cmd NULL check, the `if (cp->val)` around the queue_work pair and the
 * final return) were lost in extraction.
 */
805 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
808 struct mgmt_mode *cp = data;
809 struct pending_cmd *cmd;
812 BT_DBG("request for %s", hdev->name);
814 if (cp->val != 0x00 && cp->val != 0x01)
815 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
816 MGMT_STATUS_INVALID_PARAMS);
/* Device was up only because of auto-power-on: cancel the scheduled
 * auto-off and simply adopt the already-powered state.
 */
820 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
821 cancel_delayed_work(&hdev->power_off);
824 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
826 err = mgmt_powered(hdev, 1);
831 if (!!cp->val == hdev_is_powered(hdev)) {
832 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
836 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
837 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
842 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
849 queue_work(hdev->req_workqueue, &hdev->power_on);
851 queue_work(hdev->req_workqueue, &hdev->power_off.work);
856 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except `skip_sk`. A NULL
 * hdev sends with MGMT_INDEX_NONE. NOTE(review): the alloc_skb NULL check,
 * the `if (hdev)`/`else` around the index assignment, the `if (data)` guard
 * and the kfree_skb/return tail were lost in extraction.
 */
860 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
861 struct sock *skip_sk)
864 struct mgmt_hdr *hdr;
866 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
870 hdr = (void *) skb_put(skb, sizeof(*hdr));
871 hdr->opcode = cpu_to_le16(event);
873 hdr->index = cpu_to_le16(hdev->id);
875 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
876 hdr->len = cpu_to_le16(data_len);
879 memcpy(skb_put(skb, data_len), data, data_len);
881 /* Time stamp */
882 __net_timestamp(skb);
884 hci_send_to_control(skb, skip_sk);
890 static int new_settings(struct hci_dev *hdev, struct sock *skip)
894 ev = cpu_to_le32(get_current_settings(hdev));
896 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates val/timeout, requires the
 * device to be connectable, handles the powered-off case by just flipping the
 * flag, and otherwise issues HCI Write Scan Enable with the inquiry-scan bit
 * adjusted, (re)arming the discoverable timeout. NOTE(review): many lines
 * (declarations of timeout/scan/err, hci_dev_lock, goto targets, the BUSY
 * status argument, the scan baseline assignment and several closing braces)
 * were lost in extraction.
 */
899 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
902 struct mgmt_cp_set_discoverable *cp = data;
903 struct pending_cmd *cmd;
908 BT_DBG("request for %s", hdev->name);
910 if (!lmp_bredr_capable(hdev))
911 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
912 MGMT_STATUS_NOT_SUPPORTED);
914 if (cp->val != 0x00 && cp->val != 0x01)
915 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
916 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense when turning discoverability on. */
918 timeout = __le16_to_cpu(cp->timeout);
919 if (!cp->val && timeout > 0)
920 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
921 MGMT_STATUS_INVALID_PARAMS);
925 if (!hdev_is_powered(hdev) && timeout > 0) {
926 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
927 MGMT_STATUS_NOT_POWERED);
931 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
932 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
933 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable: inquiry scan without page scan
 * would be pointless.
 */
938 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
939 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
940 MGMT_STATUS_REJECTED);
944 if (!hdev_is_powered(hdev)) {
945 bool changed = false;
947 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
948 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
952 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
957 err = new_settings(hdev, sk);
/* Already in the requested state: only the timeout needs refreshing. */
962 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
963 if (hdev->discov_timeout > 0) {
964 cancel_delayed_work(&hdev->discov_off);
965 hdev->discov_timeout = 0;
968 if (cp->val && timeout > 0) {
969 hdev->discov_timeout = timeout;
970 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
971 msecs_to_jiffies(hdev->discov_timeout * 1000));
974 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
978 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
987 scan |= SCAN_INQUIRY;
989 cancel_delayed_work(&hdev->discov_off);
991 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
993 mgmt_pending_remove(cmd);
996 hdev->discov_timeout = timeout;
999 hci_dev_unlock(hdev);
/* Queue HCI page-scan activity/type commands implementing "fast
 * connectable": interlaced scan with a 160ms interval when enabled, standard
 * scan at 1.28s when disabled. Commands are only added when the controller's
 * cached values differ. NOTE(review): the acp/type declarations, the
 * lmp_bredr guard and the if/else structure lines were lost in extraction.
 */
1003 static void write_fast_connectable(struct hci_request *req, bool enable)
1005 struct hci_dev *hdev = req->hdev;
1006 struct hci_cp_write_page_scan_activity acp;
1010 type = PAGE_SCAN_TYPE_INTERLACED;
1012 /* 160 msec page scan interval */
1013 acp.interval = __constant_cpu_to_le16(0x0100);
1015 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1017 /* default 1.28 sec page scan */
1018 acp.interval = __constant_cpu_to_le16(0x0800);
1021 acp.window = __constant_cpu_to_le16(0x0012);
1023 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1024 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1025 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1028 if (hdev->page_scan_type != type)
1029 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* hci_request completion callback for set_connectable: answer the pending
 * command with the settings response and drop it. NOTE(review): the
 * hci_dev_lock() call, the `if (!cmd) goto unlock;` guard and the unlock
 * label were lost in extraction.
 */
1032 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1034 struct pending_cmd *cmd;
1036 BT_DBG("status 0x%02x", status);
1040 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1044 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1046 mgmt_pending_remove(cmd);
1049 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler. Powered-off: flip the flag (disabling
 * connectable also clears discoverable). Powered: build an hci_request that
 * writes the new scan enable, also dropping fast-connectable page-scan
 * parameters when going non-connectable. NOTE(review): declarations
 * (scan, err), hci_dev_lock, goto targets, the scan-value computation, the
 * cmd NULL check and the final return were lost in extraction.
 */
1052 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1055 struct mgmt_mode *cp = data;
1056 struct pending_cmd *cmd;
1057 struct hci_request req;
1061 BT_DBG("request for %s", hdev->name);
1063 if (!lmp_bredr_capable(hdev))
1064 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1065 MGMT_STATUS_NOT_SUPPORTED);
1067 if (cp->val != 0x00 && cp->val != 0x01)
1068 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1069 MGMT_STATUS_INVALID_PARAMS);
1073 if (!hdev_is_powered(hdev)) {
1074 bool changed = false;
1076 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1080 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Dropping connectable implies dropping discoverable as well. */
1082 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1083 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1086 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1091 err = new_settings(hdev, sk);
1096 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1097 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1098 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
/* Page scan already matches the requested state: nothing to send. */
1103 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1104 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1108 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1119 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1120 hdev->discov_timeout > 0)
1121 cancel_delayed_work(&hdev->discov_off);
1124 hci_req_init(&req, hdev);
1126 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1128 if (!cp->val && test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1129 write_fast_connectable(&req, false);
1131 err = hci_req_run(&req, set_connectable_complete);
1133 mgmt_pending_remove(cmd);
1136 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: purely a host-side flag, no HCI traffic
 * needed, so the flag is flipped and the new settings broadcast immediately.
 * NOTE(review): the err declaration, hci_dev_lock, the `if (cp->val)` line,
 * the failure goto and the final return were lost in extraction.
 */
1140 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1143 struct mgmt_mode *cp = data;
1146 BT_DBG("request for %s", hdev->name);
1148 if (cp->val != 0x00 && cp->val != 0x01)
1149 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1150 MGMT_STATUS_INVALID_PARAMS);
1155 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1157 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1159 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1163 err = new_settings(hdev, sk);
1166 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: powered-off devices just toggle the
 * HCI_LINK_SECURITY flag; powered devices send HCI Write Auth Enable and
 * reply from its completion. NOTE(review): declarations (val, err),
 * hci_dev_lock, goto targets, the `val = !!cp->val;` assignment, the cmd NULL
 * check and the final return were lost in extraction.
 */
1170 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1173 struct mgmt_mode *cp = data;
1174 struct pending_cmd *cmd;
1178 BT_DBG("request for %s", hdev->name);
1180 if (!lmp_bredr_capable(hdev))
1181 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1182 MGMT_STATUS_NOT_SUPPORTED);
1184 if (cp->val != 0x00 && cp->val != 0x01)
1185 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1186 MGMT_STATUS_INVALID_PARAMS);
1190 if (!hdev_is_powered(hdev)) {
1191 bool changed = false;
1193 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1194 &hdev->dev_flags)) {
1195 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1199 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1204 err = new_settings(hdev, sk);
1209 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1210 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches: skip the HCI round-trip. */
1217 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1218 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1222 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1228 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1230 mgmt_pending_remove(cmd);
1235 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler, structured like set_link_security but driving HCI
 * Write Simple Pairing Mode. NOTE(review): declarations (val, err),
 * hci_dev_lock, the val assignment, goto targets, the BUSY status argument,
 * the cmd NULL check and the final return were lost in extraction.
 */
1239 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1241 struct mgmt_mode *cp = data;
1242 struct pending_cmd *cmd;
1246 BT_DBG("request for %s", hdev->name);
1248 if (!lmp_ssp_capable(hdev))
1249 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1250 MGMT_STATUS_NOT_SUPPORTED);
1252 if (cp->val != 0x00 && cp->val != 0x01)
1253 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1254 MGMT_STATUS_INVALID_PARAMS);
1260 if (!hdev_is_powered(hdev)) {
1261 bool changed = false;
1263 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1264 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1268 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1273 err = new_settings(hdev, sk);
1278 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1279 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1284 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1285 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1289 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1295 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1297 mgmt_pending_remove(cmd);
1302 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed / AMP) handler: host-side flag only, so it
 * toggles HCI_HS_ENABLED and replies directly. NOTE(review): the
 * enable_hs/support condition preceding the NOT_SUPPORTED status and the
 * `if (cp->val)` line were lost in extraction.
 */
1306 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1308 struct mgmt_mode *cp = data;
1310 BT_DBG("request for %s", hdev->name);
1313 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1314 MGMT_STATUS_NOT_SUPPORTED);
1316 if (cp->val != 0x00 && cp->val != 0x01)
1317 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1318 MGMT_STATUS_INVALID_PARAMS);
1321 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1323 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1325 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler: toggles the LE host support. When no HCI change is
 * needed (powered off, or host LE support already matches) only the flag is
 * flipped; otherwise HCI Write LE Host Supported is sent. NOTE(review):
 * declarations (val, enabled, err), hci_dev_lock, the val assignment, goto
 * targets, the hci_cp.le assignment, the cmd NULL check and the final return
 * were lost in extraction.
 */
1328 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1330 struct mgmt_mode *cp = data;
1331 struct hci_cp_write_le_host_supported hci_cp;
1332 struct pending_cmd *cmd;
1336 BT_DBG("request for %s", hdev->name);
1338 if (!lmp_le_capable(hdev))
1339 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1340 MGMT_STATUS_NOT_SUPPORTED);
1342 if (cp->val != 0x00 && cp->val != 0x01)
1343 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1344 MGMT_STATUS_INVALID_PARAMS);
1349 enabled = lmp_host_le_capable(hdev);
1351 if (!hdev_is_powered(hdev) || val == enabled) {
1352 bool changed = false;
1354 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1355 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1359 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1364 err = new_settings(hdev, sk);
1369 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1370 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1375 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1381 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE + BR/EDR only when the controller supports it. */
1385 hci_cp.simul = lmp_le_br_capable(hdev);
1388 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1391 mgmt_pending_remove(cmd);
1394 hci_dev_unlock(hdev);
1398 /* This is a helper function to test for pending mgmt commands that can
1399 * cause CoD or EIR HCI commands. We can only allow one such pending
1400 * mgmt command at a time since otherwise we cannot easily track what
1401 * the current values are, will be, and based on that calculate if a new
1402 * HCI command needs to be sent and if yes with what value.
/* NOTE(review): the `return true;` inside the switch and the trailing
 * `return false;` were lost in extraction.
 */
1404 static bool pending_eir_or_class(struct hci_dev *hdev)
1406 struct pending_cmd *cmd;
1408 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1409 switch (cmd->opcode) {
1410 case MGMT_OP_ADD_UUID:
1411 case MGMT_OP_REMOVE_UUID:
1412 case MGMT_OP_SET_DEV_CLASS:
1413 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in the
 * little-endian byte order used by uuid->uuid throughout this file.
 * NOTE(review): the closing `};` was lost in extraction.
 */
1421 static const u8 bluetooth_base_uuid[] = {
1422 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1423 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as a 16-, 32- or 128-bit value: only UUIDs whose
 * low 12 bytes match the Bluetooth Base UUID have shorter aliases.
 * NOTE(review): the `return 128;` for non-base UUIDs and the 16-vs-32
 * decision on `val` plus returns were lost in extraction.
 */
1426 static u8 get_uuid_size(const u8 *uuid)
1430 if (memcmp(uuid, bluetooth_base_uuid, 12))
1433 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the CoD/EIR-affecting commands: replies to
 * the pending command with the (possibly updated) device class.
 * NOTE(review): hci_dev_lock, the `if (!cmd) goto unlock;` guard and the
 * unlock label were lost in extraction.
 */
1440 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1442 struct pending_cmd *cmd;
1446 cmd = mgmt_pending_find(mgmt_op, hdev);
1450 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1451 hdev->dev_class, 3);
1453 mgmt_pending_remove(cmd);
1456 hci_dev_unlock(hdev);
1459 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1461 BT_DBG("status 0x%02x", status);
1463 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: register a UUID, then refresh CoD and EIR via an
 * hci_request. -ENODATA from hci_req_run means no HCI command was needed, so
 * the reply is sent immediately. NOTE(review): the err declaration,
 * hci_dev_lock, the BUSY status argument, the kmalloc NULL check, the
 * update_class/update_eir calls, goto targets, the cmd NULL check and the
 * final return were lost in extraction.
 */
1466 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1468 struct mgmt_cp_add_uuid *cp = data;
1469 struct pending_cmd *cmd;
1470 struct hci_request req;
1471 struct bt_uuid *uuid;
1474 BT_DBG("request for %s", hdev->name);
1478 if (pending_eir_or_class(hdev)) {
1479 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1484 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1490 memcpy(uuid->uuid, cp->uuid, 16);
1491 uuid->svc_hint = cp->svc_hint;
1492 uuid->size = get_uuid_size(cp->uuid);
1494 list_add_tail(&uuid->list, &hdev->uuids);
1496 hci_req_init(&req, hdev);
1501 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA: request was empty, nothing to wait for — reply now. */
1503 if (err != -ENODATA)
1506 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1507 hdev->dev_class, 3);
1511 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1520 hci_dev_unlock(hdev);
/* Arm the service cache (suppressing CoD/EIR updates) for CACHE_TIMEOUT.
 * Returns whether the cache was (re)armed by this call. NOTE(review): the
 * `return false;` guards, the CACHE_TIMEOUT argument and `return true;` were
 * lost in extraction.
 */
1524 static bool enable_service_cache(struct hci_dev *hdev)
1526 if (!hdev_is_powered(hdev))
1529 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1530 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1538 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1540 BT_DBG("status 0x%02x", status);
1542 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: the all-zero UUID clears every registered
 * UUID (arming the service cache to batch the resulting CoD/EIR updates);
 * otherwise all entries matching cp->uuid are removed. NOTE(review): the
 * found/err declarations, hci_dev_lock, the BUSY status argument, goto
 * targets, found bookkeeping, kfree of removed entries, update_class/
 * update_eir calls, the cmd NULL check and the final return were lost in
 * extraction.
 */
1545 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1548 struct mgmt_cp_remove_uuid *cp = data;
1549 struct pending_cmd *cmd;
1550 struct bt_uuid *match, *tmp;
1551 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1552 struct hci_request req;
1555 BT_DBG("request for %s", hdev->name);
1559 if (pending_eir_or_class(hdev)) {
1560 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard (all-zero) UUID: wipe the whole UUID list. */
1565 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1566 err = hci_uuids_clear(hdev);
1568 if (enable_service_cache(hdev)) {
1569 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1570 0, hdev->dev_class, 3);
1579 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1580 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1583 list_del(&match->list);
/* No matching UUID found: report invalid parameters. */
1589 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1590 MGMT_STATUS_INVALID_PARAMS);
1595 hci_req_init(&req, hdev);
1600 err = hci_req_run(&req, remove_uuid_complete);
1602 if (err != -ENODATA)
1605 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1606 hdev->dev_class, 3);
1610 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1619 hci_dev_unlock(hdev);
/* hci_request completion callback for Set Device Class: forward the HCI
 * status to the shared class-update completion path.
 */
1623 static void set_class_complete(struct hci_dev *hdev, u8 status)
1625 BT_DBG("status 0x%02x", status);
1627 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: store the requested major/minor device
 * class and push it to a BR/EDR-capable, powered controller.
 *
 * Validation: BR/EDR support is mandatory, no other EIR/class op may be
 * pending, and reserved bits (low 2 bits of minor, high 3 bits of major)
 * must be zero.  On a powered-off adapter only the cached values change
 * and the command completes immediately.
 */
1630 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1633 struct mgmt_cp_set_dev_class *cp = data;
1634 struct pending_cmd *cmd;
1635 struct hci_request req;
1638 BT_DBG("request for %s", hdev->name);
1640 if (!lmp_bredr_capable(hdev))
1641 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1642 MGMT_STATUS_NOT_SUPPORTED);
1646 if (pending_eir_or_class(hdev)) {
1647 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD bits must be clear: minor bits 0-1, major bits 5-7. */
1652 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1653 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1654 MGMT_STATUS_INVALID_PARAMS);
1658 hdev->major_class = cp->major;
1659 hdev->minor_class = cp->minor;
1661 if (!hdev_is_powered(hdev)) {
1662 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1663 hdev->dev_class, 3);
1667 hci_req_init(&req, hdev);
/* If the service cache was armed, flush it synchronously first; the
 * hdev lock must be dropped around cancel_delayed_work_sync().
 */
1669 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1670 hci_dev_unlock(hdev);
1671 cancel_delayed_work_sync(&hdev->service_cache);
1678 err = hci_req_run(&req, set_class_complete);
1680 if (err != -ENODATA)
1683 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1684 hdev->dev_class, 3);
1688 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1697 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR link
 * keys with the list supplied by userspace.
 *
 * Validation: the payload length must match key_count, debug_keys must be
 * a boolean, and every key address must be of type BDADDR_BREDR.  The
 * existing key store is cleared before the new keys are added.
 */
1701 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1704 struct mgmt_cp_load_link_keys *cp = data;
1705 u16 key_count, expected_len;
1708 key_count = __le16_to_cpu(cp->key_count);
/* Exact-length check guards against a forged key_count walking past
 * the end of the received buffer.
 */
1710 expected_len = sizeof(*cp) + key_count *
1711 sizeof(struct mgmt_link_key_info);
1712 if (expected_len != len) {
1713 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1715 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1716 MGMT_STATUS_INVALID_PARAMS);
1719 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1720 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1721 MGMT_STATUS_INVALID_PARAMS);
1723 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all entries before mutating any state. */
1726 for (i = 0; i < key_count; i++) {
1727 struct mgmt_link_key_info *key = &cp->keys[i];
1729 if (key->addr.type != BDADDR_BREDR)
1730 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1731 MGMT_STATUS_INVALID_PARAMS);
1736 hci_link_keys_clear(hdev);
1738 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1741 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1743 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1745 for (i = 0; i < key_count; i++) {
1746 struct mgmt_link_key_info *key = &cp->keys[i];
1748 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1749 key->type, key->pin_len);
1752 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1754 hci_dev_unlock(hdev);
/* Emit a MGMT_EV_DEVICE_UNPAIRED event for @bdaddr/@addr_type to all
 * management sockets except @skip_sk (typically the requesting socket,
 * which gets a command reply instead).
 */
1759 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1760 u8 addr_type, struct sock *skip_sk)
1762 struct mgmt_ev_device_unpaired ev;
1764 bacpy(&ev.addr.bdaddr, bdaddr);
1765 ev.addr.type = addr_type;
1767 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored pairing material for a
 * remote device (link key for BR/EDR, LTK for LE) and optionally tear down
 * an existing connection to it.
 *
 * If @cp->disconnect is set and a live connection is found, the reply is
 * deferred behind a pending command and an HCI Disconnect (reason 0x13,
 * Remote User Terminated Connection) is issued; otherwise the command
 * completes immediately and DEVICE_UNPAIRED is broadcast to other sockets.
 */
1771 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1774 struct mgmt_cp_unpair_device *cp = data;
1775 struct mgmt_rp_unpair_device rp;
1776 struct hci_cp_disconnect dc;
1777 struct pending_cmd *cmd;
1778 struct hci_conn *conn;
1781 memset(&rp, 0, sizeof(rp));
1782 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1783 rp.addr.type = cp->addr.type;
1785 if (!bdaddr_type_is_valid(cp->addr.type))
1786 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1787 MGMT_STATUS_INVALID_PARAMS,
1790 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1791 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1792 MGMT_STATUS_INVALID_PARAMS,
1797 if (!hdev_is_powered(hdev)) {
1798 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1799 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR pairings are link keys; LE pairings are long term keys. */
1803 if (cp->addr.type == BDADDR_BREDR)
1804 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1806 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1809 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1810 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1814 if (cp->disconnect) {
1815 if (cp->addr.type == BDADDR_BREDR)
1816 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1819 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1826 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1828 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1832 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1839 dc.handle = cpu_to_le16(conn->handle);
1840 dc.reason = 0x13; /* Remote User Terminated Connection */
1841 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1843 mgmt_pending_remove(cmd);
1846 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: request termination of an existing BR/EDR or
 * LE connection to the given address.
 *
 * Fails with NOT_POWERED if the adapter is down, BUSY if a disconnect is
 * already pending, and NOT_CONNECTED when no usable connection exists.
 * On success a pending command is queued and HCI Disconnect is sent with
 * reason HCI_ERROR_REMOTE_USER_TERM; the reply arrives on completion.
 */
1850 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1853 struct mgmt_cp_disconnect *cp = data;
1854 struct mgmt_rp_disconnect rp;
1855 struct hci_cp_disconnect dc;
1856 struct pending_cmd *cmd;
1857 struct hci_conn *conn;
1862 memset(&rp, 0, sizeof(rp));
1863 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1864 rp.addr.type = cp->addr.type;
1866 if (!bdaddr_type_is_valid(cp->addr.type))
1867 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1868 MGMT_STATUS_INVALID_PARAMS,
1873 if (!test_bit(HCI_UP, &hdev->flags)) {
1874 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1875 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be outstanding at a time. */
1879 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1880 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1881 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1885 if (cp->addr.type == BDADDR_BREDR)
1886 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1889 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections are not established yet/any more. */
1891 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1892 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1893 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1897 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1903 dc.handle = cpu_to_le16(conn->handle);
1904 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1906 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1908 mgmt_pending_remove(cmd);
1911 hci_dev_unlock(hdev);
/* Map an HCI (link_type, addr_type) pair onto the mgmt BDADDR_* address
 * type: LE links become LE public/random, everything else BR/EDR.
 */
1915 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1917 switch (link_type) {
1919 switch (addr_type) {
1920 case ADDR_LE_DEV_PUBLIC:
1921 return BDADDR_LE_PUBLIC;
1924 /* Fallback to LE Random address type */
1925 return BDADDR_LE_RANDOM;
1929 /* Fallback to BR/EDR type */
1930 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all currently
 * mgmt-visible connections on @hdev.
 *
 * The connection hash is walked twice: first to count entries with
 * HCI_CONN_MGMT_CONNECTED set (to size the reply buffer), then to fill
 * the address list.  SCO/eSCO links are filtered out, so the length is
 * recomputed before sending the reply.
 */
1934 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1937 struct mgmt_rp_get_connections *rp;
1947 if (!hdev_is_powered(hdev)) {
1948 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1949 MGMT_STATUS_NOT_POWERED);
1954 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1955 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1959 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1960 rp = kmalloc(rp_len, GFP_KERNEL);
1967 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1968 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1970 bacpy(&rp->addr[i].bdaddr, &c->dst);
1971 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1972 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1977 rp->conn_count = cpu_to_le16(i);
1979 /* Recalculate length in case of filtered SCO connections, etc */
1980 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1982 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1988 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the corresponding
 * HCI PIN Code Negative Reply for the given address; the pending entry is
 * removed again if the HCI send fails.
 */
1992 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1993 struct mgmt_cp_pin_code_neg_reply *cp)
1995 struct pending_cmd *cmd;
1998 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2003 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2004 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2006 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to the
 * controller for an in-progress BR/EDR pairing.
 *
 * Requires a powered adapter and an existing ACL connection.  When high
 * security is pending, only a full 16-byte PIN is acceptable; a shorter
 * one is converted into an automatic negative reply and the command fails
 * with INVALID_PARAMS.
 */
2011 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2014 struct hci_conn *conn;
2015 struct mgmt_cp_pin_code_reply *cp = data;
2016 struct hci_cp_pin_code_reply reply;
2017 struct pending_cmd *cmd;
2024 if (!hdev_is_powered(hdev)) {
2025 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2026 MGMT_STATUS_NOT_POWERED);
2030 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2032 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2033 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a 16-digit PIN; reject anything shorter and
 * negatively answer the controller so pairing does not hang.
 */
2037 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2038 struct mgmt_cp_pin_code_neg_reply ncp;
2040 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2042 BT_ERR("PIN code is not 16 bytes long");
2044 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2046 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2047 MGMT_STATUS_INVALID_PARAMS);
2052 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2058 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2059 reply.pin_len = cp->pin_len;
2060 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2062 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2064 mgmt_pending_remove(cmd);
2067 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: record the IO capability to be used
 * for future pairing attempts.  Purely a host-side setting — no HCI
 * traffic is generated and the command completes immediately.
 */
2071 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2074 struct mgmt_cp_set_io_capability *cp = data;
2080 hdev->io_capability = cp->io_capability;
2082 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2083 hdev->io_capability);
2085 hci_dev_unlock(hdev);
2087 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data points at @conn,
 * i.e. the pairing attempt associated with this connection.
 */
2091 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2093 struct hci_dev *hdev = conn->hdev;
2094 struct pending_cmd *cmd;
2096 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2097 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2100 if (cmd->user_data != conn)
/* Finish a pairing attempt: send the PAIR_DEVICE reply with @status and
 * the peer address, detach all connection callbacks so no further events
 * fire for this connection, and drop the pending command.
 */
2109 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2111 struct mgmt_rp_pair_device rp;
2112 struct hci_conn *conn = cmd->user_data;
2114 bacpy(&rp.addr.bdaddr, &conn->dst);
2115 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2117 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2120 /* So we don't get further callbacks for this connection */
2121 conn->connect_cfm_cb = NULL;
2122 conn->security_cfm_cb = NULL;
2123 conn->disconn_cfm_cb = NULL;
2127 mgmt_pending_remove(cmd);
/* hci_conn callback: resolve the matching pending pairing (if any) and
 * complete it with the mgmt translation of the HCI status.
 */
2130 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2132 struct pending_cmd *cmd;
2134 BT_DBG("status %u", status);
2136 cmd = find_pairing(conn);
2138 BT_DBG("Unable to find a pending command");
2140 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing.  Unlike BR/EDR, an LE link
 * coming up does not mean pairing succeeded, so this handles the failure
 * path by completing the pending pairing with the translated status.
 */
2143 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2145 struct pending_cmd *cmd;
2147 BT_DBG("status %u", status);
2152 cmd = find_pairing(conn);
2154 BT_DBG("Unable to find a pending command");
2156 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * device over BR/EDR or LE.
 *
 * Establishes (or reuses) a connection via hci_connect() at medium
 * security, wires the pairing callbacks onto it, and queues a pending
 * command answered from pairing_complete().  io_cap 0x03 (NoInputNoOutput)
 * selects non-MITM bonding.  If the connection is already up and secure
 * enough, the pairing completes immediately.
 */
2159 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2162 struct mgmt_cp_pair_device *cp = data;
2163 struct mgmt_rp_pair_device rp;
2164 struct pending_cmd *cmd;
2165 u8 sec_level, auth_type;
2166 struct hci_conn *conn;
2171 memset(&rp, 0, sizeof(rp));
2172 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2173 rp.addr.type = cp->addr.type;
2175 if (!bdaddr_type_is_valid(cp->addr.type))
2176 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2177 MGMT_STATUS_INVALID_PARAMS,
2182 if (!hdev_is_powered(hdev)) {
2183 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2184 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2188 sec_level = BT_SECURITY_MEDIUM;
/* io_cap 0x03 == NoInputNoOutput: MITM protection is impossible. */
2189 if (cp->io_cap == 0x03)
2190 auth_type = HCI_AT_DEDICATED_BONDING;
2192 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2194 if (cp->addr.type == BDADDR_BREDR)
2195 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2196 cp->addr.type, sec_level, auth_type);
2198 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2199 cp->addr.type, sec_level, auth_type);
2204 if (PTR_ERR(conn) == -EBUSY)
2205 status = MGMT_STATUS_BUSY;
2207 status = MGMT_STATUS_CONNECT_FAILED;
2209 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already installed means another pairing owns
 * this connection.
 */
2215 if (conn->connect_cfm_cb) {
2217 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2218 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2222 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2229 /* For LE, just connecting isn't a proof that the pairing finished */
2230 if (cp->addr.type == BDADDR_BREDR)
2231 conn->connect_cfm_cb = pairing_complete_cb;
2233 conn->connect_cfm_cb = le_connect_complete_cb;
2235 conn->security_cfm_cb = pairing_complete_cb;
2236 conn->disconn_cfm_cb = pairing_complete_cb;
2237 conn->io_capability = cp->io_cap;
2238 cmd->user_data = conn;
/* Already connected and secure: nothing left to do. */
2240 if (conn->state == BT_CONNECTED &&
2241 hci_conn_security(conn, sec_level, auth_type))
2242 pairing_complete(cmd, 0);
2247 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-flight PAIR_DEVICE
 * command for the given address.
 *
 * The pending pairing is located via mgmt_pending_find() and its target
 * address is checked against the request; a match is completed with
 * MGMT_STATUS_CANCELLED and the cancel command replies with the address.
 */
2251 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2254 struct mgmt_addr_info *addr = data;
2255 struct pending_cmd *cmd;
2256 struct hci_conn *conn;
2263 if (!hdev_is_powered(hdev)) {
2264 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2265 MGMT_STATUS_NOT_POWERED);
2269 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2271 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2272 MGMT_STATUS_INVALID_PARAMS);
2276 conn = cmd->user_data;
/* The cancel must name the same peer the pairing targets. */
2278 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2279 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2280 MGMT_STATUS_INVALID_PARAMS);
2284 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2286 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2287 addr, sizeof(*addr));
2289 hci_dev_unlock(hdev);
/* Common backend for the user confirm/passkey (neg-)reply commands.
 *
 * @mgmt_op identifies the mgmt command being answered, @hci_op the HCI
 * command to emit for BR/EDR, and @passkey is only meaningful for
 * HCI_OP_USER_PASSKEY_REPLY.  LE address types are routed to SMP via
 * smp_user_confirm_reply() instead of HCI.  Requires a powered adapter
 * and an existing connection to @bdaddr.
 */
2293 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2294 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2295 u16 hci_op, __le32 passkey)
2297 struct pending_cmd *cmd;
2298 struct hci_conn *conn;
2303 if (!hdev_is_powered(hdev)) {
2304 err = cmd_status(sk, hdev->id, mgmt_op,
2305 MGMT_STATUS_NOT_POWERED);
2309 if (type == BDADDR_BREDR)
2310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2312 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2315 err = cmd_status(sk, hdev->id, mgmt_op,
2316 MGMT_STATUS_NOT_CONNECTED);
2320 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2321 /* Continue with pairing via SMP */
2322 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2325 err = cmd_status(sk, hdev->id, mgmt_op,
2326 MGMT_STATUS_SUCCESS);
2328 err = cmd_status(sk, hdev->id, mgmt_op,
2329 MGMT_STATUS_FAILED);
2334 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2340 /* Continue with pairing via HCI */
/* Passkey replies carry a payload; all other ops send just the bdaddr. */
2341 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2342 struct hci_cp_user_passkey_reply cp;
2344 bacpy(&cp.bdaddr, bdaddr);
2345 cp.passkey = passkey;
2346 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2348 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2351 mgmt_pending_remove(cmd);
2354 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the PIN-code negative-reply opcodes.
 */
2358 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2359 void *data, u16 len)
2361 struct mgmt_cp_pin_code_neg_reply *cp = data;
2365 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2366 MGMT_OP_PIN_CODE_NEG_REPLY,
2367 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the exact payload size,
 * then delegate to user_pairing_resp() with the confirm-reply opcodes.
 */
2370 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2373 struct mgmt_cp_user_confirm_reply *cp = data;
2377 if (len != sizeof(*cp))
2378 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2379 MGMT_STATUS_INVALID_PARAMS);
2381 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2382 MGMT_OP_USER_CONFIRM_REPLY,
2383 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the confirm negative-reply opcodes.
 */
2386 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2387 void *data, u16 len)
2389 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2393 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2394 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2395 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper delegating to
 * user_pairing_resp(); this is the one caller that forwards a passkey.
 */
2398 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2401 struct mgmt_cp_user_passkey_reply *cp = data;
2405 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2406 MGMT_OP_USER_PASSKEY_REPLY,
2407 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the passkey negative-reply opcodes.
 */
2410 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2411 void *data, u16 len)
2413 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2417 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2418 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2419 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Add an HCI Write Local Name command to @req, carrying the name currently
 * cached in hdev->dev_name.
 */
2422 static void update_name(struct hci_request *req)
2424 struct hci_dev *hdev = req->hdev;
2425 struct hci_cp_write_local_name cp;
2427 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2429 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* hci_request completion for Set Local Name: answer the pending command
 * with either a translated error status or a success reply, then drop it.
 */
2432 static void set_name_complete(struct hci_dev *hdev, u8 status)
2434 struct mgmt_cp_set_local_name *cp;
2435 struct pending_cmd *cmd;
2437 BT_DBG("status 0x%02x", status);
2441 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2448 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2449 mgmt_status(status));
2451 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2454 mgmt_pending_remove(cmd);
2457 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's friendly name and
 * short name.
 *
 * Unchanged names complete immediately.  On a powered-off adapter only the
 * cached names change and a LOCAL_NAME_CHANGED event is emitted.  Otherwise
 * a pending command is queued and an hci_request writes the name to the
 * controller (plus the LE advertising data when LE-capable), answered from
 * set_name_complete().
 */
2460 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2463 struct mgmt_cp_set_local_name *cp = data;
2464 struct pending_cmd *cmd;
2465 struct hci_request req;
2472 /* If the old values are the same as the new ones just return a
2473 * direct command complete event.
2475 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2476 !memcmp(hdev->short_name, cp->short_name,
2477 sizeof(hdev->short_name))) {
2478 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2483 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2485 if (!hdev_is_powered(hdev)) {
2486 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2488 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Notify other mgmt sockets of the name change. */
2493 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2505 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2507 hci_req_init(&req, hdev);
2509 if (lmp_bredr_capable(hdev)) {
/* Keep the LE advertising data in sync with the new name. */
2514 if (lmp_le_capable(hdev))
2515 hci_update_ad(&req);
2517 err = hci_req_run(&req, set_name_complete);
2519 mgmt_pending_remove(cmd);
2522 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the controller's local OOB
 * pairing data (hash/randomizer).
 *
 * Requires a powered, SSP-capable adapter and no identical command already
 * pending.  The HCI Read Local OOB Data command is fired and the reply is
 * delivered asynchronously via the queued pending command.
 */
2526 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2527 void *data, u16 data_len)
2529 struct pending_cmd *cmd;
2532 BT_DBG("%s", hdev->name);
2536 if (!hdev_is_powered(hdev)) {
2537 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2538 MGMT_STATUS_NOT_POWERED);
2542 if (!lmp_ssp_capable(hdev)) {
2543 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2544 MGMT_STATUS_NOT_SUPPORTED);
2548 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2549 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2554 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2560 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2562 mgmt_pending_remove(cmd);
2565 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store OOB hash/randomizer received
 * out-of-band for a remote address; completes synchronously with SUCCESS
 * or FAILED depending on the store result.
 */
2569 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2570 void *data, u16 len)
2572 struct mgmt_cp_add_remote_oob_data *cp = data;
2576 BT_DBG("%s ", hdev->name);
2580 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2583 status = MGMT_STATUS_FAILED;
2585 status = MGMT_STATUS_SUCCESS;
2587 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2588 &cp->addr, sizeof(cp->addr));
2590 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for the
 * given remote address; an unknown address yields INVALID_PARAMS.
 */
2594 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2595 void *data, u16 len)
2597 struct mgmt_cp_remove_remote_oob_data *cp = data;
2601 BT_DBG("%s", hdev->name);
2605 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2607 status = MGMT_STATUS_INVALID_PARAMS;
2609 status = MGMT_STATUS_SUCCESS;
2611 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2612 status, &cp->addr, sizeof(cp->addr));
2614 hci_dev_unlock(hdev);
/* Continue an interleaved (BR/EDR + LE) discovery by starting the inquiry
 * phase; on failure the discovery state is reset to STOPPED.
 */
2618 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2622 BT_DBG("%s", hdev->name);
2626 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2628 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2630 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: begin device discovery of the requested
 * type (BR/EDR inquiry, LE scan, or interleaved).
 *
 * Rejected when the adapter is off, periodic inquiry is active, or a
 * discovery is already in progress.  Each discovery type is gated on the
 * matching controller capability; unsupported or unknown types fail and
 * drop the pending command.  On success the state moves to
 * DISCOVERY_STARTING.
 */
2635 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2636 void *data, u16 len)
2638 struct mgmt_cp_start_discovery *cp = data;
2639 struct pending_cmd *cmd;
2642 BT_DBG("%s", hdev->name);
2646 if (!hdev_is_powered(hdev)) {
2647 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2648 MGMT_STATUS_NOT_POWERED);
2652 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2653 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2658 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2659 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2664 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2670 hdev->discovery.type = cp->type;
2672 switch (hdev->discovery.type) {
2673 case DISCOV_TYPE_BREDR:
2674 if (!lmp_bredr_capable(hdev)) {
2675 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2676 MGMT_STATUS_NOT_SUPPORTED);
2677 mgmt_pending_remove(cmd);
2681 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2684 case DISCOV_TYPE_LE:
2685 if (!lmp_host_le_capable(hdev)) {
2686 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2687 MGMT_STATUS_NOT_SUPPORTED);
2688 mgmt_pending_remove(cmd);
2692 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2693 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
/* Interleaved needs both host LE and BR/EDR support; the LE scan
 * runs first, the BR/EDR inquiry follows via
 * mgmt_interleaved_discovery().
 */
2696 case DISCOV_TYPE_INTERLEAVED:
2697 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2698 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2699 MGMT_STATUS_NOT_SUPPORTED);
2700 mgmt_pending_remove(cmd);
2704 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2705 LE_SCAN_TIMEOUT_BREDR_LE);
2709 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2710 MGMT_STATUS_INVALID_PARAMS);
2711 mgmt_pending_remove(cmd);
2716 mgmt_pending_remove(cmd);
2718 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2721 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: halt an active discovery of the given
 * type.
 *
 * Rejected when no discovery is running or the requested type does not
 * match the active one.  In the FINDING state the inquiry or LE scan is
 * cancelled; in RESOLVING the outstanding remote-name request is cancelled
 * (or the command completes immediately when no name resolution remains).
 * On success the state moves to DISCOVERY_STOPPING.
 */
2725 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2728 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2729 struct pending_cmd *cmd;
2730 struct hci_cp_remote_name_req_cancel cp;
2731 struct inquiry_entry *e;
2734 BT_DBG("%s", hdev->name);
2738 if (!hci_discovery_active(hdev)) {
2739 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2740 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2741 sizeof(mgmt_cp->type));
2745 if (hdev->discovery.type != mgmt_cp->type) {
2746 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2747 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2748 sizeof(mgmt_cp->type));
2752 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2758 switch (hdev->discovery.state) {
2759 case DISCOVERY_FINDING:
2760 if (test_bit(HCI_INQUIRY, &hdev->flags))
2761 err = hci_cancel_inquiry(hdev);
2763 err = hci_cancel_le_scan(hdev);
2767 case DISCOVERY_RESOLVING:
2768 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* No entry still resolving: discovery can finish right away. */
2771 mgmt_pending_remove(cmd);
2772 err = cmd_complete(sk, hdev->id,
2773 MGMT_OP_STOP_DISCOVERY, 0,
2775 sizeof(mgmt_cp->type));
2776 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2780 bacpy(&cp.bdaddr, &e->data.bdaddr);
2781 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2787 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2792 mgmt_pending_remove(cmd);
2794 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2797 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: let userspace tell the kernel whether it
 * already knows a discovered device's name, so name resolution during
 * discovery can be skipped (NAME_KNOWN) or scheduled (NAME_NEEDED).
 *
 * Only valid while discovery is active and for an address present in the
 * inquiry cache with an unknown name state.
 */
2801 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2804 struct mgmt_cp_confirm_name *cp = data;
2805 struct inquiry_entry *e;
2808 BT_DBG("%s", hdev->name);
2812 if (!hci_discovery_active(hdev)) {
2813 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2814 MGMT_STATUS_FAILED);
2818 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2820 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2821 MGMT_STATUS_INVALID_PARAMS);
2825 if (cp->name_known) {
2826 e->name_state = NAME_KNOWN;
2829 e->name_state = NAME_NEEDED;
2830 hci_inquiry_cache_update_resolve(hdev, e);
2833 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2837 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the adapter blacklist.
 * Completes synchronously: SUCCESS when the blacklist insert worked,
 * FAILED otherwise; an invalid address type is rejected up front.
 */
2841 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2844 struct mgmt_cp_block_device *cp = data;
2848 BT_DBG("%s", hdev->name);
2850 if (!bdaddr_type_is_valid(cp->addr.type))
2851 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2852 MGMT_STATUS_INVALID_PARAMS,
2853 &cp->addr, sizeof(cp->addr));
2857 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2859 status = MGMT_STATUS_FAILED;
2861 status = MGMT_STATUS_SUCCESS;
2863 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2864 &cp->addr, sizeof(cp->addr));
2866 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the adapter
 * blacklist.  An address not on the list yields INVALID_PARAMS.
 */
2871 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2874 struct mgmt_cp_unblock_device *cp = data;
2878 BT_DBG("%s", hdev->name);
2880 if (!bdaddr_type_is_valid(cp->addr.type))
2881 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2882 MGMT_STATUS_INVALID_PARAMS,
2883 &cp->addr, sizeof(cp->addr));
2887 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2889 status = MGMT_STATUS_INVALID_PARAMS;
2891 status = MGMT_STATUS_SUCCESS;
2893 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2894 &cp->addr, sizeof(cp->addr));
2896 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI (Device Identification)
 * profile record — source, vendor, product, version — and refresh the
 * controller state via an hci_request.
 *
 * Source values above 0x0002 are invalid per the DI spec.  The reply is
 * sent before the request runs, since the cached values are already
 * committed.
 */
2901 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2904 struct mgmt_cp_set_device_id *cp = data;
2905 struct hci_request req;
2909 BT_DBG("%s", hdev->name);
2911 source = __le16_to_cpu(cp->source);
2913 if (source > 0x0002)
2914 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2915 MGMT_STATUS_INVALID_PARAMS);
2919 hdev->devid_source = source;
2920 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2921 hdev->devid_product = __le16_to_cpu(cp->product);
2922 hdev->devid_version = __le16_to_cpu(cp->version);
2924 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2926 hci_req_init(&req, hdev);
2928 hci_req_run(&req, NULL);
2930 hci_dev_unlock(hdev);
/* hci_request completion for Set Fast Connectable: on error reply with the
 * translated status; on success flip HCI_FAST_CONNECTABLE to match the
 * requested value, send the settings response, and broadcast the new
 * settings to other sockets.
 */
2935 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2937 struct pending_cmd *cmd;
2939 BT_DBG("status 0x%02x", status);
2943 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2948 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2949 mgmt_status(status));
2951 struct mgmt_mode *cp = cmd->param;
2954 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2956 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2958 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2959 new_settings(hdev, cmd->sk);
2962 mgmt_pending_remove(cmd);
2965 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page scan
 * parameters on the controller.
 *
 * Requires a BR/EDR controller of at least Bluetooth 1.2, a boolean value,
 * a powered adapter, and the connectable setting enabled.  A request that
 * matches the current state replies immediately; otherwise a pending
 * command is queued and the write runs via an hci_request, completed from
 * fast_connectable_complete().
 */
2968 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2969 void *data, u16 len)
2971 struct mgmt_mode *cp = data;
2972 struct pending_cmd *cmd;
2973 struct hci_request req;
2976 BT_DBG("%s", hdev->name);
/* Interlaced page scan needs BT 1.2 or later. */
2978 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
2979 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2980 MGMT_STATUS_NOT_SUPPORTED);
2982 if (cp->val != 0x00 && cp->val != 0x01)
2983 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2984 MGMT_STATUS_INVALID_PARAMS);
2986 if (!hdev_is_powered(hdev))
2987 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2988 MGMT_STATUS_NOT_POWERED);
2990 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2991 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2992 MGMT_STATUS_REJECTED);
2996 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
2997 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op request: current state already matches. */
3002 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3003 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3008 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3015 hci_req_init(&req, hdev);
3017 write_fast_connectable(&req, cp->val);
3019 err = hci_req_run(&req, fast_connectable_complete);
3021 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3022 MGMT_STATUS_FAILED);
3023 mgmt_pending_remove(cmd);
3027 hci_dev_unlock(hdev);
/* Validate a long term key entry from userspace: authenticated and master
 * must be booleans and the address type must be an LE type.
 */
3032 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3034 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3036 if (key->master != 0x00 && key->master != 0x01)
3038 if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the adapter's stored LE
 * long term keys with the list supplied by userspace.
 *
 * The payload length must match key_count exactly and every entry must
 * pass ltk_is_valid() before any state changes.  The existing LTK store
 * is then cleared and each key is added via hci_add_ltk().
 */
3043 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3044 void *cp_data, u16 len)
3046 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3047 u16 key_count, expected_len;
3050 key_count = __le16_to_cpu(cp->key_count);
/* Exact-length check guards against a forged key_count walking past
 * the end of the received buffer.
 */
3052 expected_len = sizeof(*cp) + key_count *
3053 sizeof(struct mgmt_ltk_info);
3054 if (expected_len != len) {
3055 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3057 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3058 MGMT_STATUS_INVALID_PARAMS);
3061 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate all entries before mutating any state. */
3063 for (i = 0; i < key_count; i++) {
3064 struct mgmt_ltk_info *key = &cp->keys[i];
3066 if (!ltk_is_valid(key))
3067 return cmd_status(sk, hdev->id,
3068 MGMT_OP_LOAD_LONG_TERM_KEYS,
3069 MGMT_STATUS_INVALID_PARAMS);
3074 hci_smp_ltks_clear(hdev);
3076 for (i = 0; i < key_count; i++) {
3077 struct mgmt_ltk_info *key = &cp->keys[i];
3083 type = HCI_SMP_LTK_SLAVE;
3085 hci_add_ltk(hdev, &key->addr.bdaddr,
3086 bdaddr_to_le(key->addr.type),
3087 type, 0, key->authenticated, key->val,
3088 key->enc_size, key->ediv, key->rand);
3091 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3094 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by MGMT_OP_* opcode (slot 0 is
 * unused).  Each entry carries the handler, whether the payload length is
 * variable (minimum size) or fixed (exact size), and the expected size.
 * mgmt_control() validates the opcode and length against this table.
 */
3099 static const struct mgmt_handler {
3100 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3104 } mgmt_handlers[] = {
3105 { NULL }, /* 0x0000 (no command) */
3106 { read_version, false, MGMT_READ_VERSION_SIZE },
3107 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3108 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3109 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3110 { set_powered, false, MGMT_SETTING_SIZE },
3111 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3112 { set_connectable, false, MGMT_SETTING_SIZE },
3113 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3114 { set_pairable, false, MGMT_SETTING_SIZE },
3115 { set_link_security, false, MGMT_SETTING_SIZE },
3116 { set_ssp, false, MGMT_SETTING_SIZE },
3117 { set_hs, false, MGMT_SETTING_SIZE },
3118 { set_le, false, MGMT_SETTING_SIZE },
3119 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3120 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3121 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3122 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3123 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3124 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3125 { disconnect, false, MGMT_DISCONNECT_SIZE },
3126 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3127 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3128 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3129 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3130 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3131 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3132 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3133 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3134 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3135 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3136 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3137 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3138 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3139 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3140 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3141 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3142 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3143 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3144 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3145 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for mgmt control messages arriving on an HCI control
 * socket. Copies the whole message into a kernel buffer, parses the
 * mgmt_hdr (opcode/index/len, all little-endian on the wire), resolves
 * the controller index to an hci_dev reference, validates the opcode
 * against mgmt_handlers[] and the payload length against the handler's
 * declared size, then dispatches to handler->func. Returns the number
 * of bytes consumed on success or a negative errno / sends a
 * cmd_status error back to the socket on failure.
 * NOTE(review): buffer free, hci_dev_put and the done/failed labels are
 * elided in this listing — verify cleanup paths against the full file. */
3149 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3153 struct mgmt_hdr *hdr;
3154 u16 opcode, index, len;
3155 struct hci_dev *hdev = NULL;
3156 const struct mgmt_handler *handler;
3159 BT_DBG("got %zu bytes", msglen);
/* A message shorter than the header cannot be parsed at all. */
3161 if (msglen < sizeof(*hdr))
3164 buf = kmalloc(msglen, GFP_KERNEL);
3168 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3174 opcode = __le16_to_cpu(hdr->opcode);
3175 index = __le16_to_cpu(hdr->index);
3176 len = __le16_to_cpu(hdr->len);
/* The header's len field must account for exactly the bytes that
 * follow the header. */
3178 if (len != msglen - sizeof(*hdr)) {
3183 if (index != MGMT_INDEX_NONE) {
3184 hdev = hci_dev_get(index);
3186 err = cmd_status(sk, index, opcode,
3187 MGMT_STATUS_INVALID_INDEX);
3192 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3193 mgmt_handlers[opcode].func == NULL) {
3194 BT_DBG("Unknown op %u", opcode);
3195 err = cmd_status(sk, index, opcode,
3196 MGMT_STATUS_UNKNOWN_COMMAND);
/* Commands below MGMT_OP_READ_INFO are global (no controller); all
 * others require a valid controller index. Reject the mismatch. */
3200 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3201 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3202 err = cmd_status(sk, index, opcode,
3203 MGMT_STATUS_INVALID_INDEX);
3207 handler = &mgmt_handlers[opcode];
/* Fixed-size commands must match exactly; variable-size ones must be
 * at least the declared minimum. */
3209 if ((handler->var_len && len < handler->data_len) ||
3210 (!handler->var_len && len != handler->data_len)) {
3211 err = cmd_status(sk, index, opcode,
3212 MGMT_STATUS_INVALID_PARAMS);
3217 mgmt_init_hdev(sk, hdev);
/* Command parameters start right after the fixed header. */
3219 cp = buf + sizeof(*hdr);
3221 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach() callback: send a command-status response with
 * the status code pointed to by @data (a u8 *, per the callers that
 * pass e.g. &status) to the command's originating socket, then remove
 * the pending command. */
3235 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3239 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3240 mgmt_pending_remove(cmd);
/* Notify mgmt listeners that a new controller index appeared. Skipped
 * for controllers that mgmt_valid_hdev() rejects (e.g. non-primary
 * controllers — presumably AMP; confirm against mgmt_valid_hdev). */
3243 int mgmt_index_added(struct hci_dev *hdev)
3245 if (!mgmt_valid_hdev(hdev))
3248 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Notify listeners that a controller index went away. All pending
 * commands for the controller (opcode 0 = match any) are first failed
 * with INVALID_INDEX so their sockets are not left waiting. */
3251 int mgmt_index_removed(struct hci_dev *hdev)
3253 u8 status = MGMT_STATUS_INVALID_INDEX;
3255 if (!mgmt_valid_hdev(hdev))
3258 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3260 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3265 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback used by the various set_* completion
 * paths: answer each pending command with the current settings, detach
 * it from the pending list, and remember the first socket seen in the
 * cmd_lookup so the caller can skip it when broadcasting new_settings
 * (the command originator already got its reply). A reference is taken
 * on the saved socket; the caller is responsible for the sock_put. */
3269 static void settings_rsp(struct pending_cmd *cmd, void *data)
3271 struct cmd_lookup *match = data;
3273 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3275 list_del(&cmd->list);
3277 if (match->sk == NULL) {
3278 match->sk = cmd->sk;
3279 sock_hold(match->sk);
/* Freed with mgmt_pending_free() (not _remove) because list_del was
 * already done above. */
3282 mgmt_pending_free(cmd);
/* Queue a Write Scan Enable command reflecting the current
 * connectable/discoverable flags: page scan for connectable, inquiry
 * scan for discoverable. (The SCAN_PAGE bit for HCI_CONNECTABLE is on
 * an elided line; only the inquiry half is visible here.) */
3285 static void set_bredr_scan(struct hci_request *req)
3287 struct hci_dev *hdev = req->hdev;
3290 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3292 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3293 scan |= SCAN_INQUIRY;
3296 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): reply to all pending SET_POWERED commands and
 * broadcast the new settings, skipping the socket captured in match. */
3299 static void powered_complete(struct hci_dev *hdev, u8 status)
3301 struct cmd_lookup match = { NULL, hdev };
3303 BT_DBG("status 0x%02x", status);
/* hdev lock is taken on an elided line before walking the list. */
3307 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3309 new_settings(hdev, match.sk);
3311 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings controller state in sync
 * with the mgmt dev_flags after power-on: SSP mode, LE host support,
 * authentication (link security) and BR/EDR scan enable. Returns the
 * result of hci_req_run(); powered_complete() runs when the request
 * finishes. Each command is only queued when the controller's current
 * state differs from the desired flag, to avoid redundant commands. */
3317 static int powered_update_hci(struct hci_dev *hdev)
3319 struct hci_request req;
3322 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the controller's
 * host features don't have it yet. */
3324 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3325 !lmp_host_ssp_capable(hdev)) {
3328 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3331 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3332 struct hci_cp_write_le_host_supported cp;
3335 cp.simul = lmp_le_br_capable(hdev);
3337 /* Check first if we already have the right
3338 * host state (host features set)
3340 if (cp.le != lmp_host_le_capable(hdev) ||
3341 cp.simul != lmp_host_le_br_capable(hdev))
3342 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Sync authentication enable with the LINK_SECURITY flag. */
3346 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3347 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3348 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3349 sizeof(link_sec), &link_sec);
3351 if (lmp_bredr_capable(hdev)) {
3352 set_bredr_scan(&req);
3358 return hci_req_run(&req, powered_complete);
/* Called when the controller's power state changes. On power-up,
 * powered_update_hci() queues the sync request and (when it succeeds)
 * replies are deferred to powered_complete(); otherwise pending
 * SET_POWERED commands are answered directly. On power-down all
 * pending commands are failed with NOT_POWERED, and a zero class-of-
 * device event is emitted if the stored CoD was non-zero. Finally the
 * new settings are broadcast. Returns the new_settings() result. */
3361 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3363 struct cmd_lookup match = { NULL, hdev };
3364 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3365 u8 zero_cod[] = { 0, 0, 0 };
/* Nothing to do for controllers not managed via mgmt. */
3368 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3372 if (powered_update_hci(hdev) == 0)
3375 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-down path: answer SET_POWERED, fail everything else. */
3380 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3381 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3383 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3384 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3385 zero_cod, sizeof(zero_cod), NULL);
3388 err = new_settings(hdev, match.sk);
/* Controller reported a discoverable-state change: atomically update
 * the HCI_DISCOVERABLE flag, answer pending SET_DISCOVERABLE commands,
 * and (when the flag actually changed — tracked via the elided
 * `changed` updates) broadcast new settings. */
3396 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3398 struct cmd_lookup match = { NULL, hdev };
3399 bool changed = false;
3403 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3406 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3410 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3414 err = new_settings(hdev, match.sk);
/* Controller reported a connectable-state change: update the
 * HCI_CONNECTABLE flag and, if it changed, broadcast new settings,
 * skipping the socket of any pending SET_CONNECTABLE command. */
3422 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3424 struct pending_cmd *cmd;
3425 bool changed = false;
3429 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3432 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3436 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3439 err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* Write Scan Enable failed on the controller: fail whichever pending
 * commands correspond to the scan bits that were being set — page scan
 * maps to SET_CONNECTABLE, inquiry scan to SET_DISCOVERABLE. */
3444 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3446 u8 mgmt_err = mgmt_status(status);
3448 if (scan & SCAN_PAGE)
3449 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3450 cmd_status_rsp, &mgmt_err);
3452 if (scan & SCAN_INQUIRY)
3453 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3454 cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * store_hint tells userspace whether it should persist the key. */
3459 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3462 struct mgmt_ev_new_link_key ev;
3464 memset(&ev, 0, sizeof(ev));
3466 ev.store_hint = persistent;
3467 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3468 ev.key.addr.type = BDADDR_BREDR;
3469 ev.key.type = key->type;
3470 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3471 ev.key.pin_len = key->pin_len;
3473 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event for an SMP LTK. The LE address type
 * is translated to the mgmt bdaddr type; the master flag for
 * HCI_SMP_LTK keys is set on an elided line. */
3476 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3478 struct mgmt_ev_new_long_term_key ev;
3480 memset(&ev, 0, sizeof(ev));
3482 ev.store_hint = persistent;
3483 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3484 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3485 ev.key.authenticated = key->authenticated;
3486 ev.key.enc_size = key->enc_size;
3487 ev.key.ediv = key->ediv;
3489 if (key->type == HCI_SMP_LTK)
3492 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3493 memcpy(ev.key.val, key->val, sizeof(key->val));
3495 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit a Device Connected event. The remote name (when present) and a
 * non-zero class of device are appended to the event's EIR blob as
 * EIR_NAME_COMPLETE and EIR_CLASS_OF_DEV fields; eir_len records the
 * total appended length. The stack buffer declaration is elided. */
3499 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3500 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3504 struct mgmt_ev_device_connected *ev = (void *) buf;
3507 bacpy(&ev->addr.bdaddr, bdaddr);
3508 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3510 ev->flags = __cpu_to_le32(flags);
3513 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append class of device when it is not all zeroes. */
3516 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3517 eir_len = eir_append_data(ev->eir, eir_len,
3518 EIR_CLASS_OF_DEV, dev_class, 3);
3520 ev->eir_len = cpu_to_le16(eir_len);
3522 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3523 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback for completed disconnects: send the
 * success reply carrying the target address back to the command's
 * socket, stash that socket via the sock ** in @data (so the caller
 * can skip it when emitting the Device Disconnected event), and drop
 * the pending command. */
3526 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3528 struct mgmt_cp_disconnect *cp = cmd->param;
3529 struct sock **sk = data;
3530 struct mgmt_rp_disconnect rp;
3532 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3533 rp.addr.type = cp->addr.type;
3535 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3541 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command once its connection is gone — notify listeners that the
 * device was unpaired, then send the success reply and remove the
 * pending entry. @data is the hci_dev. */
3544 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3546 struct hci_dev *hdev = data;
3547 struct mgmt_cp_unpair_device *cp = cmd->param;
3548 struct mgmt_rp_unpair_device rp;
3550 memset(&rp, 0, sizeof(rp));
3551 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3552 rp.addr.type = cp->addr.type;
3554 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3556 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3558 mgmt_pending_remove(cmd);
/* A connection went down: first complete any pending DISCONNECT
 * commands (capturing the originator's socket so it is excluded from
 * the broadcast), then emit the Device Disconnected event, and finally
 * finish any UNPAIR_DEVICE commands that were waiting on this
 * disconnection. The reason assignment into ev is elided here. */
3561 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3562 u8 link_type, u8 addr_type, u8 reason)
3564 struct mgmt_ev_device_disconnected ev;
3565 struct sock *sk = NULL;
3568 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3570 bacpy(&ev.addr.bdaddr, bdaddr);
3571 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3574 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3580 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: still flush pending UNPAIR_DEVICE
 * commands, then, if a DISCONNECT command is pending, reply to it with
 * the mapped failure status and the target address. */
3586 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3587 u8 link_type, u8 addr_type, u8 status)
3589 struct mgmt_rp_disconnect rp;
3590 struct pending_cmd *cmd;
3593 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3596 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3600 bacpy(&rp.addr.bdaddr, bdaddr);
3601 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3603 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3604 mgmt_status(status), &rp, sizeof(rp));
3606 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the mapped HCI status for an
 * outgoing connection attempt that did not succeed. */
3611 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3612 u8 addr_type, u8 status)
3614 struct mgmt_ev_connect_failed ev;
3616 bacpy(&ev.addr.bdaddr, bdaddr);
3617 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3618 ev.status = mgmt_status(status);
3620 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a controller PIN code request to userspace. BR/EDR only
 * (PIN pairing is a BR/EDR mechanism); the `secure` flag assignment
 * into the event is elided in this listing. */
3623 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3625 struct mgmt_ev_pin_code_request ev;
3627 bacpy(&ev.addr.bdaddr, bdaddr);
3628 ev.addr.type = BDADDR_BREDR;
3631 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* Controller acknowledged our PIN Code Reply: complete the pending
 * PIN_CODE_REPLY command with the mapped status and target address. */
3635 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3638 struct pending_cmd *cmd;
3639 struct mgmt_rp_pin_code_reply rp;
3642 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3646 bacpy(&rp.addr.bdaddr, bdaddr);
3647 rp.addr.type = BDADDR_BREDR;
3649 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3650 mgmt_status(status), &rp, sizeof(rp));
3652 mgmt_pending_remove(cmd);
/* Controller acknowledged our PIN Code Negative Reply: complete the
 * pending PIN_CODE_NEG_REPLY command analogously to the positive
 * reply path above. */
3657 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3660 struct pending_cmd *cmd;
3661 struct mgmt_rp_pin_code_reply rp;
3664 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3668 bacpy(&rp.addr.bdaddr, bdaddr);
3669 rp.addr.type = BDADDR_BREDR;
3671 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3672 mgmt_status(status), &rp, sizeof(rp));
3674 mgmt_pending_remove(cmd);
/* Forward an SSP user-confirmation request to userspace, including the
 * confirm_hint (whether a simple yes/no suffices) — the passkey value
 * assignment into the event is elided in this listing. */
3679 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3680 u8 link_type, u8 addr_type, __le32 value,
3683 struct mgmt_ev_user_confirm_request ev;
3685 BT_DBG("%s", hdev->name);
3687 bacpy(&ev.addr.bdaddr, bdaddr);
3688 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3689 ev.confirm_hint = confirm_hint;
3692 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Forward an SSP passkey-entry request (user must type the passkey)
 * to userspace. */
3696 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3697 u8 link_type, u8 addr_type)
3699 struct mgmt_ev_user_passkey_request ev;
3701 BT_DBG("%s", hdev->name);
3703 bacpy(&ev.addr.bdaddr, bdaddr);
3704 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3706 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey
 * (neg-)reply commands: find the pending command of @opcode, reply
 * with the mapped status and remote address, and remove it. */
3710 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3711 u8 link_type, u8 addr_type, u8 status,
3714 struct pending_cmd *cmd;
3715 struct mgmt_rp_user_confirm_reply rp;
3718 cmd = mgmt_pending_find(opcode, hdev);
3722 bacpy(&rp.addr.bdaddr, bdaddr);
3723 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3724 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3727 mgmt_pending_remove(cmd);
/* Thin wrappers mapping each user confirm/passkey (neg-)reply
 * completion onto user_pairing_resp_complete() with the matching
 * mgmt opcode. */
3732 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3733 u8 link_type, u8 addr_type, u8 status)
3735 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3736 status, MGMT_OP_USER_CONFIRM_REPLY);
3739 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3740 u8 link_type, u8 addr_type, u8 status)
3742 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3744 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3747 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3748 u8 link_type, u8 addr_type, u8 status)
3750 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3751 status, MGMT_OP_USER_PASSKEY_REPLY);
3754 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3755 u8 link_type, u8 addr_type, u8 status)
3757 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3759 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event: the passkey to display to the user and
 * how many digits the remote side has entered so far. */
3762 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3763 u8 link_type, u8 addr_type, u32 passkey,
3766 struct mgmt_ev_passkey_notify ev;
3768 BT_DBG("%s", hdev->name);
3770 bacpy(&ev.addr.bdaddr, bdaddr);
3771 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3772 ev.passkey = __cpu_to_le32(passkey);
3773 ev.entered = entered;
3775 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event with the mapped HCI status. */
3778 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3779 u8 addr_type, u8 status)
3781 struct mgmt_ev_auth_failed ev;
3783 bacpy(&ev.addr.bdaddr, bdaddr);
3784 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3785 ev.status = mgmt_status(status);
3787 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Write Authentication Enable completed. On failure, fail all pending
 * SET_LINK_SECURITY commands with the mapped status. On success,
 * mirror the controller's HCI_AUTH flag into HCI_LINK_SECURITY,
 * answer pending commands, and broadcast new settings if the flag
 * changed (the `changed` bookkeeping lines are elided). */
3790 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3792 struct cmd_lookup match = { NULL, hdev };
3793 bool changed = false;
3797 u8 mgmt_err = mgmt_status(status);
3798 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3799 cmd_status_rsp, &mgmt_err);
3803 if (test_bit(HCI_AUTH, &hdev->flags)) {
3804 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3807 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3811 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3815 err = new_settings(hdev, match.sk);
/* Queue a Write EIR command with an all-zero payload to clear the
 * controller's extended inquiry response, and clear the cached copy in
 * hdev->eir. No-op when the controller lacks extended inquiry. */
3823 static void clear_eir(struct hci_request *req)
3825 struct hci_dev *hdev = req->hdev;
3826 struct hci_cp_write_eir cp;
3828 if (!lmp_ext_inq_capable(hdev))
3831 memset(hdev->eir, 0, sizeof(hdev->eir));
3833 memset(&cp, 0, sizeof(cp));
3835 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode completed. On failure: if we were enabling and the
 * flag was already set locally, roll it back and broadcast settings;
 * then fail pending SET_SSP commands. On success: sync the
 * HCI_SSP_ENABLED flag to @enable, answer pending commands, broadcast
 * changed settings, and queue a follow-up request that either updates
 * or clears the EIR depending on the final SSP state (the update_eir
 * branch is elided in this listing). */
3838 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3840 struct cmd_lookup match = { NULL, hdev };
3841 struct hci_request req;
3842 bool changed = false;
3846 u8 mgmt_err = mgmt_status(status);
3848 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3850 err = new_settings(hdev, NULL);
3852 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3859 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3862 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3866 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3869 err = new_settings(hdev, match.sk);
3874 hci_req_init(&req, hdev);
3876 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3881 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: remember (and take a reference on)
 * the first pending command's socket in the cmd_lookup, without
 * replying or removing anything. */
3886 static void sk_lookup(struct pending_cmd *cmd, void *data)
3888 struct cmd_lookup *match = data;
3890 if (match->sk == NULL) {
3891 match->sk = cmd->sk;
3892 sock_hold(match->sk);
/* Class-of-device update finished: find the socket of whichever
 * command triggered it (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) so it
 * can be excluded, and on success broadcast the new class via
 * CLASS_OF_DEV_CHANGED (the status check guarding the event is on an
 * elided line). */
3896 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3899 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3902 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3903 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3904 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3907 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Local name write finished: build a LOCAL_NAME_CHANGED event carrying
 * the new name and the stored short name. If no SET_LOCAL_NAME command
 * is pending, the name change came from the controller/init path and
 * hdev->dev_name is updated from @name. Events are suppressed while a
 * SET_POWERED command is pending (power-on writes the name as part of
 * init). */
3916 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3918 struct mgmt_cp_set_local_name ev;
3919 struct pending_cmd *cmd;
3924 memset(&ev, 0, sizeof(ev));
3925 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3926 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3928 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3930 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3932 /* If this is a HCI command related to powering on the
3933 * HCI dev don't send any mgmt signals.
3935 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3939 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3940 cmd ? cmd->sk : NULL);
/* Read Local OOB Data completed: reply to the pending command either
 * with an error status, or with the hash + randomizer values on
 * success, then remove the pending entry. */
3943 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3944 u8 *randomizer, u8 status)
3946 struct pending_cmd *cmd;
3949 BT_DBG("%s status %u", hdev->name, status);
3951 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3956 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3957 mgmt_status(status));
3959 struct mgmt_rp_read_local_oob_data rp;
3961 memcpy(rp.hash, hash, sizeof(rp.hash));
3962 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3964 err = cmd_complete(cmd->sk, hdev->id,
3965 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3969 mgmt_pending_remove(cmd);
/* Write LE Host Supported completed. Mirrors the SSP completion
 * pattern: on failure, roll back a premature HCI_LE_ENABLED flag (if
 * enabling) and fail pending SET_LE commands; on success, sync the
 * flag to @enable, answer pending commands, and broadcast new settings
 * when changed (bookkeeping lines elided). */
3974 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3976 struct cmd_lookup match = { NULL, hdev };
3977 bool changed = false;
3981 u8 mgmt_err = mgmt_status(status);
3983 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3985 err = new_settings(hdev, NULL);
3987 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3994 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3997 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4001 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4004 err = new_settings(hdev, match.sk);
/* Emit a Device Found event for a discovery result. The caller's EIR
 * data is copied into the event; if it lacks a class-of-device field
 * and @dev_class is provided, one is appended (hence the 5 spare bytes
 * reserved in the size check: 2-byte EIR header + 3-byte CoD).
 * cfm_name and !ssp set the CONFIRM_NAME / LEGACY_PAIRING flag bits
 * (their `if` headers are elided in this listing). */
4012 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4013 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4014 ssp, u8 *eir, u16 eir_len)
4017 struct mgmt_ev_device_found *ev = (void *) buf;
4020 /* Leave 5 bytes for a potential CoD field */
4021 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4024 memset(buf, 0, sizeof(buf));
4026 bacpy(&ev->addr.bdaddr, bdaddr);
4027 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4030 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4032 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4035 memcpy(ev->eir, eir, eir_len);
4037 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4038 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4041 ev->eir_len = cpu_to_le16(eir_len);
4042 ev_size = sizeof(*ev) + eir_len;
4044 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the remote's resolved name
 * (used after a remote name request during discovery). The buffer
 * reserves space for the name plus the 2-byte EIR type/length header. */
4047 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4048 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4050 struct mgmt_ev_device_found *ev;
4051 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4054 ev = (struct mgmt_ev_device_found *) buf;
4056 memset(buf, 0, sizeof(buf));
4058 bacpy(&ev->addr.bdaddr, bdaddr);
4059 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4062 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4065 ev->eir_len = cpu_to_le16(eir_len);
4067 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4068 sizeof(*ev) + eir_len, NULL);
/* Discovery could not be started: reset discovery state to STOPPED and
 * complete the pending START_DISCOVERY command with the mapped failure
 * status, echoing back the requested discovery type. */
4071 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4073 struct pending_cmd *cmd;
4077 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4079 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4083 type = hdev->discovery.type;
4085 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4086 &type, sizeof(type));
4087 mgmt_pending_remove(cmd);
/* Stopping discovery failed: complete the pending STOP_DISCOVERY
 * command with the mapped failure status and the discovery type. */
4092 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4094 struct pending_cmd *cmd;
4097 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4101 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4102 &hdev->discovery.type, sizeof(hdev->discovery.type));
4103 mgmt_pending_remove(cmd);
/* Discovery state toggled: complete whichever START_ or STOP_DISCOVERY
 * command is pending (replying with the discovery type), then emit a
 * Discovering event with the type and the new on/off state. */
4108 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4110 struct mgmt_ev_discovering ev;
4111 struct pending_cmd *cmd;
4113 BT_DBG("%s discovering %u", hdev->name, discovering);
4116 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4118 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4121 u8 type = hdev->discovery.type;
4123 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4125 mgmt_pending_remove(cmd);
4128 memset(&ev, 0, sizeof(ev));
4129 ev.type = hdev->discovery.type;
4130 ev.discovering = discovering;
4132 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Emit a Device Blocked event, excluding the socket of the pending
 * BLOCK_DEVICE command (its originator gets a command reply instead). */
4135 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4137 struct pending_cmd *cmd;
4138 struct mgmt_ev_device_blocked ev;
4140 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4142 bacpy(&ev.addr.bdaddr, bdaddr);
4143 ev.addr.type = type;
4145 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4146 cmd ? cmd->sk : NULL);
/* Emit a Device Unblocked event, excluding the socket of the pending
 * UNBLOCK_DEVICE command — mirror image of mgmt_device_blocked(). */
4149 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4151 struct pending_cmd *cmd;
4152 struct mgmt_ev_device_unblocked ev;
4154 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4156 bacpy(&ev.addr.bdaddr, bdaddr);
4157 ev.addr.type = type;
4159 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4160 cmd ? cmd->sk : NULL);
/* Module parameter exposing the High Speed (AMP) support toggle;
 * enable_hs itself is declared elsewhere in this file (elided here). */
4163 module_param(enable_hs, bool, 0644);
4164 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");