/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* These LE scan and inquiry parameters were chosen according to LE General
 * Discovery Procedure specification.
 */
#define LE_SCAN_TYPE			0x01
#define LE_SCAN_WIN			0x12
#define LE_SCAN_INT			0x12
#define LE_SCAN_TIMEOUT_LE_ONLY		10240	/* TGAP(gen_disc_scan_min) */
#define LE_SCAN_TIMEOUT_BREDR_LE	5120	/* TGAP(100)/2 */

#define INQUIRY_LEN_BREDR		0x08	/* TGAP(100) */
#define INQUIRY_LEN_BREDR_LE		0x04	/* TGAP(100)/2 */

/* How long resolved service information stays cached before update_class()
 * and update_eir() are allowed to push new values to the controller.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A device counts as powered for mgmt purposes only when HCI_UP is set
 * AND it is not merely up because of the auto-power-on grace period.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
388 settings |= MGMT_SETTING_FAST_CONNECTABLE;
389 settings |= MGMT_SETTING_DISCOVERABLE;
390 settings |= MGMT_SETTING_BREDR;
391 settings |= MGMT_SETTING_LINK_SECURITY;
395 settings |= MGMT_SETTING_HS;
397 if (lmp_le_capable(hdev))
398 settings |= MGMT_SETTING_LE;
403 static u32 get_current_settings(struct hci_dev *hdev)
407 if (hdev_is_powered(hdev))
408 settings |= MGMT_SETTING_POWERED;
410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_CONNECTABLE;
413 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_FAST_CONNECTABLE;
416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_DISCOVERABLE;
419 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_PAIRABLE;
422 if (lmp_bredr_capable(hdev))
423 settings |= MGMT_SETTING_BREDR;
425 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LE;
428 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
429 settings |= MGMT_SETTING_LINK_SECURITY;
431 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_SSP;
434 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_HS;
440 #define PNP_INFO_SVCLASS_ID 0x1200
442 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
444 u8 *ptr = data, *uuids_start = NULL;
445 struct bt_uuid *uuid;
450 list_for_each_entry(uuid, &hdev->uuids, list) {
453 if (uuid->size != 16)
456 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
460 if (uuid16 == PNP_INFO_SVCLASS_ID)
466 uuids_start[1] = EIR_UUID16_ALL;
470 /* Stop if not enough space to put next UUID */
471 if ((ptr - data) + sizeof(u16) > len) {
472 uuids_start[1] = EIR_UUID16_SOME;
476 *ptr++ = (uuid16 & 0x00ff);
477 *ptr++ = (uuid16 & 0xff00) >> 8;
478 uuids_start[0] += sizeof(uuid16);
484 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
486 u8 *ptr = data, *uuids_start = NULL;
487 struct bt_uuid *uuid;
492 list_for_each_entry(uuid, &hdev->uuids, list) {
493 if (uuid->size != 32)
499 uuids_start[1] = EIR_UUID32_ALL;
503 /* Stop if not enough space to put next UUID */
504 if ((ptr - data) + sizeof(u32) > len) {
505 uuids_start[1] = EIR_UUID32_SOME;
509 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
511 uuids_start[0] += sizeof(u32);
517 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
519 u8 *ptr = data, *uuids_start = NULL;
520 struct bt_uuid *uuid;
525 list_for_each_entry(uuid, &hdev->uuids, list) {
526 if (uuid->size != 128)
532 uuids_start[1] = EIR_UUID128_ALL;
536 /* Stop if not enough space to put next UUID */
537 if ((ptr - data) + 16 > len) {
538 uuids_start[1] = EIR_UUID128_SOME;
542 memcpy(ptr, uuid->uuid, 16);
544 uuids_start[0] += 16;
550 static void create_eir(struct hci_dev *hdev, u8 *data)
555 name_len = strlen(hdev->dev_name);
561 ptr[1] = EIR_NAME_SHORT;
563 ptr[1] = EIR_NAME_COMPLETE;
565 /* EIR Data length */
566 ptr[0] = name_len + 1;
568 memcpy(ptr + 2, hdev->dev_name, name_len);
570 ptr += (name_len + 2);
573 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
575 ptr[1] = EIR_TX_POWER;
576 ptr[2] = (u8) hdev->inq_tx_power;
581 if (hdev->devid_source > 0) {
583 ptr[1] = EIR_DEVICE_ID;
585 put_unaligned_le16(hdev->devid_source, ptr + 2);
586 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
587 put_unaligned_le16(hdev->devid_product, ptr + 6);
588 put_unaligned_le16(hdev->devid_version, ptr + 8);
593 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
595 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
598 static void update_eir(struct hci_request *req)
600 struct hci_dev *hdev = req->hdev;
601 struct hci_cp_write_eir cp;
603 if (!hdev_is_powered(hdev))
606 if (!lmp_ext_inq_capable(hdev))
609 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
612 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
615 memset(&cp, 0, sizeof(cp));
617 create_eir(hdev, cp.data);
619 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
622 memcpy(hdev->eir, cp.data, sizeof(cp.data));
624 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
627 static u8 get_service_classes(struct hci_dev *hdev)
629 struct bt_uuid *uuid;
632 list_for_each_entry(uuid, &hdev->uuids, list)
633 val |= uuid->svc_hint;
638 static void update_class(struct hci_request *req)
640 struct hci_dev *hdev = req->hdev;
643 BT_DBG("%s", hdev->name);
645 if (!hdev_is_powered(hdev))
648 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
651 cod[0] = hdev->minor_class;
652 cod[1] = hdev->major_class;
653 cod[2] = get_service_classes(hdev);
655 if (memcmp(cod, hdev->dev_class, 3) == 0)
658 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
661 static void service_cache_off(struct work_struct *work)
663 struct hci_dev *hdev = container_of(work, struct hci_dev,
665 struct hci_request req;
667 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
670 hci_req_init(&req, hdev);
677 hci_dev_unlock(hdev);
679 hci_req_run(&req, NULL);
682 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
684 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
687 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
689 /* Non-mgmt controlled devices get this bit set
690 * implicitly so that pairing works for them, however
691 * for mgmt we require user-space to explicitly enable
694 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
697 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
698 void *data, u16 data_len)
700 struct mgmt_rp_read_info rp;
702 BT_DBG("sock %p %s", sk, hdev->name);
706 memset(&rp, 0, sizeof(rp));
708 bacpy(&rp.bdaddr, &hdev->bdaddr);
710 rp.version = hdev->hci_ver;
711 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
713 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
714 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
716 memcpy(rp.dev_class, hdev->dev_class, 3);
718 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
719 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
721 hci_dev_unlock(hdev);
723 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
727 static void mgmt_pending_free(struct pending_cmd *cmd)
734 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
735 struct hci_dev *hdev, void *data,
738 struct pending_cmd *cmd;
740 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
744 cmd->opcode = opcode;
745 cmd->index = hdev->id;
747 cmd->param = kmalloc(len, GFP_KERNEL);
754 memcpy(cmd->param, data, len);
759 list_add(&cmd->list, &hdev->mgmt_pending);
764 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
765 void (*cb)(struct pending_cmd *cmd,
769 struct pending_cmd *cmd, *tmp;
771 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
772 if (opcode > 0 && cmd->opcode != opcode)
779 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
781 struct pending_cmd *cmd;
783 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
784 if (cmd->opcode == opcode)
791 static void mgmt_pending_remove(struct pending_cmd *cmd)
793 list_del(&cmd->list);
794 mgmt_pending_free(cmd);
797 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
799 __le32 settings = cpu_to_le32(get_current_settings(hdev));
801 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
805 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
808 struct mgmt_mode *cp = data;
809 struct pending_cmd *cmd;
812 BT_DBG("request for %s", hdev->name);
814 if (cp->val != 0x00 && cp->val != 0x01)
815 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
816 MGMT_STATUS_INVALID_PARAMS);
820 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
821 cancel_delayed_work(&hdev->power_off);
824 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
826 err = mgmt_powered(hdev, 1);
831 if (!!cp->val == hdev_is_powered(hdev)) {
832 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
836 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
837 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
842 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
849 queue_work(hdev->req_workqueue, &hdev->power_on);
851 queue_work(hdev->req_workqueue, &hdev->power_off.work);
856 hci_dev_unlock(hdev);
860 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
861 struct sock *skip_sk)
864 struct mgmt_hdr *hdr;
866 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
870 hdr = (void *) skb_put(skb, sizeof(*hdr));
871 hdr->opcode = cpu_to_le16(event);
873 hdr->index = cpu_to_le16(hdev->id);
875 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
876 hdr->len = cpu_to_le16(data_len);
879 memcpy(skb_put(skb, data_len), data, data_len);
882 __net_timestamp(skb);
884 hci_send_to_control(skb, skip_sk);
890 static int new_settings(struct hci_dev *hdev, struct sock *skip)
894 ev = cpu_to_le32(get_current_settings(hdev));
896 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
899 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
902 struct mgmt_cp_set_discoverable *cp = data;
903 struct pending_cmd *cmd;
908 BT_DBG("request for %s", hdev->name);
910 if (!lmp_bredr_capable(hdev))
911 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
912 MGMT_STATUS_NOT_SUPPORTED);
914 if (cp->val != 0x00 && cp->val != 0x01)
915 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
916 MGMT_STATUS_INVALID_PARAMS);
918 timeout = __le16_to_cpu(cp->timeout);
919 if (!cp->val && timeout > 0)
920 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
921 MGMT_STATUS_INVALID_PARAMS);
925 if (!hdev_is_powered(hdev) && timeout > 0) {
926 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
927 MGMT_STATUS_NOT_POWERED);
931 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
932 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
933 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
938 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
939 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
940 MGMT_STATUS_REJECTED);
944 if (!hdev_is_powered(hdev)) {
945 bool changed = false;
947 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
948 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
952 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
957 err = new_settings(hdev, sk);
962 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
963 if (hdev->discov_timeout > 0) {
964 cancel_delayed_work(&hdev->discov_off);
965 hdev->discov_timeout = 0;
968 if (cp->val && timeout > 0) {
969 hdev->discov_timeout = timeout;
970 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
971 msecs_to_jiffies(hdev->discov_timeout * 1000));
974 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
978 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
987 scan |= SCAN_INQUIRY;
989 cancel_delayed_work(&hdev->discov_off);
991 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
993 mgmt_pending_remove(cmd);
996 hdev->discov_timeout = timeout;
999 hci_dev_unlock(hdev);
1003 static void write_fast_connectable(struct hci_request *req, bool enable)
1005 struct hci_dev *hdev = req->hdev;
1006 struct hci_cp_write_page_scan_activity acp;
1009 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1013 type = PAGE_SCAN_TYPE_INTERLACED;
1015 /* 160 msec page scan interval */
1016 acp.interval = __constant_cpu_to_le16(0x0100);
1018 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1020 /* default 1.28 sec page scan */
1021 acp.interval = __constant_cpu_to_le16(0x0800);
1024 acp.window = __constant_cpu_to_le16(0x0012);
1026 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1027 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1028 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1031 if (hdev->page_scan_type != type)
1032 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1035 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1037 struct pending_cmd *cmd;
1039 BT_DBG("status 0x%02x", status);
1043 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1047 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1049 mgmt_pending_remove(cmd);
1052 hci_dev_unlock(hdev);
1055 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1058 struct mgmt_mode *cp = data;
1059 struct pending_cmd *cmd;
1060 struct hci_request req;
1064 BT_DBG("request for %s", hdev->name);
1066 if (!lmp_bredr_capable(hdev))
1067 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1068 MGMT_STATUS_NOT_SUPPORTED);
1070 if (cp->val != 0x00 && cp->val != 0x01)
1071 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1072 MGMT_STATUS_INVALID_PARAMS);
1076 if (!hdev_is_powered(hdev)) {
1077 bool changed = false;
1079 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1083 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1085 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1086 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1089 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1094 err = new_settings(hdev, sk);
1099 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1100 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1101 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1106 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1107 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1111 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1122 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1123 hdev->discov_timeout > 0)
1124 cancel_delayed_work(&hdev->discov_off);
1127 hci_req_init(&req, hdev);
1129 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1131 /* If we're going from non-connectable to connectable or
1132 * vice-versa when fast connectable is enabled ensure that fast
1133 * connectable gets disabled. write_fast_connectable won't do
1134 * anything if the page scan parameters are already what they
1137 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1138 write_fast_connectable(&req, false);
1140 err = hci_req_run(&req, set_connectable_complete);
1142 mgmt_pending_remove(cmd);
1145 hci_dev_unlock(hdev);
1149 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1152 struct mgmt_mode *cp = data;
1155 BT_DBG("request for %s", hdev->name);
1157 if (cp->val != 0x00 && cp->val != 0x01)
1158 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1159 MGMT_STATUS_INVALID_PARAMS);
1164 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1166 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1168 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1172 err = new_settings(hdev, sk);
1175 hci_dev_unlock(hdev);
1179 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1182 struct mgmt_mode *cp = data;
1183 struct pending_cmd *cmd;
1187 BT_DBG("request for %s", hdev->name);
1189 if (!lmp_bredr_capable(hdev))
1190 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1191 MGMT_STATUS_NOT_SUPPORTED);
1193 if (cp->val != 0x00 && cp->val != 0x01)
1194 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1195 MGMT_STATUS_INVALID_PARAMS);
1199 if (!hdev_is_powered(hdev)) {
1200 bool changed = false;
1202 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1203 &hdev->dev_flags)) {
1204 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1208 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1213 err = new_settings(hdev, sk);
1218 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1219 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1226 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1227 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1231 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1237 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1239 mgmt_pending_remove(cmd);
1244 hci_dev_unlock(hdev);
1248 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1250 struct mgmt_mode *cp = data;
1251 struct pending_cmd *cmd;
1255 BT_DBG("request for %s", hdev->name);
1257 if (!lmp_ssp_capable(hdev))
1258 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1259 MGMT_STATUS_NOT_SUPPORTED);
1261 if (cp->val != 0x00 && cp->val != 0x01)
1262 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1263 MGMT_STATUS_INVALID_PARAMS);
1269 if (!hdev_is_powered(hdev)) {
1270 bool changed = false;
1272 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1273 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1277 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1282 err = new_settings(hdev, sk);
1287 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1288 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1293 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1294 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1298 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1304 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1306 mgmt_pending_remove(cmd);
1311 hci_dev_unlock(hdev);
1315 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1317 struct mgmt_mode *cp = data;
1319 BT_DBG("request for %s", hdev->name);
1322 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1323 MGMT_STATUS_NOT_SUPPORTED);
1325 if (cp->val != 0x00 && cp->val != 0x01)
1326 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1327 MGMT_STATUS_INVALID_PARAMS);
1330 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1332 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1334 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1337 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1339 struct mgmt_mode *cp = data;
1340 struct hci_cp_write_le_host_supported hci_cp;
1341 struct pending_cmd *cmd;
1345 BT_DBG("request for %s", hdev->name);
1347 if (!lmp_le_capable(hdev))
1348 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1349 MGMT_STATUS_NOT_SUPPORTED);
1351 if (cp->val != 0x00 && cp->val != 0x01)
1352 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1353 MGMT_STATUS_INVALID_PARAMS);
1358 enabled = lmp_host_le_capable(hdev);
1360 if (!hdev_is_powered(hdev) || val == enabled) {
1361 bool changed = false;
1363 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1364 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1368 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1373 err = new_settings(hdev, sk);
1378 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1379 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1384 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1390 memset(&hci_cp, 0, sizeof(hci_cp));
1394 hci_cp.simul = lmp_le_br_capable(hdev);
1397 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1400 mgmt_pending_remove(cmd);
1403 hci_dev_unlock(hdev);
1407 /* This is a helper function to test for pending mgmt commands that can
1408 * cause CoD or EIR HCI commands. We can only allow one such pending
1409 * mgmt command at a time since otherwise we cannot easily track what
1410 * the current values are, will be, and based on that calculate if a new
1411 * HCI command needs to be sent and if yes with what value.
1413 static bool pending_eir_or_class(struct hci_dev *hdev)
1415 struct pending_cmd *cmd;
1417 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1418 switch (cmd->opcode) {
1419 case MGMT_OP_ADD_UUID:
1420 case MGMT_OP_REMOVE_UUID:
1421 case MGMT_OP_SET_DEV_CLASS:
1422 case MGMT_OP_SET_POWERED:
1430 static const u8 bluetooth_base_uuid[] = {
1431 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1432 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1435 static u8 get_uuid_size(const u8 *uuid)
1439 if (memcmp(uuid, bluetooth_base_uuid, 12))
1442 val = get_unaligned_le32(&uuid[12]);
1449 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1451 struct pending_cmd *cmd;
1455 cmd = mgmt_pending_find(mgmt_op, hdev);
1459 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1460 hdev->dev_class, 3);
1462 mgmt_pending_remove(cmd);
1465 hci_dev_unlock(hdev);
1468 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1470 BT_DBG("status 0x%02x", status);
1472 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1475 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1477 struct mgmt_cp_add_uuid *cp = data;
1478 struct pending_cmd *cmd;
1479 struct hci_request req;
1480 struct bt_uuid *uuid;
1483 BT_DBG("request for %s", hdev->name);
1487 if (pending_eir_or_class(hdev)) {
1488 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1493 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1499 memcpy(uuid->uuid, cp->uuid, 16);
1500 uuid->svc_hint = cp->svc_hint;
1501 uuid->size = get_uuid_size(cp->uuid);
1503 list_add_tail(&uuid->list, &hdev->uuids);
1505 hci_req_init(&req, hdev);
1510 err = hci_req_run(&req, add_uuid_complete);
1512 if (err != -ENODATA)
1515 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1516 hdev->dev_class, 3);
1520 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1529 hci_dev_unlock(hdev);
1533 static bool enable_service_cache(struct hci_dev *hdev)
1535 if (!hdev_is_powered(hdev))
1538 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1539 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1547 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1549 BT_DBG("status 0x%02x", status);
1551 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: delete one UUID (or all, when the
 * all-zero wildcard UUID is given) and refresh EIR/CoD. Mirrors
 * add_uuid()'s deferred-vs-direct completion logic.
 * NOTE(review): locking, goto labels and the update_class/update_eir
 * request additions are elided in this capture.
 */
1554 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1557 struct mgmt_cp_remove_uuid *cp = data;
1558 struct pending_cmd *cmd;
1559 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
1560 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1561 struct hci_request req;
1564 BT_DBG("request for %s", hdev->name);
1568 if (pending_eir_or_class(hdev)) {
1569 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1574 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1575 err = hci_uuids_clear(hdev);
/* If the service cache got armed, the EIR update is deferred; reply now. */
1577 if (enable_service_cache(hdev)) {
1578 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1579 0, hdev->dev_class, 3);
/* Single-UUID removal: delete every list entry that matches. */
1588 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1589 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1592 list_del(&match->list);
/* Presumably reached when no entry matched — TODO confirm against gaps. */
1598 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1599 MGMT_STATUS_INVALID_PARAMS);
1604 hci_req_init(&req, hdev);
1609 err = hci_req_run(&req, remove_uuid_complete);
1611 if (err != -ENODATA)
1614 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1615 hdev->dev_class, 3);
1619 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1628 hci_dev_unlock(hdev);
1632 static void set_class_complete(struct hci_dev *hdev, u8 status)
1634 BT_DBG("status 0x%02x", status);
1636 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: update major/minor class of device,
 * validating the reserved bits, and push the change to the controller
 * when powered. BR/EDR only.
 * NOTE(review): locking, goto labels and the update_class request
 * addition are elided in this capture.
 */
1639 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1642 struct mgmt_cp_set_dev_class *cp = data;
1643 struct pending_cmd *cmd;
1644 struct hci_request req;
1647 BT_DBG("request for %s", hdev->name);
1649 if (!lmp_bredr_capable(hdev))
1650 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1651 MGMT_STATUS_NOT_SUPPORTED);
1655 if (pending_eir_or_class(hdev)) {
1656 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low two minor bits and high three major bits are reserved (format/FSC). */
1661 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1662 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1663 MGMT_STATUS_INVALID_PARAMS);
1667 hdev->major_class = cp->major;
1668 hdev->minor_class = cp->minor;
/* Powered off: nothing to send to the controller; reply immediately. */
1670 if (!hdev_is_powered(hdev)) {
1671 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1672 hdev->dev_class, 3);
1676 hci_req_init(&req, hdev);
/* Flush any pending service-cache work before writing the new class;
 * the lock must be dropped around cancel_delayed_work_sync().
 */
1678 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1679 hci_dev_unlock(hdev);
1680 cancel_delayed_work_sync(&hdev->service_cache);
1687 err = hci_req_run(&req, set_class_complete);
1689 if (err != -ENODATA)
1692 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1693 hdev->dev_class, 3);
1697 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1706 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys wholesale with the list supplied by userspace. Validates
 * that the payload length matches key_count before touching state.
 * NOTE(review): locking, the debug_keys conditional and closing braces
 * are elided in this capture.
 */
1710 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1713 struct mgmt_cp_load_link_keys *cp = data;
1714 u16 key_count, expected_len;
1717 key_count = __le16_to_cpu(cp->key_count);
/* Reject payloads whose size disagrees with the declared key count —
 * guards against reading past the end of the received buffer.
 */
1719 expected_len = sizeof(*cp) + key_count *
1720 sizeof(struct mgmt_link_key_info);
1721 if (expected_len != len) {
1722 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1724 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1725 MGMT_STATUS_INVALID_PARAMS);
1728 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1729 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1730 MGMT_STATUS_INVALID_PARAMS);
1732 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before clearing existing keys (all-or-nothing). */
1735 for (i = 0; i < key_count; i++) {
1736 struct mgmt_link_key_info *key = &cp->keys[i];
1738 if (key->addr.type != BDADDR_BREDR)
1739 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1740 MGMT_STATUS_INVALID_PARAMS);
1745 hci_link_keys_clear(hdev);
1747 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1750 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1752 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1754 for (i = 0; i < key_count; i++) {
1755 struct mgmt_link_key_info *key = &cp->keys[i];
1757 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1758 key->type, key->pin_len);
1761 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1763 hci_dev_unlock(hdev);
1768 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1769 u8 addr_type, struct sock *skip_sk)
1771 struct mgmt_ev_device_unpaired ev;
1773 bacpy(&ev.addr.bdaddr, bdaddr);
1774 ev.addr.type = addr_type;
1776 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove stored keys for a device and,
 * when requested and a connection exists, disconnect it. The reply is
 * deferred when a disconnect is issued.
 * NOTE(review): locking, goto labels, the ENOMEM path for
 * mgmt_pending_add and several closing braces are elided here.
 */
1780 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1783 struct mgmt_cp_unpair_device *cp = data;
1784 struct mgmt_rp_unpair_device rp;
1785 struct hci_cp_disconnect dc;
1786 struct pending_cmd *cmd;
1787 struct hci_conn *conn;
/* The reply always echoes the target address, even on failure. */
1790 memset(&rp, 0, sizeof(rp));
1791 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1792 rp.addr.type = cp->addr.type;
1794 if (!bdaddr_type_is_valid(cp->addr.type))
1795 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1796 MGMT_STATUS_INVALID_PARAMS,
1799 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1800 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1801 MGMT_STATUS_INVALID_PARAMS,
1806 if (!hdev_is_powered(hdev)) {
1807 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1808 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR uses link keys, LE uses long term keys. */
1812 if (cp->addr.type == BDADDR_BREDR)
1813 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1815 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1818 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1819 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1823 if (cp->disconnect) {
1824 if (cp->addr.type == BDADDR_BREDR)
1825 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1828 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection: reply immediately and broadcast the unpair event. */
1835 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1837 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Connection exists: defer the reply until the disconnect completes. */
1841 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1848 dc.handle = cpu_to_le16(conn->handle);
1849 dc.reason = 0x13; /* Remote User Terminated Connection */
1850 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1852 mgmt_pending_remove(cmd);
1855 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the ACL or LE connection to
 * the given address. The reply is deferred to the disconnect-complete
 * event via the pending-command list.
 * NOTE(review): locking, goto labels and the ENOMEM path are elided in
 * this capture.
 */
1859 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1862 struct mgmt_cp_disconnect *cp = data;
1863 struct mgmt_rp_disconnect rp;
1864 struct hci_cp_disconnect dc;
1865 struct pending_cmd *cmd;
1866 struct hci_conn *conn;
1871 memset(&rp, 0, sizeof(rp));
1872 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1873 rp.addr.type = cp->addr.type;
1875 if (!bdaddr_type_is_valid(cp->addr.type))
1876 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1877 MGMT_STATUS_INVALID_PARAMS,
1882 if (!test_bit(HCI_UP, &hdev->flags)) {
1883 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1884 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be pending per adapter at a time. */
1888 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1889 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1890 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1894 if (cp->addr.type == BDADDR_BREDR)
1895 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1898 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED means the connection object is not actually live. */
1900 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1901 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1902 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1906 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1912 dc.handle = cpu_to_le16(conn->handle);
1913 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1915 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1917 mgmt_pending_remove(cmd);
1920 hci_dev_unlock(hdev);
1924 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1926 switch (link_type) {
1928 switch (addr_type) {
1929 case ADDR_LE_DEV_PUBLIC:
1930 return BDADDR_LE_PUBLIC;
1933 /* Fallback to LE Random address type */
1934 return BDADDR_LE_RANDOM;
1938 /* Fallback to BR/EDR type */
1939 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report every mgmt-visible connection
 * (ACL/LE; SCO links are filtered out) as an address list.
 * NOTE(review): locking, the kmalloc-failure path, loop `continue`
 * statements, kfree and goto labels are elided in this capture.
 */
1943 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1946 struct mgmt_rp_get_connections *rp;
1956 if (!hdev_is_powered(hdev)) {
1957 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1958 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply buffer. */
1963 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1964 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1968 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1969 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
1976 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1977 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1979 bacpy(&rp->addr[i].bdaddr, &c->dst);
1980 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1981 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1986 rp->conn_count = cpu_to_le16(i);
1988 /* Recalculate length in case of filtered SCO connections, etc */
1989 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1991 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1997 hci_dev_unlock(hdev);
2001 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2002 struct mgmt_cp_pin_code_neg_reply *cp)
2004 struct pending_cmd *cmd;
2007 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2012 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2013 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2015 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller. A 16-byte PIN is mandatory for high-security pairings;
 * shorter PINs are converted into a negative reply in that case.
 * NOTE(review): locking, goto labels and the ENOMEM path are elided in
 * this capture.
 */
2020 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2023 struct hci_conn *conn;
2024 struct mgmt_cp_pin_code_reply *cp = data;
2025 struct hci_cp_pin_code_reply reply;
2026 struct pending_cmd *cmd;
2033 if (!hdev_is_powered(hdev)) {
2034 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2035 MGMT_STATUS_NOT_POWERED);
2039 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2041 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2042 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; reject shorter ones by
 * sending a negative reply on the user's behalf.
 */
2046 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2047 struct mgmt_cp_pin_code_neg_reply ncp;
2049 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2051 BT_ERR("PIN code is not 16 bytes long");
2053 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2055 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2056 MGMT_STATUS_INVALID_PARAMS);
2061 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2067 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2068 reply.pin_len = cp->pin_len;
2069 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2071 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2073 mgmt_pending_remove(cmd);
2076 hci_dev_unlock(hdev);
2080 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2083 struct mgmt_cp_set_io_capability *cp = data;
2089 hdev->io_capability = cp->io_capability;
2091 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2092 hdev->io_capability);
2094 hci_dev_unlock(hdev);
2096 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2100 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2102 struct hci_dev *hdev = conn->hdev;
2103 struct pending_cmd *cmd;
2105 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2106 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2109 if (cmd->user_data != conn)
2118 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2120 struct mgmt_rp_pair_device rp;
2121 struct hci_conn *conn = cmd->user_data;
2123 bacpy(&rp.addr.bdaddr, &conn->dst);
2124 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2126 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2129 /* So we don't get further callbacks for this connection */
2130 conn->connect_cfm_cb = NULL;
2131 conn->security_cfm_cb = NULL;
2132 conn->disconn_cfm_cb = NULL;
2136 mgmt_pending_remove(cmd);
2139 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2141 struct pending_cmd *cmd;
2143 BT_DBG("status %u", status);
2145 cmd = find_pairing(conn);
2147 BT_DBG("Unable to find a pending command");
2149 pairing_complete(cmd, mgmt_status(status));
2152 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2154 struct pending_cmd *cmd;
2156 BT_DBG("status %u", status);
2161 cmd = find_pairing(conn);
2163 BT_DBG("Unable to find a pending command");
2165 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate (or piggy-back on) a
 * connection to the peer and drive pairing via connection callbacks.
 * The mgmt reply is deferred to pairing_complete().
 * NOTE(review): locking, goto labels, hci_conn_put on the busy path
 * and the ENOMEM path are elided in this capture.
 */
2168 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2171 struct mgmt_cp_pair_device *cp = data;
2172 struct mgmt_rp_pair_device rp;
2173 struct pending_cmd *cmd;
2174 u8 sec_level, auth_type;
2175 struct hci_conn *conn;
2180 memset(&rp, 0, sizeof(rp));
2181 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2182 rp.addr.type = cp->addr.type;
2184 if (!bdaddr_type_is_valid(cp->addr.type))
2185 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2186 MGMT_STATUS_INVALID_PARAMS,
2191 if (!hdev_is_powered(hdev)) {
2192 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2193 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 = NoInputNoOutput: cannot do MITM protection. */
2197 sec_level = BT_SECURITY_MEDIUM;
2198 if (cp->io_cap == 0x03)
2199 auth_type = HCI_AT_DEDICATED_BONDING;
2201 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2203 if (cp->addr.type == BDADDR_BREDR)
2204 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2205 cp->addr.type, sec_level, auth_type);
2207 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2208 cp->addr.type, sec_level, auth_type);
2213 if (PTR_ERR(conn) == -EBUSY)
2214 status = MGMT_STATUS_BUSY;
2216 status = MGMT_STATUS_CONNECT_FAILED;
2218 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A non-NULL connect_cfm_cb means another pairing already owns this
 * connection.
 */
2224 if (conn->connect_cfm_cb) {
2226 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2227 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2231 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2238 /* For LE, just connecting isn't a proof that the pairing finished */
2239 if (cp->addr.type == BDADDR_BREDR)
2240 conn->connect_cfm_cb = pairing_complete_cb;
2242 conn->connect_cfm_cb = le_connect_complete_cb;
2244 conn->security_cfm_cb = pairing_complete_cb;
2245 conn->disconn_cfm_cb = pairing_complete_cb;
2246 conn->io_capability = cp->io_cap;
2247 cmd->user_data = conn;
/* Already connected and secure enough: finish immediately. */
2249 if (conn->state == BT_CONNECTED &&
2250 hci_conn_security(conn, sec_level, auth_type))
2251 pairing_complete(cmd, 0);
2256 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-flight Pair Device
 * command for the given address, completing it with status Cancelled.
 * NOTE(review): locking, goto labels and some braces are elided in
 * this capture.
 */
2260 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2263 struct mgmt_addr_info *addr = data;
2264 struct pending_cmd *cmd;
2265 struct hci_conn *conn;
2272 if (!hdev_is_powered(hdev)) {
2273 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2274 MGMT_STATUS_NOT_POWERED);
2278 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2280 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2281 MGMT_STATUS_INVALID_PARAMS);
/* The address must match the connection the pending pairing targets. */
2285 conn = cmd->user_data;
2287 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2288 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2289 MGMT_STATUS_INVALID_PARAMS);
2293 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2295 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2296 addr, sizeof(*addr));
2298 hci_dev_unlock(hdev);
/* Shared implementation for all user confirm/passkey (negative) reply
 * commands: route LE responses through SMP and BR/EDR responses
 * through the corresponding HCI command, tracked as a pending mgmt
 * command.
 * NOTE(review): locking, goto labels and the ENOMEM path are elided in
 * this capture.
 */
2302 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2303 struct mgmt_addr_info *addr, u16 mgmt_op,
2304 u16 hci_op, __le32 passkey)
2306 struct pending_cmd *cmd;
2307 struct hci_conn *conn;
2312 if (!hdev_is_powered(hdev)) {
2313 err = cmd_complete(sk, hdev->id, mgmt_op,
2314 MGMT_STATUS_NOT_POWERED, addr,
2319 if (addr->type == BDADDR_BREDR)
2320 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2322 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2325 err = cmd_complete(sk, hdev->id, mgmt_op,
2326 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go to the SMP layer, not to HCI. */
2331 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2332 /* Continue with pairing via SMP */
2333 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2336 err = cmd_complete(sk, hdev->id, mgmt_op,
2337 MGMT_STATUS_SUCCESS, addr,
2340 err = cmd_complete(sk, hdev->id, mgmt_op,
2341 MGMT_STATUS_FAILED, addr,
2347 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2353 /* Continue with pairing via HCI */
/* Passkey replies carry an extra 32-bit passkey parameter; all other
 * replies only send the peer address.
 */
2354 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2355 struct hci_cp_user_passkey_reply cp;
2357 bacpy(&cp.bdaddr, &addr->bdaddr);
2358 cp.passkey = passkey;
2359 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2361 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2365 mgmt_pending_remove(cmd);
2368 hci_dev_unlock(hdev);
2372 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2373 void *data, u16 len)
2375 struct mgmt_cp_pin_code_neg_reply *cp = data;
2379 return user_pairing_resp(sk, hdev, &cp->addr,
2380 MGMT_OP_PIN_CODE_NEG_REPLY,
2381 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2384 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2387 struct mgmt_cp_user_confirm_reply *cp = data;
2391 if (len != sizeof(*cp))
2392 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2393 MGMT_STATUS_INVALID_PARAMS);
2395 return user_pairing_resp(sk, hdev, &cp->addr,
2396 MGMT_OP_USER_CONFIRM_REPLY,
2397 HCI_OP_USER_CONFIRM_REPLY, 0);
2400 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2401 void *data, u16 len)
2403 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2407 return user_pairing_resp(sk, hdev, &cp->addr,
2408 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2409 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2412 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2415 struct mgmt_cp_user_passkey_reply *cp = data;
2419 return user_pairing_resp(sk, hdev, &cp->addr,
2420 MGMT_OP_USER_PASSKEY_REPLY,
2421 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2424 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2425 void *data, u16 len)
2427 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2431 return user_pairing_resp(sk, hdev, &cp->addr,
2432 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2433 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2436 static void update_name(struct hci_request *req)
2438 struct hci_dev *hdev = req->hdev;
2439 struct hci_cp_write_local_name cp;
2441 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2443 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2446 static void set_name_complete(struct hci_dev *hdev, u8 status)
2448 struct mgmt_cp_set_local_name *cp;
2449 struct pending_cmd *cmd;
2451 BT_DBG("status 0x%02x", status);
2455 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2462 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2463 mgmt_status(status));
2465 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2468 mgmt_pending_remove(cmd);
2471 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's long and short
 * names and, when powered, write them to the controller and refresh
 * EIR/advertising data.
 * NOTE(review): locking, goto labels, the ENOMEM path and the
 * update_name/update_eir additions inside the bredr branch are elided
 * in this capture.
 */
2474 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2477 struct mgmt_cp_set_local_name *cp = data;
2478 struct pending_cmd *cmd;
2479 struct hci_request req;
2486 /* If the old values are the same as the new ones just return a
2487 * direct command complete event.
2489 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2490 !memcmp(hdev->short_name, cp->short_name,
2491 sizeof(hdev->short_name))) {
2492 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2497 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: only update local state and notify other mgmt sockets. */
2499 if (!hdev_is_powered(hdev)) {
2500 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2502 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2507 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2513 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2519 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2521 hci_req_init(&req, hdev);
2523 if (lmp_bredr_capable(hdev)) {
/* LE controllers carry the name in advertising data as well. */
2528 if (lmp_le_capable(hdev))
2529 hci_update_ad(&req);
2531 err = hci_req_run(&req, set_name_complete);
2533 mgmt_pending_remove(cmd);
2536 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the controller's OOB
 * hash/randomizer (requires power and SSP support); the reply is
 * deferred to the HCI command-complete event.
 * NOTE(review): locking, goto labels and the ENOMEM path are elided in
 * this capture.
 */
2540 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2541 void *data, u16 data_len)
2543 struct pending_cmd *cmd;
2546 BT_DBG("%s", hdev->name);
2550 if (!hdev_is_powered(hdev)) {
2551 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2552 MGMT_STATUS_NOT_POWERED);
2556 if (!lmp_ssp_capable(hdev)) {
2557 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2558 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be outstanding per adapter. */
2562 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2563 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2568 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2574 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2576 mgmt_pending_remove(cmd);
2579 hci_dev_unlock(hdev);
2583 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2584 void *data, u16 len)
2586 struct mgmt_cp_add_remote_oob_data *cp = data;
2590 BT_DBG("%s ", hdev->name);
2594 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2597 status = MGMT_STATUS_FAILED;
2599 status = MGMT_STATUS_SUCCESS;
2601 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2602 &cp->addr, sizeof(cp->addr));
2604 hci_dev_unlock(hdev);
2608 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2609 void *data, u16 len)
2611 struct mgmt_cp_remove_remote_oob_data *cp = data;
2615 BT_DBG("%s", hdev->name);
2619 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2621 status = MGMT_STATUS_INVALID_PARAMS;
2623 status = MGMT_STATUS_SUCCESS;
2625 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2626 status, &cp->addr, sizeof(cp->addr));
2628 hci_dev_unlock(hdev);
2632 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2636 BT_DBG("%s", hdev->name);
2640 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2642 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2644 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: begin BR/EDR inquiry, LE scan, or
 * interleaved discovery depending on the requested type, after
 * checking power, periodic-inquiry and current discovery state.
 * NOTE(review): locking, goto labels, break statements and the
 * failed-path cleanup are elided in this capture.
 */
2649 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2650 void *data, u16 len)
2652 struct mgmt_cp_start_discovery *cp = data;
2653 struct pending_cmd *cmd;
2656 BT_DBG("%s", hdev->name);
2660 if (!hdev_is_powered(hdev)) {
2661 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2662 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and mgmt-driven discovery are mutually exclusive. */
2666 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2667 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2672 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2673 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2678 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2684 hdev->discovery.type = cp->type;
/* Each discovery type requires the matching controller capability. */
2686 switch (hdev->discovery.type) {
2687 case DISCOV_TYPE_BREDR:
2688 if (!lmp_bredr_capable(hdev)) {
2689 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2690 MGMT_STATUS_NOT_SUPPORTED);
2691 mgmt_pending_remove(cmd);
2695 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2698 case DISCOV_TYPE_LE:
2699 if (!lmp_host_le_capable(hdev)) {
2700 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2701 MGMT_STATUS_NOT_SUPPORTED);
2702 mgmt_pending_remove(cmd);
2706 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2707 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2710 case DISCOV_TYPE_INTERLEAVED:
2711 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2712 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2713 MGMT_STATUS_NOT_SUPPORTED);
2714 mgmt_pending_remove(cmd);
/* Interleaved mode starts with the LE scan; the BR/EDR inquiry phase
 * follows via mgmt_interleaved_discovery().
 */
2718 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2719 LE_SCAN_TIMEOUT_BREDR_LE);
2723 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2724 MGMT_STATUS_INVALID_PARAMS);
2725 mgmt_pending_remove(cmd);
2730 mgmt_pending_remove(cmd);
2732 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2735 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: cancel whatever phase discovery is
 * currently in (inquiry, LE scan, or name resolution) after checking
 * the requested type matches the active one.
 * NOTE(review): locking, goto labels, break statements and the ENOMEM
 * path are elided in this capture.
 */
2739 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2742 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2743 struct pending_cmd *cmd;
2744 struct hci_cp_remote_name_req_cancel cp;
2745 struct inquiry_entry *e;
2748 BT_DBG("%s", hdev->name);
2752 if (!hci_discovery_active(hdev)) {
2753 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2754 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2755 sizeof(mgmt_cp->type));
2759 if (hdev->discovery.type != mgmt_cp->type) {
2760 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2761 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2762 sizeof(mgmt_cp->type));
2766 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2772 switch (hdev->discovery.state) {
/* Still scanning: cancel whichever mechanism is in flight. */
2773 case DISCOVERY_FINDING:
2774 if (test_bit(HCI_INQUIRY, &hdev->flags))
2775 err = hci_cancel_inquiry(hdev);
2777 err = hci_cancel_le_scan(hdev);
/* Resolving names: cancel the outstanding remote-name request, or
 * finish immediately if none is pending.
 */
2781 case DISCOVERY_RESOLVING:
2782 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2785 mgmt_pending_remove(cmd);
2786 err = cmd_complete(sk, hdev->id,
2787 MGMT_OP_STOP_DISCOVERY, 0,
2789 sizeof(mgmt_cp->type));
2790 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2794 bacpy(&cp.bdaddr, &e->data.bdaddr);
2795 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2801 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2806 mgmt_pending_remove(cmd);
2808 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2811 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether it already
 * knows the name of a discovered device, which decides if a remote
 * name request is needed during the resolving phase.
 * NOTE(review): locking, goto labels and some braces are elided in
 * this capture.
 */
2815 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2818 struct mgmt_cp_confirm_name *cp = data;
2819 struct inquiry_entry *e;
2822 BT_DBG("%s", hdev->name);
2826 if (!hci_discovery_active(hdev)) {
2827 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2828 MGMT_STATUS_FAILED);
2832 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2834 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2835 MGMT_STATUS_INVALID_PARAMS);
/* Known name: no resolution needed. Unknown: queue it for resolving. */
2839 if (cp->name_known) {
2840 e->name_state = NAME_KNOWN;
2843 e->name_state = NAME_NEEDED;
2844 hci_inquiry_cache_update_resolve(hdev, e);
2847 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2851 hci_dev_unlock(hdev);
2855 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2858 struct mgmt_cp_block_device *cp = data;
2862 BT_DBG("%s", hdev->name);
2864 if (!bdaddr_type_is_valid(cp->addr.type))
2865 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2866 MGMT_STATUS_INVALID_PARAMS,
2867 &cp->addr, sizeof(cp->addr));
2871 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2873 status = MGMT_STATUS_FAILED;
2875 status = MGMT_STATUS_SUCCESS;
2877 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2878 &cp->addr, sizeof(cp->addr));
2880 hci_dev_unlock(hdev);
2885 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2888 struct mgmt_cp_unblock_device *cp = data;
2892 BT_DBG("%s", hdev->name);
2894 if (!bdaddr_type_is_valid(cp->addr.type))
2895 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2896 MGMT_STATUS_INVALID_PARAMS,
2897 &cp->addr, sizeof(cp->addr));
2901 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2903 status = MGMT_STATUS_INVALID_PARAMS;
2905 status = MGMT_STATUS_SUCCESS;
2907 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2908 &cp->addr, sizeof(cp->addr));
2910 hci_dev_unlock(hdev);
2915 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2918 struct mgmt_cp_set_device_id *cp = data;
2919 struct hci_request req;
2923 BT_DBG("%s", hdev->name);
2925 source = __le16_to_cpu(cp->source);
2927 if (source > 0x0002)
2928 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2929 MGMT_STATUS_INVALID_PARAMS);
2933 hdev->devid_source = source;
2934 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2935 hdev->devid_product = __le16_to_cpu(cp->product);
2936 hdev->devid_version = __le16_to_cpu(cp->version);
2938 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2940 hci_req_init(&req, hdev);
2942 hci_req_run(&req, NULL);
2944 hci_dev_unlock(hdev);
/* hci_request completion callback for MGMT_OP_SET_FAST_CONNECTABLE:
 * commit or roll back the flag based on the HCI status and answer the
 * pending command.
 * NOTE(review): locking, goto labels, the if/else structure and
 * closing braces are elided in this capture.
 */
2949 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2951 struct pending_cmd *cmd;
2953 BT_DBG("status 0x%02x", status);
2957 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2962 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2963 mgmt_status(status));
/* Success path: mirror the requested mode into the dev_flags bit. */
2965 struct mgmt_mode *cp = cmd->param;
2968 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2970 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2972 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2973 new_settings(hdev, cmd->sk);
2976 mgmt_pending_remove(cmd);
2979 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle aggressive page-scan
 * parameters. Requires BR/EDR, Bluetooth >= 1.2, power on and the
 * connectable setting; the reply is deferred to
 * fast_connectable_complete().
 * NOTE(review): locking, goto labels and the ENOMEM path are elided in
 * this capture.
 */
2982 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2983 void *data, u16 len)
2985 struct mgmt_mode *cp = data;
2986 struct pending_cmd *cmd;
2987 struct hci_request req;
2990 BT_DBG("%s", hdev->name);
/* Fast connectable page-scan parameters need Bluetooth 1.2 or later. */
2992 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
2993 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2994 MGMT_STATUS_NOT_SUPPORTED);
2996 if (cp->val != 0x00 && cp->val != 0x01)
2997 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2998 MGMT_STATUS_INVALID_PARAMS);
3000 if (!hdev_is_powered(hdev))
3001 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3002 MGMT_STATUS_NOT_POWERED);
3004 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3005 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3006 MGMT_STATUS_REJECTED);
3010 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3011 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested mode already active: acknowledge without HCI traffic. */
3016 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3017 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3022 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3029 hci_req_init(&req, hdev);
3031 write_fast_connectable(&req, cp->val);
3033 err = hci_req_run(&req, fast_connectable_complete);
3035 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3036 MGMT_STATUS_FAILED);
3037 mgmt_pending_remove(cmd);
3041 hci_dev_unlock(hdev);
3046 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3048 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3050 if (key->master != 0x00 && key->master != 0x01)
3052 if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the adapter's stored LE
 * long-term keys with the userspace-supplied list, after validating
 * the payload length and every entry.
 * NOTE(review): locking, the master/slave type selection conditional
 * and closing braces are elided in this capture.
 */
3057 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3058 void *cp_data, u16 len)
3060 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3061 u16 key_count, expected_len;
3064 key_count = __le16_to_cpu(cp->key_count);
/* Reject payloads whose size disagrees with the declared key count. */
3066 expected_len = sizeof(*cp) + key_count *
3067 sizeof(struct mgmt_ltk_info);
3068 if (expected_len != len) {
3069 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3071 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3072 MGMT_STATUS_INVALID_PARAMS);
3075 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate everything before clearing the existing keys. */
3077 for (i = 0; i < key_count; i++) {
3078 struct mgmt_ltk_info *key = &cp->keys[i];
3080 if (!ltk_is_valid(key))
3081 return cmd_status(sk, hdev->id,
3082 MGMT_OP_LOAD_LONG_TERM_KEYS,
3083 MGMT_STATUS_INVALID_PARAMS);
3088 hci_smp_ltks_clear(hdev);
3090 for (i = 0; i < key_count; i++) {
3091 struct mgmt_ltk_info *key = &cp->keys[i];
3097 type = HCI_SMP_LTK_SLAVE;
3099 hci_add_ltk(hdev, &key->addr.bdaddr,
3100 bdaddr_to_le(key->addr.type),
3101 type, 0, key->authenticated, key->val,
3102 key->enc_size, key->ediv, key->rand);
3105 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3108 hci_dev_unlock(hdev);
/* Dispatch table for management commands, indexed by MGMT_OP_* opcode.
 * Each entry pairs the handler with whether the command is
 * variable-length and its (minimum) expected parameter size; see
 * mgmt_control() for the lookup and size check.
 * NOTE(review): some struct members and table entries appear elided in
 * this capture.
 */
3113 static const struct mgmt_handler {
3114 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3118 } mgmt_handlers[] = {
3119 { NULL }, /* 0x0000 (no command) */
3120 { read_version, false, MGMT_READ_VERSION_SIZE },
3121 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3122 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3123 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3124 { set_powered, false, MGMT_SETTING_SIZE },
3125 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3126 { set_connectable, false, MGMT_SETTING_SIZE },
3127 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3128 { set_pairable, false, MGMT_SETTING_SIZE },
3129 { set_link_security, false, MGMT_SETTING_SIZE },
3130 { set_ssp, false, MGMT_SETTING_SIZE },
3131 { set_hs, false, MGMT_SETTING_SIZE },
3132 { set_le, false, MGMT_SETTING_SIZE },
3133 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3134 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3135 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3136 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* Key-loading commands carry a variable-length trailing array. */
3137 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3138 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3139 { disconnect, false, MGMT_DISCONNECT_SIZE },
3140 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3141 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3142 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3143 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3144 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3145 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3146 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3147 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3148 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3149 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3150 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3151 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3152 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3153 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3154 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3155 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3156 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3157 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3158 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3159 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for management messages arriving on the HCI control
 * channel from userspace.  Copies the message, parses the mgmt_hdr
 * (opcode/index/len, all little-endian on the wire), resolves the target
 * controller for indexed commands, validates the opcode and parameter
 * length against mgmt_handlers[], and dispatches to the handler.
 * Returns a negative errno or the handler's result.
 * NOTE(review): several lines (buf/cp/err declarations, error-path gotos,
 * cleanup and return) are elided in this excerpt.
 */
3163 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3167 struct mgmt_hdr *hdr;
3168 u16 opcode, index, len;
3169 struct hci_dev *hdev = NULL;
3170 const struct mgmt_handler *handler;
3173 BT_DBG("got %zu bytes", msglen);
/* Must have at least a complete header before touching its fields. */
3175 if (msglen < sizeof(*hdr))
3178 buf = kmalloc(msglen, GFP_KERNEL);
3182 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3188 opcode = __le16_to_cpu(hdr->opcode);
3189 index = __le16_to_cpu(hdr->index);
3190 len = __le16_to_cpu(hdr->len);
/* Header-declared payload length must match what was actually sent. */
3192 if (len != msglen - sizeof(*hdr)) {
3197 if (index != MGMT_INDEX_NONE) {
3198 hdev = hci_dev_get(index);
3200 err = cmd_status(sk, index, opcode,
3201 MGMT_STATUS_INVALID_INDEX);
3206 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3207 mgmt_handlers[opcode].func == NULL) {
3208 BT_DBG("Unknown op %u", opcode);
3209 err = cmd_status(sk, index, opcode,
3210 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below MGMT_OP_READ_INFO are global (no controller); all
 * others require one.  Reject any mismatch between the two. */
3214 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3215 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3216 err = cmd_status(sk, index, opcode,
3217 MGMT_STATUS_INVALID_INDEX);
3221 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed-length
 * commands need exactly data_len bytes. */
3223 if ((handler->var_len && len < handler->data_len) ||
3224 (!handler->var_len && len != handler->data_len)) {
3225 err = cmd_status(sk, index, opcode,
3226 MGMT_STATUS_INVALID_PARAMS);
3231 mgmt_init_hdev(sk, hdev);
/* Parameters start immediately after the header. */
3233 cp = buf + sizeof(*hdr);
3235 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data, then drop it from the pending list. */
3249 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3253 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3254 mgmt_pending_remove(cmd);
/* Announce a newly registered controller to mgmt listeners, unless the
 * controller type is not handled over mgmt (mgmt_valid_hdev()). */
3257 int mgmt_index_added(struct hci_dev *hdev)
3259 if (!mgmt_valid_hdev(hdev))
3262 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce controller removal: first fail every pending command on this
 * controller with INVALID_INDEX (opcode 0 == match all opcodes), then
 * emit the INDEX_REMOVED event. */
3265 int mgmt_index_removed(struct hci_dev *hdev)
3267 u8 status = MGMT_STATUS_INVALID_INDEX;
3269 if (!mgmt_valid_hdev(hdev))
3272 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3274 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* NOTE(review): member of struct cmd_lookup (surrounding definition
 * elided in this excerpt) — the controller a pending-command scan is
 * scoped to; used together with a struct sock * by settings_rsp() and
 * sk_lookup() below. */
3279 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: complete a pending settings command
 * with the current settings of match->hdev, remember the first
 * requester's socket in the lookup (with a ref via sock_hold) so the
 * caller can skip it when broadcasting, and free the command. */
3283 static void settings_rsp(struct pending_cmd *cmd, void *data)
3285 struct cmd_lookup *match = data;
3287 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3289 list_del(&cmd->list);
3291 if (match->sk == NULL) {
3292 match->sk = cmd->sk;
3293 sock_hold(match->sk);
3296 mgmt_pending_free(cmd);
/* Queue an HCI Write Scan Enable command reflecting the current
 * CONNECTABLE/DISCOVERABLE flags.  Fast connectable is disabled first
 * because it conflicts with the page-scan parameters implied here. */
3299 static void set_bredr_scan(struct hci_request *req)
3301 struct hci_dev *hdev = req->hdev;
3304 /* Ensure that fast connectable is disabled. This function will
3305 * not do anything if the page scan parameters are already what
3308 write_fast_connectable(req, false);
3310 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3312 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3313 scan |= SCAN_INQUIRY;
3316 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): answer all pending SET_POWERED commands and
 * broadcast the new settings (skipping the first responder's socket). */
3319 static void powered_complete(struct hci_dev *hdev, u8 status)
3321 struct cmd_lookup match = { NULL, hdev };
3323 BT_DBG("status 0x%02x", status);
3327 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3329 new_settings(hdev, match.sk);
3331 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's host
 * configuration (SSP, LE host support, authentication, scan enable) in
 * line with the mgmt-level dev_flags after power-on.  Returns the result
 * of hci_req_run(); 0 means the request was queued and powered_complete()
 * will fire.  NOTE(review): some conditional lines around the SSP and
 * LE checks are elided in this excerpt. */
3337 static int powered_update_hci(struct hci_dev *hdev)
3339 struct hci_request req;
3342 hci_req_init(&req, hdev);
/* SSP enabled in dev_flags but not yet in the controller's host
 * features: push the SSP mode write. */
3344 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3345 !lmp_host_ssp_capable(hdev)) {
3348 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3351 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3352 struct hci_cp_write_le_host_supported cp;
3355 cp.simul = lmp_le_br_capable(hdev);
3357 /* Check first if we already have the right
3358 * host state (host features set)
3360 if (cp.le != lmp_host_le_capable(hdev) ||
3361 cp.simul != lmp_host_le_br_capable(hdev))
3362 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Only write auth enable when it differs from the hardware state. */
3366 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3367 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3368 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3369 sizeof(link_sec), &link_sec);
3371 if (lmp_bredr_capable(hdev)) {
3372 set_bredr_scan(&req);
3378 return hci_req_run(&req, powered_complete);
/* Called from the HCI core when the controller's power state changes.
 * On power-on, kick off powered_update_hci(); if that request was queued
 * (returns 0), responses are deferred to powered_complete().  On
 * power-off, answer pending SET_POWERED commands, fail all other pending
 * commands with NOT_POWERED, and announce a zeroed class of device if it
 * was previously non-zero.  Finishes by broadcasting new settings.
 * NOTE(review): the powered/!powered branch structure is partially
 * elided in this excerpt. */
3381 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3383 struct cmd_lookup match = { NULL, hdev };
3384 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3385 u8 zero_cod[] = { 0, 0, 0 };
3388 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3392 if (powered_update_hci(hdev) == 0)
3395 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3400 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3401 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3403 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3404 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3405 zero_cod, sizeof(zero_cod), NULL);
3408 err = new_settings(hdev, match.sk);
/* Sync the HCI_DISCOVERABLE dev_flag with the controller-reported state,
 * answer pending SET_DISCOVERABLE commands, and broadcast new settings.
 * test_and_set/test_and_clear ensure the event only fires on an actual
 * change. */
3416 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3418 struct cmd_lookup match = { NULL, hdev };
3419 bool changed = false;
3423 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3426 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3430 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3434 err = new_settings(hdev, match.sk);
/* Sync the HCI_CONNECTABLE dev_flag with the controller-reported state.
 * Unlike mgmt_discoverable(), only looks up (does not complete) a single
 * pending SET_CONNECTABLE command, using its socket to skip the
 * requester when broadcasting new settings. */
3442 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3444 struct pending_cmd *cmd;
3445 bool changed = false;
3449 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3452 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3456 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3459 err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* A Write Scan Enable command failed: fail the pending connectable
 * and/or discoverable commands corresponding to the scan bits that were
 * being set (SCAN_PAGE -> connectable, SCAN_INQUIRY -> discoverable). */
3464 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3466 u8 mgmt_err = mgmt_status(status);
3468 if (scan & SCAN_PAGE)
3469 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3470 cmd_status_rsp, &mgmt_err);
3472 if (scan & SCAN_INQUIRY)
3473 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3474 cmd_status_rsp, &mgmt_err);
/* Notify userspace of a new BR/EDR link key.  store_hint tells the
 * listener whether it should persist the key. */
3479 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3482 struct mgmt_ev_new_link_key ev;
3484 memset(&ev, 0, sizeof(ev));
3486 ev.store_hint = persistent;
3487 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3488 ev.key.addr.type = BDADDR_BREDR;
3489 ev.key.type = key->type;
3490 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3491 ev.key.pin_len = key->pin_len;
3493 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Notify userspace of a new SMP Long Term Key.  The LE-specific address
 * type is translated via link_to_bdaddr().  NOTE(review): the line(s)
 * setting ev.key.master based on the HCI_SMP_LTK check are elided in
 * this excerpt. */
3496 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3498 struct mgmt_ev_new_long_term_key ev;
3500 memset(&ev, 0, sizeof(ev));
3502 ev.store_hint = persistent;
3503 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3504 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3505 ev.key.authenticated = key->authenticated;
3506 ev.key.enc_size = key->enc_size;
3507 ev.key.ediv = key->ediv;
3509 if (key->type == HCI_SMP_LTK)
3512 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3513 memcpy(ev.key.val, key->val, sizeof(key->val));
3515 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit DEVICE_CONNECTED with the remote address, connection flags, and
 * an EIR blob carrying the remote name (if any) and class of device (if
 * non-zero).  NOTE(review): the buf declaration and the name-presence
 * check guarding eir_append_data() are elided in this excerpt. */
3519 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3520 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3524 struct mgmt_ev_device_connected *ev = (void *) buf;
3527 bacpy(&ev->addr.bdaddr, bdaddr);
3528 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3530 ev->flags = __cpu_to_le32(flags);
3533 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append class of device when it is present and non-zero. */
3536 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3537 eir_len = eir_append_data(ev->eir, eir_len,
3538 EIR_CLASS_OF_DEV, dev_class, 3);
3540 ev->eir_len = cpu_to_le16(eir_len);
3542 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3543 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command (success) with the address from its request parameters, and
 * hand the requester's socket back through @data.  NOTE(review): the
 * *sk assignment and sock_hold lines are elided in this excerpt. */
3546 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3548 struct mgmt_cp_disconnect *cp = cmd->param;
3549 struct sock **sk = data;
3550 struct mgmt_rp_disconnect rp;
3552 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3553 rp.addr.type = cp->addr.type;
3555 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3561 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: a disconnect completed for a device
 * being unpaired — emit DEVICE_UNPAIRED, complete the pending
 * UNPAIR_DEVICE command, and drop it. */
3564 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3566 struct hci_dev *hdev = data;
3567 struct mgmt_cp_unpair_device *cp = cmd->param;
3568 struct mgmt_rp_unpair_device rp;
3570 memset(&rp, 0, sizeof(rp));
3571 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3572 rp.addr.type = cp->addr.type;
3574 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3576 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3578 mgmt_pending_remove(cmd);
/* A connection went down: complete any pending DISCONNECT commands
 * (collecting the requester's socket so it is skipped for the event),
 * emit DEVICE_DISCONNECTED, then resolve pending UNPAIR_DEVICE commands
 * that were waiting on this disconnect.  NOTE(review): the ev.reason
 * assignment and sock_put cleanup are elided in this excerpt. */
3581 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3582 u8 link_type, u8 addr_type, u8 reason)
3584 struct mgmt_ev_device_disconnected ev;
3585 struct sock *sk = NULL;
3588 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3590 bacpy(&ev.addr.bdaddr, bdaddr);
3591 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3594 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3600 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: still resolve pending UNPAIR_DEVICE
 * commands, then complete the pending DISCONNECT command with the
 * translated failure status.  NOTE(review): the !cmd early-return is
 * elided in this excerpt. */
3606 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3607 u8 link_type, u8 addr_type, u8 status)
3609 struct mgmt_rp_disconnect rp;
3610 struct pending_cmd *cmd;
3613 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3616 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3620 bacpy(&rp.addr.bdaddr, bdaddr);
3621 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3623 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3624 mgmt_status(status), &rp, sizeof(rp));
3626 mgmt_pending_remove(cmd);
/* Emit CONNECT_FAILED with the remote address and translated HCI
 * status. */
3631 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3632 u8 addr_type, u8 status)
3634 struct mgmt_ev_connect_failed ev;
3636 bacpy(&ev.addr.bdaddr, bdaddr);
3637 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3638 ev.status = mgmt_status(status);
3640 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask userspace for a PIN code for a BR/EDR pairing.  NOTE(review): the
 * line propagating the @secure flag into the event is elided in this
 * excerpt. */
3643 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3645 struct mgmt_ev_pin_code_request ev;
3647 bacpy(&ev.addr.bdaddr, bdaddr);
3648 ev.addr.type = BDADDR_BREDR;
3651 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* Controller acknowledged our PIN code reply: complete the pending
 * PIN_CODE_REPLY command with the translated status. */
3655 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3658 struct pending_cmd *cmd;
3659 struct mgmt_rp_pin_code_reply rp;
3662 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3666 bacpy(&rp.addr.bdaddr, bdaddr);
3667 rp.addr.type = BDADDR_BREDR;
3669 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3670 mgmt_status(status), &rp, sizeof(rp));
3672 mgmt_pending_remove(cmd);
/* Controller acknowledged our negative PIN code reply: complete the
 * pending PIN_CODE_NEG_REPLY command with the translated status. */
3677 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3680 struct pending_cmd *cmd;
3681 struct mgmt_rp_pin_code_reply rp;
3684 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3688 bacpy(&rp.addr.bdaddr, bdaddr);
3689 rp.addr.type = BDADDR_BREDR;
3691 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3692 mgmt_status(status), &rp, sizeof(rp));
3694 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a numeric-comparison pairing value.
 * NOTE(review): the ev.value assignment is elided in this excerpt. */
3699 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3700 u8 link_type, u8 addr_type, __le32 value,
3703 struct mgmt_ev_user_confirm_request ev;
3705 BT_DBG("%s", hdev->name);
3707 bacpy(&ev.addr.bdaddr, bdaddr);
3708 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3709 ev.confirm_hint = confirm_hint;
3712 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey for the remote device. */
3716 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3717 u8 link_type, u8 addr_type)
3719 struct mgmt_ev_user_passkey_request ev;
3721 BT_DBG("%s", hdev->name);
3723 bacpy(&ev.addr.bdaddr, bdaddr);
3724 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3726 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion for the four user confirm/passkey (neg-)reply
 * commands: find the pending command for @opcode, complete it with the
 * translated status and the remote address, and drop it. */
3730 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3731 u8 link_type, u8 addr_type, u8 status,
3734 struct pending_cmd *cmd;
3735 struct mgmt_rp_user_confirm_reply rp;
3738 cmd = mgmt_pending_find(opcode, hdev);
3742 bacpy(&rp.addr.bdaddr, bdaddr);
3743 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3744 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3747 mgmt_pending_remove(cmd);
/* USER_CONFIRM_REPLY completion — thin wrapper over
 * user_pairing_resp_complete(). */
3752 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3753 u8 link_type, u8 addr_type, u8 status)
3755 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3756 status, MGMT_OP_USER_CONFIRM_REPLY);
/* USER_CONFIRM_NEG_REPLY completion — thin wrapper over
 * user_pairing_resp_complete(). */
3759 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3760 u8 link_type, u8 addr_type, u8 status)
3762 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3764 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* USER_PASSKEY_REPLY completion — thin wrapper over
 * user_pairing_resp_complete(). */
3767 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3768 u8 link_type, u8 addr_type, u8 status)
3770 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3771 status, MGMT_OP_USER_PASSKEY_REPLY);
/* USER_PASSKEY_NEG_REPLY completion — thin wrapper over
 * user_pairing_resp_complete(). */
3774 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3775 u8 link_type, u8 addr_type, u8 status)
3777 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3779 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Show userspace the passkey the remote side must enter, plus how many
 * digits have been entered so far. */
3782 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3783 u8 link_type, u8 addr_type, u32 passkey,
3786 struct mgmt_ev_passkey_notify ev;
3788 BT_DBG("%s", hdev->name);
3790 bacpy(&ev.addr.bdaddr, bdaddr);
3791 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3792 ev.passkey = __cpu_to_le32(passkey);
3793 ev.entered = entered;
3795 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit AUTH_FAILED with the remote address and translated HCI status. */
3798 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3799 u8 addr_type, u8 status)
3801 struct mgmt_ev_auth_failed ev;
3803 bacpy(&ev.addr.bdaddr, bdaddr);
3804 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3805 ev.status = mgmt_status(status);
3807 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Write Auth Enable completed.  On failure, fail all pending
 * SET_LINK_SECURITY commands with the translated status; on success,
 * sync HCI_LINK_SECURITY with the hardware HCI_AUTH flag, complete the
 * pending commands, and broadcast new settings on an actual change.
 * NOTE(review): the status branch structure and the goto-done path are
 * partially elided in this excerpt. */
3810 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3812 struct cmd_lookup match = { NULL, hdev };
3813 bool changed = false;
3817 u8 mgmt_err = mgmt_status(status);
3818 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3819 cmd_status_rsp, &mgmt_err);
3823 if (test_bit(HCI_AUTH, &hdev->flags)) {
3824 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3827 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3831 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3835 err = new_settings(hdev, match.sk);
/* Queue an HCI Write EIR with an all-zero payload (and clear the cached
 * copy in hdev->eir), when the controller supports extended inquiry. */
3843 static void clear_eir(struct hci_request *req)
3845 struct hci_dev *hdev = req->hdev;
3846 struct hci_cp_write_eir cp;
3848 if (!lmp_ext_inq_capable(hdev))
3851 memset(hdev->eir, 0, sizeof(hdev->eir));
3853 memset(&cp, 0, sizeof(cp));
3855 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode completed.  On failure, roll back the HCI_SSP_ENABLED
 * flag we set optimistically (broadcasting the reverted settings) and
 * fail pending SET_SSP commands; on success, sync the flag with
 * @enable, complete the pending commands, broadcast on change, and
 * update/clear the EIR via a follow-up request.  NOTE(review): several
 * branch and cleanup lines are elided in this excerpt. */
3858 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3860 struct cmd_lookup match = { NULL, hdev };
3861 struct hci_request req;
3862 bool changed = false;
3866 u8 mgmt_err = mgmt_status(status);
/* Failure while enabling: undo the optimistic flag set. */
3868 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3870 err = new_settings(hdev, NULL);
3872 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3879 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3882 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3886 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3889 err = new_settings(hdev, match.sk);
3894 hci_req_init(&req, hdev);
3896 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3901 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first matching pending
 * command's socket (taking a reference) without completing or removing
 * the command — unlike settings_rsp(). */
3906 static void sk_lookup(struct pending_cmd *cmd, void *data)
3908 struct cmd_lookup *match = data;
3910 if (match->sk == NULL) {
3911 match->sk = cmd->sk;
3912 sock_hold(match->sk);
/* Class-of-device write completed: find the socket of whichever command
 * triggered it (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) so it can be
 * skipped, then broadcast the new class.  NOTE(review): the
 * status-check guard and sock_put cleanup are elided in this excerpt. */
3916 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3919 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3922 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3923 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3924 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3927 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Local-name write completed: cache the name in hdev->dev_name and emit
 * LOCAL_NAME_CHANGED — but stay silent if this write was part of the
 * power-on sequence (a SET_POWERED command is still pending).
 * NOTE(review): the status branch and the cmd-present handling are
 * partially elided in this excerpt. */
3936 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3938 struct mgmt_cp_set_local_name ev;
3939 struct pending_cmd *cmd;
3944 memset(&ev, 0, sizeof(ev));
3945 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3946 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3948 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3950 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3952 /* If this is a HCI command related to powering on the
3953 * HCI dev don't send any mgmt signals.
3955 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3959 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3960 cmd ? cmd->sk : NULL);
/* Read Local OOB Data completed: on failure answer the pending command
 * with cmd_status(); on success return the hash and randomizer via
 * cmd_complete().  The pending command is removed either way. */
3963 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3964 u8 *randomizer, u8 status)
3966 struct pending_cmd *cmd;
3969 BT_DBG("%s status %u", hdev->name, status);
3971 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3976 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3977 mgmt_status(status));
3979 struct mgmt_rp_read_local_oob_data rp;
3981 memcpy(rp.hash, hash, sizeof(rp.hash));
3982 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3984 err = cmd_complete(cmd->sk, hdev->id,
3985 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3989 mgmt_pending_remove(cmd);
/* Write LE Host Supported completed.  Mirrors
 * mgmt_ssp_enable_complete(): on failure roll back the optimistic
 * HCI_LE_ENABLED flag and fail pending SET_LE commands; on success sync
 * the flag with @enable, complete pending commands, and broadcast on an
 * actual change.  NOTE(review): branch and cleanup lines are elided in
 * this excerpt. */
3994 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3996 struct cmd_lookup match = { NULL, hdev };
3997 bool changed = false;
4001 u8 mgmt_err = mgmt_status(status);
4003 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
4005 err = new_settings(hdev, NULL);
4007 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
4014 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4017 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4021 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4024 err = new_settings(hdev, match.sk);
/* Report a discovered device: build DEVICE_FOUND with the address,
 * flags (confirm-name / legacy-pairing hints), the raw EIR data, and an
 * appended class-of-device field when one was supplied but missing from
 * the EIR.  Rejects reports whose EIR would overflow the local buffer.
 * NOTE(review): the buf declaration, rssi assignment and the cfm_name /
 * !ssp conditionals guarding the flag lines are elided in this excerpt. */
4032 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4033 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4034 ssp, u8 *eir, u16 eir_len)
4037 struct mgmt_ev_device_found *ev = (void *) buf;
4040 /* Leave 5 bytes for a potential CoD field */
4041 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4044 memset(buf, 0, sizeof(buf));
4046 bacpy(&ev->addr.bdaddr, bdaddr);
4047 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4050 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4052 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4055 memcpy(ev->eir, eir, eir_len);
/* Append CoD only when it is not already carried in the EIR. */
4057 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4058 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4061 ev->eir_len = cpu_to_le16(eir_len);
4062 ev_size = sizeof(*ev) + eir_len;
4064 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a DEVICE_FOUND event whose EIR
 * contains only an EIR_NAME_COMPLETE field.  The buffer reserves 2
 * extra bytes for the EIR type/length header. */
4067 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4068 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4070 struct mgmt_ev_device_found *ev;
4071 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4074 ev = (struct mgmt_ev_device_found *) buf;
4076 memset(buf, 0, sizeof(buf));
4078 bacpy(&ev->addr.bdaddr, bdaddr);
4079 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4082 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4085 ev->eir_len = cpu_to_le16(eir_len);
4087 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4088 sizeof(*ev) + eir_len, NULL);
/* Discovery could not be started: reset the discovery state machine and
 * complete the pending START_DISCOVERY command with the translated
 * status and the requested discovery type. */
4091 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4093 struct pending_cmd *cmd;
4097 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4099 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4103 type = hdev->discovery.type;
4105 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4106 &type, sizeof(type));
4107 mgmt_pending_remove(cmd);
/* Discovery could not be stopped: complete the pending STOP_DISCOVERY
 * command with the translated status and the current discovery type. */
4112 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4114 struct pending_cmd *cmd;
4117 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4121 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4122 &hdev->discovery.type, sizeof(hdev->discovery.type));
4123 mgmt_pending_remove(cmd);
/* Discovery state changed: complete whichever start/stop command is
 * pending (success, replying with the discovery type), then broadcast
 * the DISCOVERING event with the new state.  NOTE(review): the
 * discovering-dependent selection between the two pending lookups is
 * partially elided in this excerpt. */
4128 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4130 struct mgmt_ev_discovering ev;
4131 struct pending_cmd *cmd;
4133 BT_DBG("%s discovering %u", hdev->name, discovering);
4136 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4138 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4141 u8 type = hdev->discovery.type;
4143 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4145 mgmt_pending_remove(cmd);
4148 memset(&ev, 0, sizeof(ev));
4149 ev.type = hdev->discovery.type;
4150 ev.discovering = discovering;
4152 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Emit DEVICE_BLOCKED, skipping the socket of the pending BLOCK_DEVICE
 * command (if any) so the requester is not notified twice. */
4155 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4157 struct pending_cmd *cmd;
4158 struct mgmt_ev_device_blocked ev;
4160 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4162 bacpy(&ev.addr.bdaddr, bdaddr);
4163 ev.addr.type = type;
4165 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4166 cmd ? cmd->sk : NULL);
/* Emit DEVICE_UNBLOCKED, skipping the socket of the pending
 * UNBLOCK_DEVICE command (if any). */
4169 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4171 struct pending_cmd *cmd;
4172 struct mgmt_ev_device_unblocked ev;
4174 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4176 bacpy(&ev.addr.bdaddr, bdaddr);
4177 ev.addr.type = type;
4179 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4180 cmd ? cmd->sk : NULL);
/* "enable_hs" module parameter (mode 0644: root-writable via sysfs):
 * toggles High Speed support.  The backing bool is declared elsewhere
 * in this file. */
4183 module_param(enable_hs, bool, 0644);
4184 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");