2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Opcodes of all mgmt commands this kernel implements; reported to
 * user space verbatim by read_commands().
 * NOTE(review): several entries and the closing "};" appear to have
 * been dropped during extraction — compare against the upstream file.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Events this kernel can emit on the mgmt control channel; reported to
 * user space by read_commands() alongside mgmt_commands[].
 * NOTE(review): some entries and the closing "};" appear to be missing
 * from this extraction.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_WIN 0x12
110 #define LE_SCAN_INT 0x12
111 #define LE_SCAN_TIMEOUT_LE_ONLY msecs_to_jiffies(10240)
112 #define LE_SCAN_TIMEOUT_BREDR_LE msecs_to_jiffies(5120)
114 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
115 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
117 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* "Powered" from mgmt's point of view: the controller is HCI_UP and is
 * not merely up because of the kernel-internal auto-power-off grace
 * period (HCI_AUTO_OFF).
 */
119 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
120 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
123 struct list_head list;
131 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; codes past the end of the
 * table are mapped to MGMT_STATUS_FAILED by mgmt_status() below.
 * NOTE(review): the entry for HCI status 0x00 (success) and the closing
 * "};" appear to have been lost in extraction.
 */
132 static u8 mgmt_status_table[] = {
134 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
135 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
136 MGMT_STATUS_FAILED, /* Hardware Failure */
137 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
138 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
139 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
140 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
141 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
142 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
144 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
145 MGMT_STATUS_BUSY, /* Command Disallowed */
146 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
147 MGMT_STATUS_REJECTED, /* Rejected Security */
148 MGMT_STATUS_REJECTED, /* Rejected Personal */
149 MGMT_STATUS_TIMEOUT, /* Host Timeout */
150 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
151 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
152 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
153 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
154 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
155 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
156 MGMT_STATUS_BUSY, /* Repeated Attempts */
157 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
158 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
159 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
160 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
161 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
162 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
163 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
164 MGMT_STATUS_FAILED, /* Unspecified Error */
165 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
166 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
167 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
168 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
169 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
170 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
171 MGMT_STATUS_FAILED, /* Unit Link Key Used */
172 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
173 MGMT_STATUS_TIMEOUT, /* Instant Passed */
174 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
175 MGMT_STATUS_FAILED, /* Transaction Collision */
176 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
177 MGMT_STATUS_REJECTED, /* QoS Rejected */
178 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
179 MGMT_STATUS_REJECTED, /* Insufficient Security */
180 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
181 MGMT_STATUS_BUSY, /* Role Switch Pending */
182 MGMT_STATUS_FAILED, /* Slot Violation */
183 MGMT_STATUS_FAILED, /* Role Switch Failed */
184 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
185 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
186 MGMT_STATUS_BUSY, /* Host Busy Pairing */
187 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
188 MGMT_STATUS_BUSY, /* Controller Busy */
189 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
190 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
191 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
192 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
193 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
196 bool mgmt_valid_hdev(struct hci_dev *hdev)
198 return hdev->dev_type == HCI_BREDR;
201 static u8 mgmt_status(u8 hci_status)
203 if (hci_status < ARRAY_SIZE(mgmt_status_table))
204 return mgmt_status_table[hci_status];
206 return MGMT_STATUS_FAILED;
/* Queue a MGMT_EV_CMD_STATUS event on @sk's receive queue, reporting
 * @status for command @cmd on controller @index.
 * NOTE(review): the local declarations (skb, err), the allocation
 * failure check, the ev->status assignment, the error-path kfree_skb
 * and the final return appear to have been dropped in extraction.
 */
209 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
212 struct mgmt_hdr *hdr;
213 struct mgmt_ev_cmd_status *ev;
216 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
218 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
222 hdr = (void *) skb_put(skb, sizeof(*hdr));
224 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
225 hdr->index = cpu_to_le16(index);
226 hdr->len = cpu_to_le16(sizeof(*ev));
228 ev = (void *) skb_put(skb, sizeof(*ev));
230 ev->opcode = cpu_to_le16(cmd);
232 err = sock_queue_rcv_skb(sk, skb);
/* Queue a MGMT_EV_CMD_COMPLETE event on @sk's receive queue with an
 * rp_len-byte response payload copied from @rp.
 * NOTE(review): local declarations, the alloc-failure check, the
 * ev->status assignment, the rp NULL guard and the tail of the
 * function appear to be missing from this extraction.
 */
239 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
240 void *rp, size_t rp_len)
243 struct mgmt_hdr *hdr;
244 struct mgmt_ev_cmd_complete *ev;
247 BT_DBG("sock %p", sk);
249 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
253 hdr = (void *) skb_put(skb, sizeof(*hdr));
255 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
256 hdr->index = cpu_to_le16(index);
257 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
259 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
260 ev->opcode = cpu_to_le16(cmd);
264 memcpy(ev->data, rp, rp_len);
266 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the static mgmt interface
 * version/revision pair (MGMT_VERSION / MGMT_REVISION).  Takes no
 * controller, so the reply uses MGMT_INDEX_NONE.
 * NOTE(review): the final "sizeof(rp));" continuation and closing brace
 * appear to be missing from this extraction.
 */
273 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
276 struct mgmt_rp_read_version rp;
278 BT_DBG("sock %p", sk);
280 rp.version = MGMT_VERSION;
281 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
283 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: build and return the list of
 * supported command opcodes followed by supported event opcodes, both
 * little-endian, written with put_unaligned_le16 into rp->opcodes.
 * NOTE(review): local declarations (i, rp_size, opcode, err), the
 * kmalloc NULL check, the kfree(rp) and the final return appear to
 * have been dropped in extraction.
 */
287 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
290 struct mgmt_rp_read_commands *rp;
291 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
292 const u16 num_events = ARRAY_SIZE(mgmt_events);
297 BT_DBG("sock %p", sk);
299 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
301 rp = kmalloc(rp_size, GFP_KERNEL);
305 rp->num_commands = __constant_cpu_to_le16(num_commands);
306 rp->num_events = __constant_cpu_to_le16(num_events);
308 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
309 put_unaligned_le16(mgmt_commands[i], opcode);
311 for (i = 0; i < num_events; i++, opcode++)
312 put_unaligned_le16(mgmt_events[i], opcode);
314 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: under hci_dev_list_lock, count the
 * mgmt-valid controllers, allocate the reply, then fill in the index
 * of each controller that is valid and not still in HCI_SETUP.
 * NOTE(review): the first counting loop's body, the kmalloc NULL
 * check, several "continue" statements and the kfree/return tail
 * appear to be missing from this extraction.
 */
321 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
324 struct mgmt_rp_read_index_list *rp;
330 BT_DBG("sock %p", sk);
332 read_lock(&hci_dev_list_lock);
335 list_for_each_entry(d, &hci_dev_list, list) {
336 if (!mgmt_valid_hdev(d))
342 rp_len = sizeof(*rp) + (2 * count);
343 rp = kmalloc(rp_len, GFP_ATOMIC);
345 read_unlock(&hci_dev_list_lock);
350 list_for_each_entry(d, &hci_dev_list, list) {
351 if (test_bit(HCI_SETUP, &d->dev_flags))
354 if (!mgmt_valid_hdev(d))
357 rp->index[count++] = cpu_to_le16(d->id);
358 BT_DBG("Added hci%u", d->id);
361 rp->num_controllers = cpu_to_le16(count);
362 rp_len = sizeof(*rp) + (2 * count);
364 read_unlock(&hci_dev_list_lock);
366 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the bitmask of settings this controller could support, based
 * on its LMP capabilities: POWERED and PAIRABLE are always available;
 * SSP, the BR/EDR group (connectable/discoverable/link security etc.)
 * and LE are gated on the respective lmp_*_capable() checks.
 * NOTE(review): the "settings" declaration, the condition guarding
 * MGMT_SETTING_HS and the final "return settings;" appear to be
 * missing from this extraction.
 */
374 static u32 get_supported_settings(struct hci_dev *hdev)
378 settings |= MGMT_SETTING_POWERED;
379 settings |= MGMT_SETTING_PAIRABLE;
381 if (lmp_ssp_capable(hdev))
382 settings |= MGMT_SETTING_SSP;
384 if (lmp_bredr_capable(hdev)) {
385 settings |= MGMT_SETTING_CONNECTABLE;
386 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
/* Build the bitmask of settings currently in effect, derived from
 * hdev_is_powered() and the HCI_* bits in hdev->dev_flags.
 * NOTE(review): the "settings" declaration and the final
 * "return settings;" appear to be missing from this extraction.
 */
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_FAST_CONNECTABLE;
415 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_DISCOVERABLE;
418 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
419 settings |= MGMT_SETTING_PAIRABLE;
421 if (lmp_bredr_capable(hdev))
422 settings |= MGMT_SETTING_BREDR;
424 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LE;
427 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
428 settings |= MGMT_SETTING_LINK_SECURITY;
430 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_SSP;
433 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
434 settings |= MGMT_SETTING_HS;
439 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (capacity @len), skipping the PnP Information service class.
 * The field tag is EIR_UUID16_ALL, downgraded to EIR_UUID16_SOME when
 * the buffer fills up; field length lives in uuids_start[0].
 * NOTE(review): the uuids_start initialisation block, "continue"
 * statements and the final "return ptr;" appear to be missing from
 * this extraction.
 */
441 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
443 u8 *ptr = data, *uuids_start = NULL;
444 struct bt_uuid *uuid;
449 list_for_each_entry(uuid, &hdev->uuids, list) {
452 if (uuid->size != 16)
455 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
459 if (uuid16 == PNP_INFO_SVCLASS_ID)
465 uuids_start[1] = EIR_UUID16_ALL;
469 /* Stop if not enough space to put next UUID */
470 if ((ptr - data) + sizeof(u16) > len) {
471 uuids_start[1] = EIR_UUID16_SOME;
475 *ptr++ = (uuid16 & 0x00ff);
476 *ptr++ = (uuid16 & 0xff00) >> 8;
477 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data (capacity @len); tag EIR_UUID32_ALL, downgraded to
 * EIR_UUID32_SOME on truncation.  Each UUID's low 4 bytes are copied
 * from offset 12 of the stored 128-bit representation.
 * NOTE(review): the field-header initialisation and the final
 * "return ptr;" appear to be missing from this extraction.
 */
483 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
485 u8 *ptr = data, *uuids_start = NULL;
486 struct bt_uuid *uuid;
491 list_for_each_entry(uuid, &hdev->uuids, list) {
492 if (uuid->size != 32)
498 uuids_start[1] = EIR_UUID32_ALL;
502 /* Stop if not enough space to put next UUID */
503 if ((ptr - data) + sizeof(u32) > len) {
504 uuids_start[1] = EIR_UUID32_SOME;
508 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
510 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data (capacity @len); tag EIR_UUID128_ALL, downgraded to
 * EIR_UUID128_SOME on truncation.  Full 16-byte UUIDs are copied
 * verbatim.
 * NOTE(review): the field-header initialisation and the final
 * "return ptr;" appear to be missing from this extraction.
 */
516 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
518 u8 *ptr = data, *uuids_start = NULL;
519 struct bt_uuid *uuid;
524 list_for_each_entry(uuid, &hdev->uuids, list) {
525 if (uuid->size != 128)
531 uuids_start[1] = EIR_UUID128_ALL;
535 /* Stop if not enough space to put next UUID */
536 if ((ptr - data) + 16 > len) {
537 uuids_start[1] = EIR_UUID128_SOME;
541 memcpy(ptr, uuid->uuid, 16);
543 uuids_start[0] += 16;
/* Assemble the Extended Inquiry Response payload into @data: local
 * name (short vs complete depending on length), inquiry TX power if
 * valid, the Device ID record when devid_source is set, and finally
 * the 16/32/128-bit UUID lists via the create_uuid*_list() helpers,
 * each bounded by the remaining HCI_MAX_EIR_LENGTH space.
 * NOTE(review): the "ptr" declaration, the name-length comparison that
 * selects short vs complete, and several ptr advances appear to be
 * missing from this extraction.
 */
549 static void create_eir(struct hci_dev *hdev, u8 *data)
554 name_len = strlen(hdev->dev_name);
560 ptr[1] = EIR_NAME_SHORT;
562 ptr[1] = EIR_NAME_COMPLETE;
564 /* EIR Data length */
565 ptr[0] = name_len + 1;
567 memcpy(ptr + 2, hdev->dev_name, name_len);
569 ptr += (name_len + 2);
572 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
574 ptr[1] = EIR_TX_POWER;
575 ptr[2] = (u8) hdev->inq_tx_power;
580 if (hdev->devid_source > 0) {
582 ptr[1] = EIR_DEVICE_ID;
584 put_unaligned_le16(hdev->devid_source, ptr + 2);
585 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
586 put_unaligned_le16(hdev->devid_product, ptr + 6);
587 put_unaligned_le16(hdev->devid_version, ptr + 8);
592 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
593 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Append an HCI_OP_WRITE_EIR command to @req when needed.  Bails out
 * early when the controller is off, lacks extended-inquiry support,
 * has SSP disabled, or the service cache is active; also skips the
 * write when the freshly built EIR equals the cached hdev->eir.
 * NOTE(review): the "return" statements of the early-out guards appear
 * to have been dropped in extraction.
 */
597 static void update_eir(struct hci_request *req)
599 struct hci_dev *hdev = req->hdev;
600 struct hci_cp_write_eir cp;
602 if (!hdev_is_powered(hdev))
605 if (!lmp_ext_inq_capable(hdev))
608 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
611 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
614 memset(&cp, 0, sizeof(cp));
616 create_eir(hdev, cp.data);
618 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
621 memcpy(hdev->eir, cp.data, sizeof(cp.data));
623 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
626 static u8 get_service_classes(struct hci_dev *hdev)
628 struct bt_uuid *uuid;
631 list_for_each_entry(uuid, &hdev->uuids, list)
632 val |= uuid->svc_hint;
/* Append an HCI_OP_WRITE_CLASS_OF_DEV command to @req when the
 * computed Class of Device (minor, major, service classes) differs
 * from the cached hdev->dev_class.  Skipped while powered off or while
 * the service cache is active.
 * NOTE(review): the "u8 cod[3];" declaration and the guards' "return"
 * statements appear to be missing from this extraction.
 */
637 static void update_class(struct hci_request *req)
639 struct hci_dev *hdev = req->hdev;
642 BT_DBG("%s", hdev->name);
644 if (!hdev_is_powered(hdev))
647 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
650 cod[0] = hdev->minor_class;
651 cod[1] = hdev->major_class;
652 cod[2] = get_service_classes(hdev);
654 if (memcmp(cod, hdev->dev_class, 3) == 0)
657 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and (judging by the surrounding helpers) rebuilds
 * class/EIR via a queued hci_request.
 * NOTE(review): the container_of() continuation line, the
 * hci_dev_lock() call and the update_class()/update_eir() invocations
 * appear to have been dropped in extraction.
 */
660 static void service_cache_off(struct work_struct *work)
662 struct hci_dev *hdev = container_of(work, struct hci_dev,
664 struct hci_request req;
666 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
669 hci_req_init(&req, hdev);
676 hci_dev_unlock(hdev);
678 hci_req_run(&req, NULL);
/* One-time per-controller mgmt setup, guarded by test_and_set_bit on
 * HCI_MGMT: wires up the service-cache delayed work and clears
 * HCI_PAIRABLE so that user space must enable pairing explicitly.
 */
681 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
683 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
686 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)
688 /* Non-mgmt controlled devices get this bit set
689 * implicitly so that pairing works for them, however
690 * for mgmt we require user-space to explicitly enable
693 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: fill mgmt_rp_read_info with the
 * controller's address, HCI version/manufacturer, supported and
 * current settings bitmasks, class of device and names, then reply.
 * NOTE(review): the hci_dev_lock() call and the "sizeof(rp));"
 * continuation of the return appear to be missing from this
 * extraction.
 */
696 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
697 void *data, u16 data_len)
699 struct mgmt_rp_read_info rp;
701 BT_DBG("sock %p %s", sk, hdev->name);
705 memset(&rp, 0, sizeof(rp));
707 bacpy(&rp.bdaddr, &hdev->bdaddr);
709 rp.version = hdev->hci_ver;
710 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
712 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
713 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
715 memcpy(rp.dev_class, hdev->dev_class, 3);
717 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
718 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
720 hci_dev_unlock(hdev);
722 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command's resources (counterpart of
 * mgmt_pending_add()).
 * NOTE(review): the entire function body was lost in extraction —
 * restore from upstream.
 */
726 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending_cmd for (@opcode, @hdev), copy @len bytes of
 * @data into cmd->param and link it onto hdev->mgmt_pending; the
 * caller's socket reference is recorded on the cmd.
 * NOTE(review): both kmalloc NULL checks, the cmd->sk assignment with
 * sock_hold(), and the final "return cmd;" appear to have been dropped
 * in extraction.
 */
733 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
734 struct hci_dev *hdev, void *data,
737 struct pending_cmd *cmd;
739 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
743 cmd->opcode = opcode;
744 cmd->index = hdev->id;
746 cmd->param = kmalloc(len, GFP_KERNEL);
753 memcpy(cmd->param, data, len);
758 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command of @hdev, optionally filtered by
 * @opcode (0 matches all); uses the _safe iterator so @cb may remove
 * the entry.
 * NOTE(review): the callback parameter's continuation, the "continue;"
 * of the filter and the cb(cmd, data) invocation appear to be missing
 * from this extraction.
 */
763 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
764 void (*cb)(struct pending_cmd *cmd,
768 struct pending_cmd *cmd, *tmp;
770 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
771 if (opcode > 0 && cmd->opcode != opcode)
778 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
780 struct pending_cmd *cmd;
782 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
783 if (cmd->opcode == opcode)
790 static void mgmt_pending_remove(struct pending_cmd *cmd)
792 list_del(&cmd->list);
793 mgmt_pending_free(cmd);
796 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
798 __le32 settings = cpu_to_le32(get_current_settings(hdev));
800 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler.  Validates val is 0/1, handles the
 * HCI_AUTO_OFF fast path (device already up, just claim ownership),
 * short-circuits when the requested state already holds, rejects a
 * duplicate pending SET_POWERED with a busy status, and otherwise
 * queues power_on/power_off work with a pending command recorded.
 * NOTE(review): the hci_dev_lock() call, several goto-failed branches,
 * the MGMT_STATUS_BUSY argument, the cmd NULL check and the final
 * "return err;" appear to be missing from this extraction.
 */
804 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
807 struct mgmt_mode *cp = data;
808 struct pending_cmd *cmd;
811 BT_DBG("request for %s", hdev->name);
813 if (cp->val != 0x00 && cp->val != 0x01)
814 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
815 MGMT_STATUS_INVALID_PARAMS);
819 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
820 cancel_delayed_work(&hdev->power_off);
823 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
825 err = mgmt_powered(hdev, 1);
830 if (!!cp->val == hdev_is_powered(hdev)) {
831 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
835 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
836 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
841 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
848 queue_work(hdev->req_workqueue, &hdev->power_on);
850 queue_work(hdev->req_workqueue, &hdev->power_off.work);
855 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control-channel listeners except
 * @skip_sk.  The index field is the controller id, or MGMT_INDEX_NONE
 * when @hdev is NULL (both branches are visible below); the skb is
 * timestamped before delivery via hci_send_to_control().
 * NOTE(review): the alloc_skb NULL check, the if/else around the index
 * assignment, the data NULL guard and the final return appear to be
 * missing from this extraction.
 */
859 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
860 struct sock *skip_sk)
863 struct mgmt_hdr *hdr;
865 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
869 hdr = (void *) skb_put(skb, sizeof(*hdr));
870 hdr->opcode = cpu_to_le16(event);
872 hdr->index = cpu_to_le16(hdev->id);
874 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
875 hdr->len = cpu_to_le16(data_len);
878 memcpy(skb_put(skb, data_len), data, data_len);
881 __net_timestamp(skb);
883 hci_send_to_control(skb, skip_sk);
889 static int new_settings(struct hci_dev *hdev, struct sock *skip)
893 ev = cpu_to_le32(get_current_settings(hdev));
895 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler.  Validation: BR/EDR capability,
 * val in {0,1}, timeout only meaningful with val=1, power required for
 * a timeout, no concurrent SET_DISCOVERABLE/SET_CONNECTABLE pending,
 * and the device must be connectable.  Powered-off requests just flip
 * HCI_DISCOVERABLE and emit new settings; no-op requests only adjust
 * the discov_off timer.  Otherwise a pending command is recorded and
 * HCI_OP_WRITE_SCAN_ENABLE is issued.
 * NOTE(review): local declarations (timeout, scan, err), hci_dev_lock,
 * many goto-failed branches, the scan |= SCAN_PAGE line and the final
 * "return err;" appear to have been dropped in extraction.
 */
898 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
901 struct mgmt_cp_set_discoverable *cp = data;
902 struct pending_cmd *cmd;
907 BT_DBG("request for %s", hdev->name);
909 if (!lmp_bredr_capable(hdev))
910 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
911 MGMT_STATUS_NOT_SUPPORTED);
913 if (cp->val != 0x00 && cp->val != 0x01)
914 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
915 MGMT_STATUS_INVALID_PARAMS);
917 timeout = __le16_to_cpu(cp->timeout);
918 if (!cp->val && timeout > 0)
919 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
920 MGMT_STATUS_INVALID_PARAMS);
924 if (!hdev_is_powered(hdev) && timeout > 0) {
925 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
926 MGMT_STATUS_NOT_POWERED);
930 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
931 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
932 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
937 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
938 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
939 MGMT_STATUS_REJECTED);
943 if (!hdev_is_powered(hdev)) {
944 bool changed = false;
946 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
947 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
951 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
956 err = new_settings(hdev, sk);
961 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
962 if (hdev->discov_timeout > 0) {
963 cancel_delayed_work(&hdev->discov_off);
964 hdev->discov_timeout = 0;
967 if (cp->val && timeout > 0) {
968 hdev->discov_timeout = timeout;
969 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
970 msecs_to_jiffies(hdev->discov_timeout * 1000));
973 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
977 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
986 scan |= SCAN_INQUIRY;
988 cancel_delayed_work(&hdev->discov_off);
990 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
992 mgmt_pending_remove(cmd);
995 hdev->discov_timeout = timeout;
998 hci_dev_unlock(hdev);
/* Append page-scan parameter commands to @req implementing "fast
 * connectable": interlaced scan at a 160 ms interval when @enable,
 * standard scan at the default 1.28 s interval otherwise; the 11.25 ms
 * window is shared.  Commands are only added when the requested
 * values differ from the cached hdev state.  Requires HCI >= 1.2.
 * NOTE(review): the "u8 type;" declaration, the if/else around
 * @enable, and the sizeof(acp)/&acp arguments of the first
 * hci_req_add() appear to be missing from this extraction.
 */
1002 static void write_fast_connectable(struct hci_request *req, bool enable)
1004 struct hci_dev *hdev = req->hdev;
1005 struct hci_cp_write_page_scan_activity acp;
1008 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1012 type = PAGE_SCAN_TYPE_INTERLACED;
1014 /* 160 msec page scan interval */
1015 acp.interval = __constant_cpu_to_le16(0x0100);
1017 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1019 /* default 1.28 sec page scan */
1020 acp.interval = __constant_cpu_to_le16(0x0800);
1023 acp.window = __constant_cpu_to_le16(0x0012);
1025 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1026 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1027 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1030 if (hdev->page_scan_type != type)
1031 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* hci_request completion callback for set_connectable(): finds the
 * pending SET_CONNECTABLE command, sends the settings response to its
 * socket and removes it.
 * NOTE(review): the hci_dev_lock() call and the "if (!cmd) goto
 * unlock;" guard appear to be missing from this extraction.
 */
1034 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1036 struct pending_cmd *cmd;
1038 BT_DBG("status 0x%02x", status);
1042 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1048 mgmt_pending_remove(cmd);
1051 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler.  Validates BR/EDR capability and
 * val in {0,1}.  Powered-off: toggles HCI_CONNECTABLE directly (also
 * clearing HCI_DISCOVERABLE when disabling) and emits new settings.
 * Powered-on: rejects concurrent SET_DISCOVERABLE/SET_CONNECTABLE,
 * short-circuits when page scan already matches, otherwise records a
 * pending command and builds a request: WRITE_SCAN_ENABLE plus a
 * fast-connectable fix-up, completed by set_connectable_complete().
 * NOTE(review): the "u8 scan;" declaration, hci_dev_lock, the scan
 * value selection (SCAN_PAGE vs 0), several goto branches and the
 * final "return err;" appear to have been dropped in extraction.
 */
1054 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1057 struct mgmt_mode *cp = data;
1058 struct pending_cmd *cmd;
1059 struct hci_request req;
1063 BT_DBG("request for %s", hdev->name);
1065 if (!lmp_bredr_capable(hdev))
1066 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1067 MGMT_STATUS_NOT_SUPPORTED);
1069 if (cp->val != 0x00 && cp->val != 0x01)
1070 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1071 MGMT_STATUS_INVALID_PARAMS);
1075 if (!hdev_is_powered(hdev)) {
1076 bool changed = false;
1078 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1082 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1084 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1085 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1088 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1093 err = new_settings(hdev, sk);
1098 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1099 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1100 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1105 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1106 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1110 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1121 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1122 hdev->discov_timeout > 0)
1123 cancel_delayed_work(&hdev->discov_off);
1126 hci_req_init(&req, hdev);
1128 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1130 /* If we're going from non-connectable to connectable or
1131 * vice-versa when fast connectable is enabled ensure that fast
1132 * connectable gets disabled. write_fast_connectable won't do
1133 * anything if the page scan parameters are already what they
1136 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1137 write_fast_connectable(&req, false);
1139 err = hci_req_run(&req, set_connectable_complete);
1141 mgmt_pending_remove(cmd);
1144 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: purely a host-side flag — sets or
 * clears HCI_PAIRABLE, replies with the settings, and broadcasts new
 * settings.  No HCI command is involved.
 * NOTE(review): hci_dev_lock, the if/else around cp->val, the "changed"
 * tracking and the final "return err;" appear to be missing from this
 * extraction.
 */
1148 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1151 struct mgmt_mode *cp = data;
1154 BT_DBG("request for %s", hdev->name);
1156 if (cp->val != 0x00 && cp->val != 0x01)
1157 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1158 MGMT_STATUS_INVALID_PARAMS);
1163 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1165 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1167 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1171 err = new_settings(hdev, sk);
1174 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler.  Powered-off: toggles the
 * HCI_LINK_SECURITY flag and emits new settings.  Powered-on: rejects
 * a duplicate pending request, short-circuits when HCI_AUTH already
 * matches, else records a pending command and issues
 * HCI_OP_WRITE_AUTH_ENABLE.
 * NOTE(review): the "u8 val;" declaration/assignment, hci_dev_lock,
 * several goto-failed branches and the final "return err;" appear to
 * be missing from this extraction.
 */
1178 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1181 struct mgmt_mode *cp = data;
1182 struct pending_cmd *cmd;
1186 BT_DBG("request for %s", hdev->name);
1188 if (!lmp_bredr_capable(hdev))
1189 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1190 MGMT_STATUS_NOT_SUPPORTED);
1192 if (cp->val != 0x00 && cp->val != 0x01)
1193 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1194 MGMT_STATUS_INVALID_PARAMS);
1198 if (!hdev_is_powered(hdev)) {
1199 bool changed = false;
1201 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1202 &hdev->dev_flags)) {
1203 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1207 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1212 err = new_settings(hdev, sk);
1217 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1218 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1225 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1226 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1236 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1238 mgmt_pending_remove(cmd);
1243 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler, same shape as set_link_security but for
 * Simple Pairing: powered-off toggles HCI_SSP_ENABLED; powered-on
 * rejects duplicates, short-circuits on no-change, else records a
 * pending command and issues HCI_OP_WRITE_SSP_MODE.
 * NOTE(review): the "u8 val;" declaration/assignment, hci_dev_lock,
 * goto-failed branches and the final "return err;" appear to be
 * missing from this extraction.
 */
1247 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1249 struct mgmt_mode *cp = data;
1250 struct pending_cmd *cmd;
1254 BT_DBG("request for %s", hdev->name);
1256 if (!lmp_ssp_capable(hdev))
1257 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1258 MGMT_STATUS_NOT_SUPPORTED);
1260 if (cp->val != 0x00 && cp->val != 0x01)
1261 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1262 MGMT_STATUS_INVALID_PARAMS);
1268 if (!hdev_is_powered(hdev)) {
1269 bool changed = false;
1271 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1272 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1276 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1281 err = new_settings(hdev, sk);
1286 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1287 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1292 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1293 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1297 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1303 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1305 mgmt_pending_remove(cmd);
1310 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: High Speed (AMP) is a pure host-side flag —
 * set or clear HCI_HS_ENABLED and reply; no HCI traffic.
 * NOTE(review): the capability check guarding the NOT_SUPPORTED
 * status (line 1320 upstream) and the if/else around cp->val appear
 * to be missing from this extraction.
 */
1314 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1316 struct mgmt_mode *cp = data;
1318 BT_DBG("request for %s", hdev->name);
1321 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1322 MGMT_STATUS_NOT_SUPPORTED);
1324 if (cp->val != 0x00 && cp->val != 0x01)
1325 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1326 MGMT_STATUS_INVALID_PARAMS);
1329 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1331 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1333 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler.  Requires LE capability; dual-mode only
 * (LE-only controllers cannot toggle LE off).  When powered off or
 * when the host-LE state already matches, just flip HCI_LE_ENABLED
 * and emit new settings.  Otherwise record a pending command and
 * issue HCI_OP_WRITE_LE_HOST_SUPPORTED with le/simul fields.
 * NOTE(review): the val/enabled declarations, hci_dev_lock, the
 * hci_cp.le assignment, goto-failed branches and the final
 * "return err;" appear to be missing from this extraction.
 */
1336 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1338 struct mgmt_mode *cp = data;
1339 struct hci_cp_write_le_host_supported hci_cp;
1340 struct pending_cmd *cmd;
1344 BT_DBG("request for %s", hdev->name);
1346 if (!lmp_le_capable(hdev))
1347 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1348 MGMT_STATUS_NOT_SUPPORTED);
1350 if (cp->val != 0x00 && cp->val != 0x01)
1351 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1352 MGMT_STATUS_INVALID_PARAMS);
1354 /* LE-only devices do not allow toggling LE on/off */
1355 if (!lmp_bredr_capable(hdev))
1356 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1357 MGMT_STATUS_REJECTED);
1362 enabled = lmp_host_le_capable(hdev);
1364 if (!hdev_is_powered(hdev) || val == enabled) {
1365 bool changed = false;
1367 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1368 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1372 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1377 err = new_settings(hdev, sk);
1382 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1383 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1388 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1394 memset(&hci_cp, 0, sizeof(hci_cp));
1398 hci_cp.simul = lmp_le_br_capable(hdev);
1401 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1404 mgmt_pending_remove(cmd);
1407 hci_dev_unlock(hdev);
1411 /* This is a helper function to test for pending mgmt commands that can
1412 * cause CoD or EIR HCI commands. We can only allow one such pending
1413 * mgmt command at a time since otherwise we cannot easily track what
1414 * the current values are, will be, and based on that calculate if a new
1415 * HCI command needs to be sent and if yes with what value.
/* Returns true when any ADD_UUID / REMOVE_UUID / SET_DEV_CLASS /
 * SET_POWERED command is pending on @hdev.
 * NOTE(review): the "return true;" inside the switch and the trailing
 * "return false;" appear to have been dropped in extraction.
 */
1417 static bool pending_eir_or_class(struct hci_dev *hdev)
1419 struct pending_cmd *cmd;
1421 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1422 switch (cmd->opcode) {
1423 case MGMT_OP_ADD_UUID:
1424 case MGMT_OP_REMOVE_UUID:
1425 case MGMT_OP_SET_DEV_CLASS:
1426 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs share this tail, which is
 * what get_uuid_size() compares against.
 * NOTE(review): the closing "};" appears to be missing from this
 * extraction.
 */
1434 static const u8 bluetooth_base_uuid[] = {
1435 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1436 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 16, 32 or 128 bits wide: UUIDs not built
 * on the Bluetooth base UUID are 128-bit; otherwise the 32-bit value
 * at offset 12 decides (visible here only up to its extraction).
 * NOTE(review): the "return 128;", the comparison of val against
 * 0xffff and the remaining returns appear to be missing from this
 * extraction.
 */
1439 static u8 get_uuid_size(const u8 *uuid)
1443 if (memcmp(uuid, bluetooth_base_uuid, 12))
1446 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the class/EIR-affecting commands: find
 * the pending @mgmt_op command, send a command-complete carrying the
 * (possibly updated) 3-byte device class plus the converted status,
 * and remove the pending entry.
 * NOTE(review): hci_dev_lock and the "if (!cmd) goto unlock;" guard
 * appear to be missing from this extraction.
 */
1453 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1455 struct pending_cmd *cmd;
1459 cmd = mgmt_pending_find(mgmt_op, hdev);
1463 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1464 hdev->dev_class, 3);
1466 mgmt_pending_remove(cmd);
1469 hci_dev_unlock(hdev);
1472 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1474 BT_DBG("status 0x%02x", status);
1476 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: rejects when a class/EIR-affecting command
 * is already pending, allocates and links a bt_uuid entry (uuid bytes,
 * svc_hint, computed size), then builds a request updating CoD and EIR.
 * If the request produces no HCI commands (-ENODATA) the reply is sent
 * immediately; otherwise a pending command awaits add_uuid_complete().
 * NOTE(review): hci_dev_lock, the kmalloc NULL check, the
 * update_class()/update_eir() calls, goto-failed branches and the
 * final "return err;" appear to be missing from this extraction.
 */
1479 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1481 struct mgmt_cp_add_uuid *cp = data;
1482 struct pending_cmd *cmd;
1483 struct hci_request req;
1484 struct bt_uuid *uuid;
1487 BT_DBG("request for %s", hdev->name);
1491 if (pending_eir_or_class(hdev)) {
1492 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1497 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1503 memcpy(uuid->uuid, cp->uuid, 16);
1504 uuid->svc_hint = cp->svc_hint;
1505 uuid->size = get_uuid_size(cp->uuid);
1507 list_add_tail(&uuid->list, &hdev->uuids);
1509 hci_req_init(&req, hdev);
1514 err = hci_req_run(&req, add_uuid_complete);
1516 if (err != -ENODATA)
1519 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1520 hdev->dev_class, 3);
1524 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1533 hci_dev_unlock(hdev);
/* Arm the service cache while powered: sets HCI_SERVICE_CACHE and
 * queues the service_cache delayed work; returns whether caching was
 * (re)armed by this call.
 * NOTE(review): the "return false;" branches, the CACHE_TIMEOUT
 * argument and the "return true;" appear to be missing from this
 * extraction.
 */
1537 static bool enable_service_cache(struct hci_dev *hdev)
1539 if (!hdev_is_powered(hdev))
1542 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1543 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1551 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1553 BT_DBG("status 0x%02x", status);
1555 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID command handler.  An all-zero UUID clears the whole
 * list (optionally re-enabling the service cache); otherwise the matching
 * entries are unlinked from hdev->uuids.  No match is an invalid-params
 * error.  On success an hci_request refreshes EIR/class, with the same
 * synchronous -ENODATA shortcut as add_uuid().
 */
1558 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1561 struct mgmt_cp_remove_uuid *cp = data;
1562 struct pending_cmd *cmd;
1563 struct bt_uuid *match, *tmp;
/* Sentinel "wildcard" UUID: all zeroes means remove everything. */
1564 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1565 struct hci_request req;
1568 BT_DBG("request for %s", hdev->name);
1572 if (pending_eir_or_class(hdev)) {
1573 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1578 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1579 err = hci_uuids_clear(hdev);
1581 if (enable_service_cache(hdev)) {
/* Cache re-armed: the EIR update will happen later, answer now. */
1582 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1583 0, hdev->dev_class, 3);
1592 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1593 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1596 list_del(&match->list);
/* Fell through without finding the UUID -> invalid parameters. */
1602 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1603 MGMT_STATUS_INVALID_PARAMS);
1608 hci_req_init(&req, hdev);
1613 err = hci_req_run(&req, remove_uuid_complete);
1615 if (err != -ENODATA)
1618 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1619 hdev->dev_class, 3);
1623 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1632 hci_dev_unlock(hdev);
/* hci_request completion callback for MGMT_OP_SET_DEV_CLASS. */
1636 static void set_class_complete(struct hci_dev *hdev, u8 status)
1638 BT_DBG("status 0x%02x", status);
1640 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);

/* MGMT Set Device Class command handler.  BR/EDR only; rejects reserved
 * bits (minor low 2 bits, major high 3 bits), stores the new class, and
 * if powered flushes any pending service cache before pushing the class
 * to the controller via an hci_request.
 */
1643 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1646 struct mgmt_cp_set_dev_class *cp = data;
1647 struct pending_cmd *cmd;
1648 struct hci_request req;
1651 BT_DBG("request for %s", hdev->name);
1653 if (!lmp_bredr_capable(hdev))
1654 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1655 MGMT_STATUS_NOT_SUPPORTED);
1659 if (pending_eir_or_class(hdev)) {
1660 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD bits must be zero (Assigned Numbers, Baseband). */
1665 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1666 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1667 MGMT_STATUS_INVALID_PARAMS);
1671 hdev->major_class = cp->major;
1672 hdev->minor_class = cp->minor;
1674 if (!hdev_is_powered(hdev)) {
/* Not powered: remember the class, apply it on next power-on. */
1675 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1676 hdev->dev_class, 3);
1680 hci_req_init(&req, hdev);
1682 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
/* Must drop the dev lock before cancel_delayed_work_sync(): the
 * service_cache work itself takes the lock.
 */
1683 hci_dev_unlock(hdev);
1684 cancel_delayed_work_sync(&hdev->service_cache);
1691 err = hci_req_run(&req, set_class_complete);
1693 if (err != -ENODATA)
1696 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1697 hdev->dev_class, 3);
1701 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1710 hci_dev_unlock(hdev);
/* MGMT Load Link Keys command handler.  Validates the variable-length
 * payload (header plus key_count * mgmt_link_key_info), the debug_keys
 * boolean and each key's address type (BR/EDR only), then replaces the
 * stored link keys wholesale and updates the debug-keys flag.
 */
1714 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1717 struct mgmt_cp_load_link_keys *cp = data;
1718 u16 key_count, expected_len;
1721 key_count = __le16_to_cpu(cp->key_count);
/* The payload length must match key_count exactly -- both too-short
 * and too-long payloads are rejected.
 */
1723 expected_len = sizeof(*cp) + key_count *
1724 sizeof(struct mgmt_link_key_info);
1725 if (expected_len != len) {
1726 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1728 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1729 MGMT_STATUS_INVALID_PARAMS);
1732 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1733 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1734 MGMT_STATUS_INVALID_PARAMS);
1736 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every entry before touching stored state. */
1739 for (i = 0; i < key_count; i++) {
1740 struct mgmt_link_key_info *key = &cp->keys[i];
1742 if (key->addr.type != BDADDR_BREDR)
1743 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1744 MGMT_STATUS_INVALID_PARAMS);
1749 hci_link_keys_clear(hdev);
1751 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1754 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1756 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: install the validated keys. */
1758 for (i = 0; i < key_count; i++) {
1759 struct mgmt_link_key_info *key = &cp->keys[i];
1761 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1762 key->type, key->pin_len);
1765 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1767 hci_dev_unlock(hdev);
/* Emit a Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk (the socket that requested the unpair).
 */
1772 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1773 u8 addr_type, struct sock *skip_sk)
1775 struct mgmt_ev_device_unpaired ev;
1777 bacpy(&ev.addr.bdaddr, bdaddr);
1778 ev.addr.type = addr_type;
1780 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device command handler.  Removes the stored link key (BR/
 * EDR) or LTK (LE) for the address; MGMT_STATUS_NOT_PAIRED if nothing was
 * stored.  When cp->disconnect is set and a live connection exists, a
 * pending command is queued and an HCI Disconnect (reason 0x13) is sent;
 * otherwise the reply and the Device Unpaired event are sent immediately.
 */
1784 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1787 struct mgmt_cp_unpair_device *cp = data;
1788 struct mgmt_rp_unpair_device rp;
1789 struct hci_cp_disconnect dc;
1790 struct pending_cmd *cmd;
1791 struct hci_conn *conn;
1794 memset(&rp, 0, sizeof(rp));
1795 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1796 rp.addr.type = cp->addr.type;
1798 if (!bdaddr_type_is_valid(cp->addr.type))
1799 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1800 MGMT_STATUS_INVALID_PARAMS,
/* disconnect must be a strict boolean per the mgmt API. */
1803 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1804 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1805 MGMT_STATUS_INVALID_PARAMS,
1810 if (!hdev_is_powered(hdev)) {
1811 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1812 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1816 if (cp->addr.type == BDADDR_BREDR)
1817 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1819 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1822 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1823 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1827 if (cp->disconnect) {
1828 if (cp->addr.type == BDADDR_BREDR)
1829 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1832 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No connection to tear down: answer and notify immediately. */
1839 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1841 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1845 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1852 dc.handle = cpu_to_le16(conn->handle);
1853 dc.reason = 0x13; /* Remote User Terminated Connection */
1854 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1856 mgmt_pending_remove(cmd);
1859 hci_dev_unlock(hdev);
/* MGMT Disconnect command handler.  Validates the address type, requires
 * the adapter to be up and no other Disconnect to be pending, resolves
 * the live ACL/LE connection and sends HCI Disconnect with Remote User
 * Terminated Connection; the reply is delivered when the disconnect
 * completes (via the pending command registered here).
 */
1863 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1866 struct mgmt_cp_disconnect *cp = data;
1867 struct mgmt_rp_disconnect rp;
1868 struct hci_cp_disconnect dc;
1869 struct pending_cmd *cmd;
1870 struct hci_conn *conn;
1875 memset(&rp, 0, sizeof(rp));
1876 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1877 rp.addr.type = cp->addr.type;
1879 if (!bdaddr_type_is_valid(cp->addr.type))
1880 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1881 MGMT_STATUS_INVALID_PARAMS,
1886 if (!test_bit(HCI_UP, &hdev->flags)) {
1887 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1888 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one Disconnect may be in flight per adapter. */
1892 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1893 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1894 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1898 if (cp->addr.type == BDADDR_BREDR)
1899 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1902 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1904 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1905 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1906 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1910 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1916 dc.handle = cpu_to_le16(conn->handle);
1917 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1919 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1921 mgmt_pending_remove(cmd);
1924 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type onto the mgmt BDADDR_*
 * address-type encoding used on the wire towards userspace.
 */
1928 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1930 switch (link_type) {
1932 switch (addr_type) {
1933 case ADDR_LE_DEV_PUBLIC:
1934 return BDADDR_LE_PUBLIC;
1937 /* Fallback to LE Random address type */
1938 return BDADDR_LE_RANDOM;
1942 /* Fallback to BR/EDR type */
1943 return BDADDR_BREDR;
/* MGMT Get Connections command handler.  Counts the mgmt-visible
 * connections, allocates a reply sized for that count, fills in address
 * and type for each ACL/LE connection while skipping SCO/eSCO links,
 * and trims the reply length to the entries actually written.
 */
1947 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1950 struct mgmt_rp_get_connections *rp;
1960 if (!hdev_is_powered(hdev)) {
1961 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1962 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected. */
1967 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1968 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1972 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1973 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: populate the reply, skipping SCO/eSCO links. */
1980 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1981 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1983 bacpy(&rp->addr[i].bdaddr, &c->dst);
1984 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1985 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1990 rp->conn_count = cpu_to_le16(i);
1992 /* Recalculate length in case of filtered SCO connections, etc */
1993 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1995 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2001 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Neg Reply and forward the negative reply
 * (bdaddr only) to the controller; drops the pending entry on send
 * failure.
 */
2005 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2006 struct mgmt_cp_pin_code_neg_reply *cp)
2008 struct pending_cmd *cmd;
2011 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2016 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2017 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2019 mgmt_pending_remove(cmd);

/* MGMT PIN Code Reply command handler.  Requires a powered adapter and
 * an existing ACL connection.  If the link demands high security a
 * 16-byte PIN is mandatory -- shorter PINs are converted into a negative
 * reply towards the controller and an invalid-params status to the
 * caller.  Otherwise forwards the PIN via HCI PIN Code Reply.
 */
2024 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2027 struct hci_conn *conn;
2028 struct mgmt_cp_pin_code_reply *cp = data;
2029 struct hci_cp_pin_code_reply reply;
2030 struct pending_cmd *cmd;
2037 if (!hdev_is_powered(hdev)) {
2038 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2039 MGMT_STATUS_NOT_POWERED);
2043 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2045 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2046 MGMT_STATUS_NOT_CONNECTED);
/* Secure pairing requires a full 16-byte PIN. */
2050 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2051 struct mgmt_cp_pin_code_neg_reply ncp;
2053 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2055 BT_ERR("PIN code is not 16 bytes long");
2057 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2059 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2060 MGMT_STATUS_INVALID_PARAMS);
2065 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2071 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2072 reply.pin_len = cp->pin_len;
2073 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2075 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2077 mgmt_pending_remove(cmd);
2080 hci_dev_unlock(hdev);
/* MGMT Set IO Capability command handler: store the new IO capability on
 * the adapter and acknowledge immediately (no HCI traffic needed).
 */
2084 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2087 struct mgmt_cp_set_io_capability *cp = data;
2093 hdev->io_capability = cp->io_capability;
2095 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2096 hdev->io_capability);
2098 hci_dev_unlock(hdev);
2100 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,

/* Find the pending Pair Device command whose user_data is @conn, or
 * NULL if this connection has no pairing in progress.
 */
2104 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2106 struct hci_dev *hdev = conn->hdev;
2107 struct pending_cmd *cmd;
2109 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2110 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2113 if (cmd->user_data != conn)
/* Finish a Pair Device operation: reply to the requester with the peer
 * address and @status, detach all pairing callbacks from the connection,
 * drop the connection reference taken at pairing start, and remove the
 * pending command.
 */
2122 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2124 struct mgmt_rp_pair_device rp;
2125 struct hci_conn *conn = cmd->user_data;
2127 bacpy(&rp.addr.bdaddr, &conn->dst);
2128 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2130 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2133 /* So we don't get further callbacks for this connection */
2134 conn->connect_cfm_cb = NULL;
2135 conn->security_cfm_cb = NULL;
2136 conn->disconn_cfm_cb = NULL;
2138 hci_conn_drop(conn);
2140 mgmt_pending_remove(cmd);

/* Generic connect/security/disconnect callback used for BR/EDR pairing:
 * resolve the pending command for @conn and complete it.
 */
2143 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2145 struct pending_cmd *cmd;
2147 BT_DBG("status %u", status);
2149 cmd = find_pairing(conn);
2151 BT_DBG("Unable to find a pending command");
2153 pairing_complete(cmd, mgmt_status(status));

/* LE connect-complete callback: like pairing_complete_cb, but for LE a
 * successful connection alone does not mean pairing is done (see the
 * comment in pair_device()); elided lines presumably guard on status.
 */
2156 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2158 struct pending_cmd *cmd;
2160 BT_DBG("status %u", status);
2165 cmd = find_pairing(conn);
2167 BT_DBG("Unable to find a pending command");
2169 pairing_complete(cmd, mgmt_status(status));
/* MGMT Pair Device command handler.  Initiates an ACL or LE connection
 * with dedicated-bonding authentication (MITM unless io_cap is
 * NoInputNoOutput), registers a pending command with the connection as
 * user_data, and wires up the pairing callbacks.  If the link is already
 * connected and secure enough, completes immediately.
 */
2172 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2175 struct mgmt_cp_pair_device *cp = data;
2176 struct mgmt_rp_pair_device rp;
2177 struct pending_cmd *cmd;
2178 u8 sec_level, auth_type;
2179 struct hci_conn *conn;
2184 memset(&rp, 0, sizeof(rp));
2185 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2186 rp.addr.type = cp->addr.type;
2188 if (!bdaddr_type_is_valid(cp->addr.type))
2189 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2190 MGMT_STATUS_INVALID_PARAMS,
2195 if (!hdev_is_powered(hdev)) {
2196 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2197 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2201 sec_level = BT_SECURITY_MEDIUM;
/* io_cap 0x03 = NoInputNoOutput: MITM protection is impossible. */
2202 if (cp->io_cap == 0x03)
2203 auth_type = HCI_AT_DEDICATED_BONDING;
2205 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2207 if (cp->addr.type == BDADDR_BREDR)
2208 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2209 cp->addr.type, sec_level, auth_type);
2211 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2212 cp->addr.type, sec_level, auth_type);
2217 if (PTR_ERR(conn) == -EBUSY)
2218 status = MGMT_STATUS_BUSY;
2220 status = MGMT_STATUS_CONNECT_FAILED;
2222 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing owns this
 * connection.
 */
2228 if (conn->connect_cfm_cb) {
2229 hci_conn_drop(conn);
2230 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2231 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2235 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2238 hci_conn_drop(conn);
2242 /* For LE, just connecting isn't a proof that the pairing finished */
2243 if (cp->addr.type == BDADDR_BREDR)
2244 conn->connect_cfm_cb = pairing_complete_cb;
2246 conn->connect_cfm_cb = le_connect_complete_cb;
2248 conn->security_cfm_cb = pairing_complete_cb;
2249 conn->disconn_cfm_cb = pairing_complete_cb;
2250 conn->io_capability = cp->io_cap;
2251 cmd->user_data = conn;
2253 if (conn->state == BT_CONNECTED &&
2254 hci_conn_security(conn, sec_level, auth_type))
2255 pairing_complete(cmd, 0);
2260 hci_dev_unlock(hdev);
/* MGMT Cancel Pair Device command handler.  Locates the pending Pair
 * Device command, verifies the supplied address matches the connection
 * being paired, aborts it with MGMT_STATUS_CANCELLED and acknowledges
 * the cancel with the address echoed back.
 */
2264 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2267 struct mgmt_addr_info *addr = data;
2268 struct pending_cmd *cmd;
2269 struct hci_conn *conn;
2276 if (!hdev_is_powered(hdev)) {
2277 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2278 MGMT_STATUS_NOT_POWERED);
2282 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2284 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2285 MGMT_STATUS_INVALID_PARAMS);
2289 conn = cmd->user_data;
/* The address must match the pairing actually in progress. */
2291 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2292 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2293 MGMT_STATUS_INVALID_PARAMS);
2297 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2299 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2300 addr, sizeof(*addr));
2302 hci_dev_unlock(hdev);
/* Shared worker for all user-interaction pairing responses (PIN neg,
 * confirm, passkey and their negatives).  LE addresses are routed to SMP
 * via smp_user_confirm_reply() and answered synchronously; BR/EDR ones
 * are forwarded as the given @hci_op with a pending command registered
 * for the eventual HCI completion.
 */
2306 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2307 struct mgmt_addr_info *addr, u16 mgmt_op,
2308 u16 hci_op, __le32 passkey)
2310 struct pending_cmd *cmd;
2311 struct hci_conn *conn;
2316 if (!hdev_is_powered(hdev)) {
2317 err = cmd_complete(sk, hdev->id, mgmt_op,
2318 MGMT_STATUS_NOT_POWERED, addr,
2323 if (addr->type == BDADDR_BREDR)
2324 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2326 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2329 err = cmd_complete(sk, hdev->id, mgmt_op,
2330 MGMT_STATUS_NOT_CONNECTED, addr,
2335 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2336 /* Continue with pairing via SMP */
2337 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2340 err = cmd_complete(sk, hdev->id, mgmt_op,
2341 MGMT_STATUS_SUCCESS, addr,
2344 err = cmd_complete(sk, hdev->id, mgmt_op,
2345 MGMT_STATUS_FAILED, addr,
2351 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2357 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all other ops send bdaddr only. */
2358 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2359 struct hci_cp_user_passkey_reply cp;
2361 bacpy(&cp.bdaddr, &addr->bdaddr);
2362 cp.passkey = passkey;
2363 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2365 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2369 mgmt_pending_remove(cmd);
2372 hci_dev_unlock(hdev);
/* Thin wrappers: each maps one user-response mgmt opcode onto
 * user_pairing_resp() with the matching HCI opcode (and passkey where
 * applicable).
 */
2376 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2377 void *data, u16 len)
2379 struct mgmt_cp_pin_code_neg_reply *cp = data;
2383 return user_pairing_resp(sk, hdev, &cp->addr,
2384 MGMT_OP_PIN_CODE_NEG_REPLY,
2385 HCI_OP_PIN_CODE_NEG_REPLY, 0);

2388 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2391 struct mgmt_cp_user_confirm_reply *cp = data;
/* Explicit length check: this opcode's handler table entry allows
 * variable length, so validate here.
 */
2395 if (len != sizeof(*cp))
2396 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2397 MGMT_STATUS_INVALID_PARAMS);
2399 return user_pairing_resp(sk, hdev, &cp->addr,
2400 MGMT_OP_USER_CONFIRM_REPLY,
2401 HCI_OP_USER_CONFIRM_REPLY, 0);

2404 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2405 void *data, u16 len)
2407 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2411 return user_pairing_resp(sk, hdev, &cp->addr,
2412 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2413 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);

2416 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2419 struct mgmt_cp_user_passkey_reply *cp = data;
2423 return user_pairing_resp(sk, hdev, &cp->addr,
2424 MGMT_OP_USER_PASSKEY_REPLY,
2425 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);

2428 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2429 void *data, u16 len)
2431 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2435 return user_pairing_resp(sk, hdev, &cp->addr,
2436 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2437 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name. */
2440 static void update_name(struct hci_request *req)
2442 struct hci_dev *hdev = req->hdev;
2443 struct hci_cp_write_local_name cp;
2445 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2447 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);

/* hci_request completion callback for MGMT_OP_SET_LOCAL_NAME: answer the
 * pending command with either a translated error status or success plus
 * the name data, then drop the pending entry.
 */
2450 static void set_name_complete(struct hci_dev *hdev, u8 status)
2452 struct mgmt_cp_set_local_name *cp;
2453 struct pending_cmd *cmd;
2455 BT_DBG("status 0x%02x", status);
2459 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2466 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2467 mgmt_status(status));
2469 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2472 mgmt_pending_remove(cmd);
2475 hci_dev_unlock(hdev);
/* MGMT Set Local Name command handler.  Short-circuits when both name
 * and short name are unchanged.  When the adapter is powered down only
 * the stored names are updated and a Local Name Changed event emitted;
 * otherwise an hci_request pushes the new name (and, on LE-capable
 * controllers, refreshed advertising data) to the controller.
 */
2478 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2481 struct mgmt_cp_set_local_name *cp = data;
2482 struct pending_cmd *cmd;
2483 struct hci_request req;
2490 /* If the old values are the same as the new ones just return a
2491 * direct command complete event.
2493 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2494 !memcmp(hdev->short_name, cp->short_name,
2495 sizeof(hdev->short_name))) {
2496 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2501 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2503 if (!hdev_is_powered(hdev)) {
2504 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2506 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2511 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2517 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2523 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2525 hci_req_init(&req, hdev);
2527 if (lmp_bredr_capable(hdev)) {
/* LE controllers carry the name in advertising data as well. */
2532 if (lmp_le_capable(hdev))
2533 hci_update_ad(&req);
2535 err = hci_req_run(&req, set_name_complete);
2537 mgmt_pending_remove(cmd);
2540 hci_dev_unlock(hdev);
/* MGMT Read Local OOB Data command handler.  Requires a powered, SSP-
 * capable adapter and no other Read Local OOB Data pending; forwards the
 * request to the controller, answering asynchronously via the pending
 * command when the HCI event arrives.
 */
2544 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2545 void *data, u16 data_len)
2547 struct pending_cmd *cmd;
2550 BT_DBG("%s", hdev->name);
2554 if (!hdev_is_powered(hdev)) {
2555 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2556 MGMT_STATUS_NOT_POWERED);
2560 if (!lmp_ssp_capable(hdev)) {
2561 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2562 MGMT_STATUS_NOT_SUPPORTED);
2566 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2567 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2572 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2578 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2580 mgmt_pending_remove(cmd);
2583 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data command handler: store the remote device's
 * OOB hash/randomizer and acknowledge synchronously with FAILED or
 * SUCCESS depending on the store result.
 */
2587 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2588 void *data, u16 len)
2590 struct mgmt_cp_add_remote_oob_data *cp = data;
2594 BT_DBG("%s ", hdev->name);
2598 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2601 status = MGMT_STATUS_FAILED;
2603 status = MGMT_STATUS_SUCCESS;
2605 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2606 &cp->addr, sizeof(cp->addr));
2608 hci_dev_unlock(hdev);

/* MGMT Remove Remote OOB Data command handler: delete stored OOB data
 * for the address; a failed delete maps to INVALID_PARAMS (no entry).
 */
2612 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2613 void *data, u16 len)
2615 struct mgmt_cp_remove_remote_oob_data *cp = data;
2619 BT_DBG("%s", hdev->name);
2623 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2625 status = MGMT_STATUS_INVALID_PARAMS;
2627 status = MGMT_STATUS_SUCCESS;
2629 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2630 status, &cp->addr, sizeof(cp->addr));
2632 hci_dev_unlock(hdev);
/* Kick the BR/EDR inquiry phase of an interleaved (BR/EDR + LE)
 * discovery; on failure the discovery state machine is reset to STOPPED.
 * Non-static: called from other parts of the HCI core.
 */
2636 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2640 BT_DBG("%s", hdev->name);
2644 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2646 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2648 hci_dev_unlock(hdev);
/* MGMT Start Discovery command handler.  Requires a powered adapter with
 * no periodic inquiry and discovery currently stopped.  Dispatches on
 * the requested type: BR/EDR inquiry, LE active scan, or interleaved
 * (LE scan first, inquiry follows via mgmt_interleaved_discovery),
 * rejecting types the controller cannot support.  On success moves the
 * discovery state machine to STARTING; the reply comes asynchronously.
 */
2653 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2654 void *data, u16 len)
2656 struct mgmt_cp_start_discovery *cp = data;
2657 struct pending_cmd *cmd;
2660 BT_DBG("%s", hdev->name);
2664 if (!hdev_is_powered(hdev)) {
2665 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2666 MGMT_STATUS_NOT_POWERED);
2670 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2671 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2676 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2677 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2682 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2688 hdev->discovery.type = cp->type;
2690 switch (hdev->discovery.type) {
2691 case DISCOV_TYPE_BREDR:
2692 if (!lmp_bredr_capable(hdev)) {
2693 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2694 MGMT_STATUS_NOT_SUPPORTED);
2695 mgmt_pending_remove(cmd);
2699 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2702 case DISCOV_TYPE_LE:
2703 if (!lmp_host_le_capable(hdev)) {
2704 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2705 MGMT_STATUS_NOT_SUPPORTED);
2706 mgmt_pending_remove(cmd);
2710 err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
2711 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
/* Interleaved: needs both radios; the LE scan runs first with a
 * shorter timeout, BR/EDR inquiry follows.
 */
2714 case DISCOV_TYPE_INTERLEAVED:
2715 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2716 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2717 MGMT_STATUS_NOT_SUPPORTED);
2718 mgmt_pending_remove(cmd);
2722 err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
2723 LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
2727 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2728 MGMT_STATUS_INVALID_PARAMS);
2729 mgmt_pending_remove(cmd);
2734 mgmt_pending_remove(cmd);
2736 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2739 hci_dev_unlock(hdev);
/* MGMT Stop Discovery command handler.  Rejects if discovery is inactive
 * or the given type doesn't match the running one.  Depending on the
 * discovery phase it cancels the inquiry/LE scan (FINDING) or the
 * outstanding remote-name request (RESOLVING); when nothing is left to
 * cancel it completes immediately, otherwise the state machine moves to
 * STOPPING and the reply arrives asynchronously.
 */
2743 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2746 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2747 struct pending_cmd *cmd;
2748 struct hci_cp_remote_name_req_cancel cp;
2749 struct inquiry_entry *e;
2752 BT_DBG("%s", hdev->name);
2756 if (!hci_discovery_active(hdev)) {
2757 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2758 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2759 sizeof(mgmt_cp->type));
2763 if (hdev->discovery.type != mgmt_cp->type) {
2764 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2765 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2766 sizeof(mgmt_cp->type));
2770 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2776 switch (hdev->discovery.state) {
2777 case DISCOVERY_FINDING:
2778 if (test_bit(HCI_INQUIRY, &hdev->flags))
2779 err = hci_cancel_inquiry(hdev);
2781 err = hci_cancel_le_scan(hdev);
2785 case DISCOVERY_RESOLVING:
/* Look for the name resolution currently in flight; if none is
 * pending we can finish synchronously.
 */
2786 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2789 mgmt_pending_remove(cmd);
2790 err = cmd_complete(sk, hdev->id,
2791 MGMT_OP_STOP_DISCOVERY, 0,
2793 sizeof(mgmt_cp->type));
2794 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2798 bacpy(&cp.bdaddr, &e->data.bdaddr);
2799 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2805 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2810 mgmt_pending_remove(cmd);
2812 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2815 hci_dev_unlock(hdev);
/* MGMT Confirm Name command handler.  During an active discovery, marks
 * an inquiry-cache entry's name as KNOWN (no resolution needed) or
 * NEEDED (queue it for name resolution) based on cp->name_known, then
 * echoes the address back.
 */
2819 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2822 struct mgmt_cp_confirm_name *cp = data;
2823 struct inquiry_entry *e;
2826 BT_DBG("%s", hdev->name);
2830 if (!hci_discovery_active(hdev)) {
2831 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2832 MGMT_STATUS_FAILED);
2836 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2838 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2839 MGMT_STATUS_INVALID_PARAMS);
2843 if (cp->name_known) {
2844 e->name_state = NAME_KNOWN;
2847 e->name_state = NAME_NEEDED;
2848 hci_inquiry_cache_update_resolve(hdev, e);
2851 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2855 hci_dev_unlock(hdev);
/* MGMT Block Device command handler: add the address to the adapter's
 * blacklist and acknowledge synchronously (FAILED if the add failed,
 * e.g. already blocked).
 */
2859 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2862 struct mgmt_cp_block_device *cp = data;
2866 BT_DBG("%s", hdev->name);
2868 if (!bdaddr_type_is_valid(cp->addr.type))
2869 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2870 MGMT_STATUS_INVALID_PARAMS,
2871 &cp->addr, sizeof(cp->addr));
2875 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2877 status = MGMT_STATUS_FAILED;
2879 status = MGMT_STATUS_SUCCESS;
2881 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2882 &cp->addr, sizeof(cp->addr));
2884 hci_dev_unlock(hdev);

/* MGMT Unblock Device command handler: remove the address from the
 * blacklist; a failed delete (not blocked) maps to INVALID_PARAMS.
 */
2889 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2892 struct mgmt_cp_unblock_device *cp = data;
2896 BT_DBG("%s", hdev->name);
2898 if (!bdaddr_type_is_valid(cp->addr.type))
2899 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2900 MGMT_STATUS_INVALID_PARAMS,
2901 &cp->addr, sizeof(cp->addr));
2905 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2907 status = MGMT_STATUS_INVALID_PARAMS;
2909 status = MGMT_STATUS_SUCCESS;
2911 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2912 &cp->addr, sizeof(cp->addr));
2914 hci_dev_unlock(hdev);
/* MGMT Set Device ID command handler.  Validates the DI source (0x0000
 * disabled, 0x0001 Bluetooth SIG, 0x0002 USB-IF), stores the vendor/
 * product/version identifiers, replies immediately, and runs a (fire-
 * and-forget) hci_request to refresh the EIR with the new DI record.
 */
2919 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2922 struct mgmt_cp_set_device_id *cp = data;
2923 struct hci_request req;
2927 BT_DBG("%s", hdev->name);
2929 source = __le16_to_cpu(cp->source);
2931 if (source > 0x0002)
2932 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2933 MGMT_STATUS_INVALID_PARAMS);
2937 hdev->devid_source = source;
2938 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2939 hdev->devid_product = __le16_to_cpu(cp->product);
2940 hdev->devid_version = __le16_to_cpu(cp->version);
2942 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2944 hci_req_init(&req, hdev);
/* No completion callback: the EIR update result is not reported. */
2946 hci_req_run(&req, NULL);
2948 hci_dev_unlock(hdev);
/* hci_request completion callback for MGMT_OP_SET_FAST_CONNECTABLE: on
 * error send a status reply; on success flip HCI_FAST_CONNECTABLE to
 * match the requested mode, confirm the settings to the requester and
 * broadcast New Settings.
 */
2953 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2955 struct pending_cmd *cmd;
2957 BT_DBG("status 0x%02x", status);
2961 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2966 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2967 mgmt_status(status));
2969 struct mgmt_mode *cp = cmd->param;
2972 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2974 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2976 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2977 new_settings(hdev, cmd->sk);
2980 mgmt_pending_remove(cmd);
2983 hci_dev_unlock(hdev);

/* MGMT Set Fast Connectable command handler.  Needs BR/EDR and at least
 * Bluetooth 1.2 (interlaced page scan), a strict boolean value, a
 * powered adapter in connectable mode, and no identical command in
 * flight.  A no-op request is confirmed directly; otherwise the page-
 * scan parameters are rewritten via an hci_request and the reply comes
 * from fast_connectable_complete().
 */
2986 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2987 void *data, u16 len)
2989 struct mgmt_mode *cp = data;
2990 struct pending_cmd *cmd;
2991 struct hci_request req;
2994 BT_DBG("%s", hdev->name);
2996 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
2997 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2998 MGMT_STATUS_NOT_SUPPORTED);
3000 if (cp->val != 0x00 && cp->val != 0x01)
3001 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3002 MGMT_STATUS_INVALID_PARAMS);
3004 if (!hdev_is_powered(hdev))
3005 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3006 MGMT_STATUS_NOT_POWERED);
3008 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3009 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3010 MGMT_STATUS_REJECTED);
3014 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3015 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested mode already active: settings response, no HCI work. */
3020 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3021 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3026 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3033 hci_req_init(&req, hdev);
3035 write_fast_connectable(&req, cp->val);
3037 err = hci_req_run(&req, fast_connectable_complete);
3039 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3040 MGMT_STATUS_FAILED);
3041 mgmt_pending_remove(cmd);
3045 hci_dev_unlock(hdev);
/* Validate one LTK entry from userspace: authenticated and master must
 * be strict booleans and the address type must be an LE type.
 */
3050 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3052 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3054 if (key->master != 0x00 && key->master != 0x01)
3056 if (!bdaddr_type_is_le(key->addr.type))

/* MGMT Load Long Term Keys command handler.  Mirrors load_link_keys():
 * exact payload-length check against key_count, validate-all-then-
 * install, wholesale replacement of the stored SMP LTKs.
 */
3061 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3062 void *cp_data, u16 len)
3064 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3065 u16 key_count, expected_len;
3068 key_count = __le16_to_cpu(cp->key_count);
3070 expected_len = sizeof(*cp) + key_count *
3071 sizeof(struct mgmt_ltk_info);
3072 if (expected_len != len) {
3073 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3075 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3076 MGMT_STATUS_INVALID_PARAMS);
3079 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Reject the whole load if any single entry is malformed. */
3081 for (i = 0; i < key_count; i++) {
3082 struct mgmt_ltk_info *key = &cp->keys[i];
3084 if (!ltk_is_valid(key))
3085 return cmd_status(sk, hdev->id,
3086 MGMT_OP_LOAD_LONG_TERM_KEYS,
3087 MGMT_STATUS_INVALID_PARAMS);
3092 hci_smp_ltks_clear(hdev);
3094 for (i = 0; i < key_count; i++) {
3095 struct mgmt_ltk_info *key = &cp->keys[i];
3101 type = HCI_SMP_LTK_SLAVE;
3103 hci_add_ltk(hdev, &key->addr.bdaddr,
3104 bdaddr_to_le(key->addr.type),
3105 type, 0, key->authenticated, key->val,
3106 key->enc_size, key->ediv, key->rand);
3109 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3112 hci_dev_unlock(hdev);
/* Dispatch table indexed by mgmt opcode.  Each entry pairs a handler
 * with a flag indicating whether the command payload is variable-length
 * (true: data_len is a minimum; false: data_len must match exactly) and
 * the expected/minimum size.  Slot order must match the MGMT_OP_*
 * opcode values.
 */
3117 static const struct mgmt_handler {
3118 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3122 } mgmt_handlers[] = {
3123 { NULL }, /* 0x0000 (no command) */
3124 { read_version, false, MGMT_READ_VERSION_SIZE },
3125 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3126 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3127 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3128 { set_powered, false, MGMT_SETTING_SIZE },
3129 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3130 { set_connectable, false, MGMT_SETTING_SIZE },
3131 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3132 { set_pairable, false, MGMT_SETTING_SIZE },
3133 { set_link_security, false, MGMT_SETTING_SIZE },
3134 { set_ssp, false, MGMT_SETTING_SIZE },
3135 { set_hs, false, MGMT_SETTING_SIZE },
3136 { set_le, false, MGMT_SETTING_SIZE },
3137 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3138 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3139 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3140 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3141 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3142 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3143 { disconnect, false, MGMT_DISCONNECT_SIZE },
3144 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3145 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3146 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3147 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3148 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3149 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3150 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3151 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3152 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3153 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3154 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3155 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3156 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3157 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3158 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3159 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3160 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3161 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3162 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3163 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
3167 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3171 struct mgmt_hdr *hdr;
3172 u16 opcode, index, len;
3173 struct hci_dev *hdev = NULL;
3174 const struct mgmt_handler *handler;
3177 BT_DBG("got %zu bytes", msglen);
3179 if (msglen < sizeof(*hdr))
3182 buf = kmalloc(msglen, GFP_KERNEL);
3186 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3192 opcode = __le16_to_cpu(hdr->opcode);
3193 index = __le16_to_cpu(hdr->index);
3194 len = __le16_to_cpu(hdr->len);
3196 if (len != msglen - sizeof(*hdr)) {
3201 if (index != MGMT_INDEX_NONE) {
3202 hdev = hci_dev_get(index);
3204 err = cmd_status(sk, index, opcode,
3205 MGMT_STATUS_INVALID_INDEX);
3210 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3211 mgmt_handlers[opcode].func == NULL) {
3212 BT_DBG("Unknown op %u", opcode);
3213 err = cmd_status(sk, index, opcode,
3214 MGMT_STATUS_UNKNOWN_COMMAND);
3218 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3219 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3220 err = cmd_status(sk, index, opcode,
3221 MGMT_STATUS_INVALID_INDEX);
3225 handler = &mgmt_handlers[opcode];
3227 if ((handler->var_len && len < handler->data_len) ||
3228 (!handler->var_len && len != handler->data_len)) {
3229 err = cmd_status(sk, index, opcode,
3230 MGMT_STATUS_INVALID_PARAMS);
3235 mgmt_init_hdev(sk, hdev);
3237 cp = buf + sizeof(*hdr);
3239 err = handler->func(sk, hdev, cp, len);
3253 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3257 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3258 mgmt_pending_remove(cmd);
3261 int mgmt_index_added(struct hci_dev *hdev)
3263 if (!mgmt_valid_hdev(hdev))
3266 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3269 int mgmt_index_removed(struct hci_dev *hdev)
3271 u8 status = MGMT_STATUS_INVALID_INDEX;
3273 if (!mgmt_valid_hdev(hdev))
3276 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3278 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3283 struct hci_dev *hdev;
3287 static void settings_rsp(struct pending_cmd *cmd, void *data)
3289 struct cmd_lookup *match = data;
3291 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3293 list_del(&cmd->list);
3295 if (match->sk == NULL) {
3296 match->sk = cmd->sk;
3297 sock_hold(match->sk);
3300 mgmt_pending_free(cmd);
3303 static void set_bredr_scan(struct hci_request *req)
3305 struct hci_dev *hdev = req->hdev;
3308 /* Ensure that fast connectable is disabled. This function will
3309 * not do anything if the page scan parameters are already what
3312 write_fast_connectable(req, false);
3314 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3316 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3317 scan |= SCAN_INQUIRY;
3320 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3323 static void powered_complete(struct hci_dev *hdev, u8 status)
3325 struct cmd_lookup match = { NULL, hdev };
3327 BT_DBG("status 0x%02x", status);
3331 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3333 new_settings(hdev, match.sk);
3335 hci_dev_unlock(hdev);
/* Build and run an hci_request that brings the controller's HCI state in
 * line with the mgmt dev_flags after power-on: SSP mode, LE host support,
 * authentication and BR/EDR scan mode.  Returns hci_req_run()'s result;
 * powered_complete() fires when the request finishes.
 *
 * NOTE(review): this extract is missing several original lines (local
 * declarations such as link_sec, the body of the SSP branch, and the tail
 * of the lmp_bredr_capable() branch) -- restore from upstream mgmt.c.
 */
3341 static int powered_update_hci(struct hci_dev *hdev)
3343 struct hci_request req;
3346 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host side
 * does not have it enabled yet.
 */
3348 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3349 !lmp_host_ssp_capable(hdev)) {
3352 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
/* LE host support: only touch it when the controller is dual-mode. */
3355 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3356 lmp_bredr_capable(hdev)) {
3357 struct hci_cp_write_le_host_supported cp;
3360 cp.simul = lmp_le_br_capable(hdev);
3362 /* Check first if we already have the right
3363 * host state (host features set)
3365 if (cp.le != lmp_host_le_capable(hdev) ||
3366 cp.simul != lmp_host_le_br_capable(hdev))
3367 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Sync the authentication-enable setting with the LINK_SECURITY flag. */
3371 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3372 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3373 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3374 sizeof(link_sec), &link_sec);
/* BR/EDR-capable controllers also get their scan mode restored here. */
3376 if (lmp_bredr_capable(hdev)) {
3377 set_bredr_scan(&req);
3383 return hci_req_run(&req, powered_complete);
3386 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3388 struct cmd_lookup match = { NULL, hdev };
3389 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3390 u8 zero_cod[] = { 0, 0, 0 };
3393 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3397 if (powered_update_hci(hdev) == 0)
3400 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3405 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3406 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3408 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3409 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3410 zero_cod, sizeof(zero_cod), NULL);
3413 err = new_settings(hdev, match.sk);
3421 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3423 struct cmd_lookup match = { NULL, hdev };
3424 bool changed = false;
3428 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3431 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3435 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3439 err = new_settings(hdev, match.sk);
3447 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3449 struct pending_cmd *cmd;
3450 bool changed = false;
3454 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3457 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3461 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3464 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3469 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3471 u8 mgmt_err = mgmt_status(status);
3473 if (scan & SCAN_PAGE)
3474 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3475 cmd_status_rsp, &mgmt_err);
3477 if (scan & SCAN_INQUIRY)
3478 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3479 cmd_status_rsp, &mgmt_err);
3484 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3487 struct mgmt_ev_new_link_key ev;
3489 memset(&ev, 0, sizeof(ev));
3491 ev.store_hint = persistent;
3492 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3493 ev.key.addr.type = BDADDR_BREDR;
3494 ev.key.type = key->type;
3495 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3496 ev.key.pin_len = key->pin_len;
3498 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3501 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3503 struct mgmt_ev_new_long_term_key ev;
3505 memset(&ev, 0, sizeof(ev));
3507 ev.store_hint = persistent;
3508 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3509 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3510 ev.key.authenticated = key->authenticated;
3511 ev.key.enc_size = key->enc_size;
3512 ev.key.ediv = key->ediv;
3514 if (key->type == HCI_SMP_LTK)
3517 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3518 memcpy(ev.key.val, key->val, sizeof(key->val));
3520 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3524 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3525 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3529 struct mgmt_ev_device_connected *ev = (void *) buf;
3532 bacpy(&ev->addr.bdaddr, bdaddr);
3533 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3535 ev->flags = __cpu_to_le32(flags);
3538 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3541 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3542 eir_len = eir_append_data(ev->eir, eir_len,
3543 EIR_CLASS_OF_DEV, dev_class, 3);
3545 ev->eir_len = cpu_to_le16(eir_len);
3547 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3548 sizeof(*ev) + eir_len, NULL);
3551 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3553 struct mgmt_cp_disconnect *cp = cmd->param;
3554 struct sock **sk = data;
3555 struct mgmt_rp_disconnect rp;
3557 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3558 rp.addr.type = cp->addr.type;
3560 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3566 mgmt_pending_remove(cmd);
3569 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3571 struct hci_dev *hdev = data;
3572 struct mgmt_cp_unpair_device *cp = cmd->param;
3573 struct mgmt_rp_unpair_device rp;
3575 memset(&rp, 0, sizeof(rp));
3576 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3577 rp.addr.type = cp->addr.type;
3579 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3581 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3583 mgmt_pending_remove(cmd);
3586 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3587 u8 link_type, u8 addr_type, u8 reason)
3589 struct mgmt_ev_device_disconnected ev;
3590 struct sock *sk = NULL;
3593 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3595 bacpy(&ev.addr.bdaddr, bdaddr);
3596 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3599 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3605 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3611 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3612 u8 link_type, u8 addr_type, u8 status)
3614 struct mgmt_rp_disconnect rp;
3615 struct pending_cmd *cmd;
3618 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3621 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3625 bacpy(&rp.addr.bdaddr, bdaddr);
3626 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3628 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3629 mgmt_status(status), &rp, sizeof(rp));
3631 mgmt_pending_remove(cmd);
3636 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3637 u8 addr_type, u8 status)
3639 struct mgmt_ev_connect_failed ev;
3641 bacpy(&ev.addr.bdaddr, bdaddr);
3642 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3643 ev.status = mgmt_status(status);
3645 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3648 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3650 struct mgmt_ev_pin_code_request ev;
3652 bacpy(&ev.addr.bdaddr, bdaddr);
3653 ev.addr.type = BDADDR_BREDR;
3656 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3660 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3663 struct pending_cmd *cmd;
3664 struct mgmt_rp_pin_code_reply rp;
3667 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3671 bacpy(&rp.addr.bdaddr, bdaddr);
3672 rp.addr.type = BDADDR_BREDR;
3674 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3675 mgmt_status(status), &rp, sizeof(rp));
3677 mgmt_pending_remove(cmd);
3682 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3685 struct pending_cmd *cmd;
3686 struct mgmt_rp_pin_code_reply rp;
3689 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3693 bacpy(&rp.addr.bdaddr, bdaddr);
3694 rp.addr.type = BDADDR_BREDR;
3696 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3697 mgmt_status(status), &rp, sizeof(rp));
3699 mgmt_pending_remove(cmd);
3704 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3705 u8 link_type, u8 addr_type, __le32 value,
3708 struct mgmt_ev_user_confirm_request ev;
3710 BT_DBG("%s", hdev->name);
3712 bacpy(&ev.addr.bdaddr, bdaddr);
3713 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3714 ev.confirm_hint = confirm_hint;
3717 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3721 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3722 u8 link_type, u8 addr_type)
3724 struct mgmt_ev_user_passkey_request ev;
3726 BT_DBG("%s", hdev->name);
3728 bacpy(&ev.addr.bdaddr, bdaddr);
3729 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3731 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3735 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3736 u8 link_type, u8 addr_type, u8 status,
3739 struct pending_cmd *cmd;
3740 struct mgmt_rp_user_confirm_reply rp;
3743 cmd = mgmt_pending_find(opcode, hdev);
3747 bacpy(&rp.addr.bdaddr, bdaddr);
3748 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3749 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3752 mgmt_pending_remove(cmd);
3757 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3758 u8 link_type, u8 addr_type, u8 status)
3760 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3761 status, MGMT_OP_USER_CONFIRM_REPLY);
3764 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3765 u8 link_type, u8 addr_type, u8 status)
3767 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3769 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3772 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3773 u8 link_type, u8 addr_type, u8 status)
3775 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3776 status, MGMT_OP_USER_PASSKEY_REPLY);
3779 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3780 u8 link_type, u8 addr_type, u8 status)
3782 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3784 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3787 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3788 u8 link_type, u8 addr_type, u32 passkey,
3791 struct mgmt_ev_passkey_notify ev;
3793 BT_DBG("%s", hdev->name);
3795 bacpy(&ev.addr.bdaddr, bdaddr);
3796 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3797 ev.passkey = __cpu_to_le32(passkey);
3798 ev.entered = entered;
3800 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3803 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3804 u8 addr_type, u8 status)
3806 struct mgmt_ev_auth_failed ev;
3808 bacpy(&ev.addr.bdaddr, bdaddr);
3809 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3810 ev.status = mgmt_status(status);
3812 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3815 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3817 struct cmd_lookup match = { NULL, hdev };
3818 bool changed = false;
3822 u8 mgmt_err = mgmt_status(status);
3823 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3824 cmd_status_rsp, &mgmt_err);
3828 if (test_bit(HCI_AUTH, &hdev->flags)) {
3829 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3832 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3836 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3840 err = new_settings(hdev, match.sk);
3848 static void clear_eir(struct hci_request *req)
3850 struct hci_dev *hdev = req->hdev;
3851 struct hci_cp_write_eir cp;
3853 if (!lmp_ext_inq_capable(hdev))
3856 memset(hdev->eir, 0, sizeof(hdev->eir));
3858 memset(&cp, 0, sizeof(cp));
3860 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
3863 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3865 struct cmd_lookup match = { NULL, hdev };
3866 struct hci_request req;
3867 bool changed = false;
3871 u8 mgmt_err = mgmt_status(status);
3873 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3875 err = new_settings(hdev, NULL);
3877 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3884 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3887 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3891 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3894 err = new_settings(hdev, match.sk);
3899 hci_req_init(&req, hdev);
3901 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3906 hci_req_run(&req, NULL);
3911 static void sk_lookup(struct pending_cmd *cmd, void *data)
3913 struct cmd_lookup *match = data;
3915 if (match->sk == NULL) {
3916 match->sk = cmd->sk;
3917 sock_hold(match->sk);
3921 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3924 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3927 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3928 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3929 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3932 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
3941 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3943 struct mgmt_cp_set_local_name ev;
3944 struct pending_cmd *cmd;
3949 memset(&ev, 0, sizeof(ev));
3950 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3951 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3953 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3955 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3957 /* If this is a HCI command related to powering on the
3958 * HCI dev don't send any mgmt signals.
3960 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3964 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3965 cmd ? cmd->sk : NULL);
3968 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3969 u8 *randomizer, u8 status)
3971 struct pending_cmd *cmd;
3974 BT_DBG("%s status %u", hdev->name, status);
3976 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3981 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3982 mgmt_status(status));
3984 struct mgmt_rp_read_local_oob_data rp;
3986 memcpy(rp.hash, hash, sizeof(rp.hash));
3987 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3989 err = cmd_complete(cmd->sk, hdev->id,
3990 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3994 mgmt_pending_remove(cmd);
3999 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4001 struct cmd_lookup match = { NULL, hdev };
4002 bool changed = false;
4006 u8 mgmt_err = mgmt_status(status);
4008 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
4010 err = new_settings(hdev, NULL);
4012 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
4019 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4022 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4026 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4029 err = new_settings(hdev, match.sk);
4037 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4038 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4039 ssp, u8 *eir, u16 eir_len)
4042 struct mgmt_ev_device_found *ev = (void *) buf;
4045 /* Leave 5 bytes for a potential CoD field */
4046 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4049 memset(buf, 0, sizeof(buf));
4051 bacpy(&ev->addr.bdaddr, bdaddr);
4052 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4055 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4057 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4060 memcpy(ev->eir, eir, eir_len);
4062 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4063 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4066 ev->eir_len = cpu_to_le16(eir_len);
4067 ev_size = sizeof(*ev) + eir_len;
4069 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4072 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4073 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4075 struct mgmt_ev_device_found *ev;
4076 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4079 ev = (struct mgmt_ev_device_found *) buf;
4081 memset(buf, 0, sizeof(buf));
4083 bacpy(&ev->addr.bdaddr, bdaddr);
4084 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4087 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4090 ev->eir_len = cpu_to_le16(eir_len);
4092 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4093 sizeof(*ev) + eir_len, NULL);
4096 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4098 struct pending_cmd *cmd;
4102 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4104 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4108 type = hdev->discovery.type;
4110 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4111 &type, sizeof(type));
4112 mgmt_pending_remove(cmd);
4117 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4119 struct pending_cmd *cmd;
4122 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4126 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4127 &hdev->discovery.type, sizeof(hdev->discovery.type));
4128 mgmt_pending_remove(cmd);
4133 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4135 struct mgmt_ev_discovering ev;
4136 struct pending_cmd *cmd;
4138 BT_DBG("%s discovering %u", hdev->name, discovering);
4141 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4143 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4146 u8 type = hdev->discovery.type;
4148 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4150 mgmt_pending_remove(cmd);
4153 memset(&ev, 0, sizeof(ev));
4154 ev.type = hdev->discovery.type;
4155 ev.discovering = discovering;
4157 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4160 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4162 struct pending_cmd *cmd;
4163 struct mgmt_ev_device_blocked ev;
4165 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4167 bacpy(&ev.addr.bdaddr, bdaddr);
4168 ev.addr.type = type;
4170 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4171 cmd ? cmd->sk : NULL);
4174 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4176 struct pending_cmd *cmd;
4177 struct mgmt_ev_device_unblocked ev;
4179 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4181 bacpy(&ev.addr.bdaddr, bdaddr);
4182 ev.addr.type = type;
4184 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4185 cmd ? cmd->sk : NULL);
/* Module parameter: allow toggling Bluetooth High Speed (AMP) support
 * at module load time or via sysfs (mode 0644).
 */
4188 module_param(enable_hs, bool, 0644);
4189 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");