2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
/* Version of the Management interface implemented by this file; returned
 * to user space by read_version() below.
 */
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Opcodes accepted by this Management interface; the table is copied
 * verbatim into the read_commands() reply.
 * NOTE(review): numbering gaps show some entries (and the closing brace)
 * are missing from this view of the file.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Events this interface can emit; also reported by read_commands().
 * NOTE(review): numbering gaps show some entries (and the closing brace)
 * are missing from this view of the file.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* Discovery timing constants plus small helpers.
 * NOTE(review): the opening line of the comment below was lost in
 * extraction of this view.
 */
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_WIN 0x12
110 #define LE_SCAN_INT 0x12
111 #define LE_SCAN_TIMEOUT_LE_ONLY msecs_to_jiffies(10240)
112 #define LE_SCAN_TIMEOUT_BREDR_LE msecs_to_jiffies(5120)
114 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
115 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
117 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* "Powered" from mgmt's point of view: HCI_UP set and not in the
 * transient auto-power-off state.
 */
119 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
120 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* Member of a struct (presumably struct pending_cmd) whose surrounding
 * definition is not visible in this view.
 */
123 struct list_head list;
131 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; mgmt_status() bounds-checks
 * every lookup against ARRAY_SIZE of this table.
 * NOTE(review): a numbering gap before the first visible entry suggests
 * the entry for status 0x00 (success) is missing from this view.
 */
132 static u8 mgmt_status_table[] = {
134 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
135 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
136 MGMT_STATUS_FAILED, /* Hardware Failure */
137 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
138 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
139 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
140 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
141 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
142 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
144 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
145 MGMT_STATUS_BUSY, /* Command Disallowed */
146 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
147 MGMT_STATUS_REJECTED, /* Rejected Security */
148 MGMT_STATUS_REJECTED, /* Rejected Personal */
149 MGMT_STATUS_TIMEOUT, /* Host Timeout */
150 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
151 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
152 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
153 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
154 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
155 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
156 MGMT_STATUS_BUSY, /* Repeated Attempts */
157 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
158 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
159 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
160 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
161 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
162 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
163 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
164 MGMT_STATUS_FAILED, /* Unspecified Error */
165 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
166 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
167 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
168 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
169 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
170 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
171 MGMT_STATUS_FAILED, /* Unit Link Key Used */
172 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
173 MGMT_STATUS_TIMEOUT, /* Instant Passed */
174 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
175 MGMT_STATUS_FAILED, /* Transaction Collision */
176 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
177 MGMT_STATUS_REJECTED, /* QoS Rejected */
178 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
179 MGMT_STATUS_REJECTED, /* Insufficient Security */
180 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
181 MGMT_STATUS_BUSY, /* Role Switch Pending */
182 MGMT_STATUS_FAILED, /* Slot Violation */
183 MGMT_STATUS_FAILED, /* Role Switch Failed */
184 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
185 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
186 MGMT_STATUS_BUSY, /* Host Busy Pairing */
187 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
188 MGMT_STATUS_BUSY, /* Controller Busy */
189 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
190 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
191 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
192 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
193 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
196 bool mgmt_valid_hdev(struct hci_dev *hdev)
198 return hdev->dev_type == HCI_BREDR;
201 static u8 mgmt_status(u8 hci_status)
203 if (hci_status < ARRAY_SIZE(mgmt_status_table))
204 return mgmt_status_table[hci_status];
206 return MGMT_STATUS_FAILED;
/* Queue a Command Status event (opcode + status) on the given mgmt
 * socket. Returns 0 on success or a negative errno.
 * NOTE(review): numbering gaps show this view is missing lines, e.g.
 * the skb/err declarations, the allocation-failure check, the
 * ev->status assignment and the final return.
 */
209 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
212 struct mgmt_hdr *hdr;
213 struct mgmt_ev_cmd_status *ev;
216 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
218 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
/* Build the mgmt header followed by the event payload in the skb */
222 hdr = (void *) skb_put(skb, sizeof(*hdr));
224 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
225 hdr->index = cpu_to_le16(index);
226 hdr->len = cpu_to_le16(sizeof(*ev));
228 ev = (void *) skb_put(skb, sizeof(*ev));
230 ev->opcode = cpu_to_le16(cmd);
232 err = sock_queue_rcv_skb(sk, skb);
/* Queue a Command Complete event on the given mgmt socket, with an
 * optional response payload of rp_len bytes appended after the event
 * header. Returns 0 on success or a negative errno.
 * NOTE(review): numbering gaps show missing lines in this view (skb/err
 * declarations, allocation check, ev->status assignment, return).
 */
239 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
240 void *rp, size_t rp_len)
243 struct mgmt_hdr *hdr;
244 struct mgmt_ev_cmd_complete *ev;
247 BT_DBG("sock %p", sk);
249 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
253 hdr = (void *) skb_put(skb, sizeof(*hdr));
255 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
256 hdr->index = cpu_to_le16(index);
257 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
/* Reserve event struct plus payload in one skb_put, then copy payload */
259 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
260 ev->opcode = cpu_to_le16(cmd);
264 memcpy(ev->data, rp, rp_len);
266 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the interface version and
 * revision constants defined at the top of this file.
 * NOTE(review): the continuation of the final cmd_complete() call is
 * missing from this view.
 */
273 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
276 struct mgmt_rp_read_version rp;
278 BT_DBG("sock %p", sk);
280 rp.version = MGMT_VERSION;
281 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
283 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * command and event opcodes, serialized little-endian one after the
 * other following the counts.
 * NOTE(review): missing lines in this view include the allocation
 * check, kfree of rp, and the final return.
 */
287 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
290 struct mgmt_rp_read_commands *rp;
291 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
292 const u16 num_events = ARRAY_SIZE(mgmt_events);
297 BT_DBG("sock %p", sk);
299 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
301 rp = kmalloc(rp_size, GFP_KERNEL);
305 rp->num_commands = __constant_cpu_to_le16(num_commands);
306 rp->num_events = __constant_cpu_to_le16(num_events);
/* Commands first, then events, in one contiguous opcode array */
308 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
309 put_unaligned_le16(mgmt_commands[i], opcode);
311 for (i = 0; i < num_events; i++, opcode++)
312 put_unaligned_le16(mgmt_events[i], opcode);
314 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all valid
 * (BR/EDR) controllers that are not still in the HCI_SETUP stage.
 * Walks hci_dev_list twice under the read lock: once to count (the
 * counting body is missing from this view) and once to fill in ids.
 */
321 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
324 struct mgmt_rp_read_index_list *rp;
330 BT_DBG("sock %p", sk);
332 read_lock(&hci_dev_list_lock);
335 list_for_each_entry(d, &hci_dev_list, list) {
336 if (!mgmt_valid_hdev(d))
/* GFP_ATOMIC because the read lock is held across the allocation */
342 rp_len = sizeof(*rp) + (2 * count);
343 rp = kmalloc(rp_len, GFP_ATOMIC);
345 read_unlock(&hci_dev_list_lock);
350 list_for_each_entry(d, &hci_dev_list, list) {
351 if (test_bit(HCI_SETUP, &d->dev_flags))
354 if (!mgmt_valid_hdev(d))
357 rp->index[count++] = cpu_to_le16(d->id);
358 BT_DBG("Added hci%u", d->id);
/* count may have shrunk (devices skipped); recompute the reply length */
361 rp->num_controllers = cpu_to_le16(count);
362 rp_len = sizeof(*rp) + (2 * count);
364 read_unlock(&hci_dev_list_lock);
366 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the bitmask of settings this controller could support, based
 * on its LMP feature bits: powered/pairable always; SSP, BR/EDR group
 * and LE only when the corresponding capability is present.
 * NOTE(review): the settings declaration, the HS condition preceding
 * line 394 and the final return are missing from this view.
 */
374 static u32 get_supported_settings(struct hci_dev *hdev)
378 settings |= MGMT_SETTING_POWERED;
379 settings |= MGMT_SETTING_PAIRABLE;
381 if (lmp_ssp_capable(hdev))
382 settings |= MGMT_SETTING_SSP;
384 if (lmp_bredr_capable(hdev)) {
385 settings |= MGMT_SETTING_CONNECTABLE;
/* Fast connectable relies on page scan parameters from HCI 1.2+ */
386 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY;
394 settings |= MGMT_SETTING_HS;
396 if (lmp_le_capable(hdev))
397 settings |= MGMT_SETTING_LE;
/* Build the bitmask of currently active settings from the hdev flag
 * bits; this is what Read Controller Info and New Settings report.
 * NOTE(review): the settings declaration and final return are missing
 * from this view.
 */
402 static u32 get_current_settings(struct hci_dev *hdev)
406 if (hdev_is_powered(hdev))
407 settings |= MGMT_SETTING_POWERED;
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE;
412 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_FAST_CONNECTABLE;
415 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_DISCOVERABLE;
418 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
419 settings |= MGMT_SETTING_PAIRABLE;
/* BR/EDR is reported from capability, not from a runtime flag */
421 if (lmp_bredr_capable(hdev))
422 settings |= MGMT_SETTING_BREDR;
424 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LE;
427 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
428 settings |= MGMT_SETTING_LINK_SECURITY;
430 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_SSP;
433 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
434 settings |= MGMT_SETTING_HS;
439 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing all registered 16-bit UUIDs to data
 * (at most len bytes). uuids_start points at the field header so the
 * type can be downgraded from _ALL to _SOME on truncation and the
 * length byte (uuids_start[0]) grown per UUID. Returns the advanced
 * write pointer.
 * NOTE(review): lines initializing the field header and several loop
 * lines are missing from this view.
 */
441 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
443 u8 *ptr = data, *uuids_start = NULL;
444 struct bt_uuid *uuid;
449 list_for_each_entry(uuid, &hdev->uuids, list) {
452 if (uuid->size != 16)
/* 16-bit alias lives at offset 12 of the 128-bit little-endian UUID */
455 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
/* PnP Information service class is deliberately not advertised here */
459 if (uuid16 == PNP_INFO_SVCLASS_ID)
465 uuids_start[1] = EIR_UUID16_ALL;
469 /* Stop if not enough space to put next UUID */
470 if ((ptr - data) + sizeof(u16) > len) {
471 uuids_start[1] = EIR_UUID16_SOME;
475 *ptr++ = (uuid16 & 0x00ff);
476 *ptr++ = (uuid16 & 0xff00) >> 8;
477 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing all registered 32-bit UUIDs to data
 * (at most len bytes); same _ALL/_SOME truncation scheme as the
 * 16-bit variant above. Returns the advanced write pointer.
 * NOTE(review): header-initialization and return lines are missing
 * from this view.
 */
483 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
485 u8 *ptr = data, *uuids_start = NULL;
486 struct bt_uuid *uuid;
491 list_for_each_entry(uuid, &hdev->uuids, list) {
492 if (uuid->size != 32)
498 uuids_start[1] = EIR_UUID32_ALL;
502 /* Stop if not enough space to put next UUID */
503 if ((ptr - data) + sizeof(u32) > len) {
504 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit alias is stored at offset 12 of the 128-bit UUID */
508 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
510 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing all registered 128-bit UUIDs to data
 * (at most len bytes); same _ALL/_SOME truncation scheme as the
 * 16-bit variant above. Returns the advanced write pointer.
 * NOTE(review): header-initialization and return lines are missing
 * from this view.
 */
516 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
518 u8 *ptr = data, *uuids_start = NULL;
519 struct bt_uuid *uuid;
524 list_for_each_entry(uuid, &hdev->uuids, list) {
525 if (uuid->size != 128)
531 uuids_start[1] = EIR_UUID128_ALL;
535 /* Stop if not enough space to put next UUID */
536 if ((ptr - data) + 16 > len) {
537 uuids_start[1] = EIR_UUID128_SOME;
541 memcpy(ptr, uuid->uuid, 16);
543 uuids_start[0] += 16;
/* Compose the Extended Inquiry Response payload into data: local name
 * (complete or shortened), inquiry TX power if valid, Device ID if a
 * source is set, then the 16/32/128-bit UUID lists while space in
 * HCI_MAX_EIR_LENGTH remains.
 * NOTE(review): several lines are missing from this view, including
 * the name-length clamp and the ptr advances after the TX-power and
 * Device ID fields.
 */
549 static void create_eir(struct hci_dev *hdev, u8 *data)
554 name_len = strlen(hdev->dev_name);
560 ptr[1] = EIR_NAME_SHORT;
562 ptr[1] = EIR_NAME_COMPLETE;
564 /* EIR Data length */
565 ptr[0] = name_len + 1;
567 memcpy(ptr + 2, hdev->dev_name, name_len);
569 ptr += (name_len + 2);
572 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
574 ptr[1] = EIR_TX_POWER;
575 ptr[2] = (u8) hdev->inq_tx_power;
580 if (hdev->devid_source > 0) {
582 ptr[1] = EIR_DEVICE_ID;
/* Device ID field: source, vendor, product, version — all LE 16-bit */
584 put_unaligned_le16(hdev->devid_source, ptr + 2);
585 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
586 put_unaligned_le16(hdev->devid_product, ptr + 6);
587 put_unaligned_le16(hdev->devid_version, ptr + 8);
592 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
593 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
594 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Add a Write EIR command to the request if the freshly composed EIR
 * data differs from what the controller already has. Skipped when the
 * device is off, lacks extended inquiry support, has SSP disabled, or
 * the service cache is active (cache flush will trigger it later).
 */
597 static void update_eir(struct hci_request *req)
599 struct hci_dev *hdev = req->hdev;
600 struct hci_cp_write_eir cp;
602 if (!hdev_is_powered(hdev))
605 if (!lmp_ext_inq_capable(hdev))
608 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
611 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
614 memset(&cp, 0, sizeof(cp));
616 create_eir(hdev, cp.data);
/* No change against the cached copy — nothing to send */
618 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
621 memcpy(hdev->eir, cp.data, sizeof(cp.data));
623 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the svc_hint of every registered UUID to form the
 * service-class byte used by update_class() below.
 * NOTE(review): the declaration/initialization of val and the final
 * return are missing from this view.
 */
626 static u8 get_service_classes(struct hci_dev *hdev)
628 struct bt_uuid *uuid;
631 list_for_each_entry(uuid, &hdev->uuids, list)
632 val |= uuid->svc_hint;
/* Add a Write Class of Device command to the request when the
 * minor/major class plus derived service classes differ from the
 * controller's current value. Skipped while powered off or while the
 * service cache is active.
 */
637 static void update_class(struct hci_request *req)
639 struct hci_dev *hdev = req->hdev;
642 BT_DBG("%s", hdev->name);
644 if (!hdev_is_powered(hdev))
647 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
650 cod[0] = hdev->minor_class;
651 cod[1] = hdev->major_class;
652 cod[2] = get_service_classes(hdev);
/* Unchanged CoD — avoid a redundant HCI command */
654 if (memcmp(cod, hdev->dev_class, 3) == 0)
657 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed work that clears the service-cache flag and then pushes the
 * (now uncached) class/EIR state to the controller via an HCI request.
 * NOTE(review): the hci_dev_lock and the update_eir/update_class calls
 * between req init and unlock are missing from this view.
 */
660 static void service_cache_off(struct work_struct *work)
662 struct hci_dev *hdev = container_of(work, struct hci_dev,
664 struct hci_request req;
666 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
669 hci_req_init(&req, hdev);
676 hci_dev_unlock(hdev);
678 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialization, keyed on the HCI_MGMT flag:
 * sets up the service-cache delayed work and clears the implicit
 * pairable bit so user space must enable pairing explicitly.
 */
681 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
/* Already initialized — test_and_set makes this idempotent */
683 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
686 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)
688 /* Non-mgmt controlled devices get this bit set
689 * implicitly so that pairing works for them, however
690 * for mgmt we require user-space to explicitly enable
693 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * version, manufacturer, class, names and the supported/current
 * settings bitmasks, snapshotted under the device lock.
 * NOTE(review): the hci_dev_lock call and the reply-length argument of
 * the final cmd_complete are missing from this view.
 */
696 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
697 void *data, u16 data_len)
699 struct mgmt_rp_read_info rp;
701 BT_DBG("sock %p %s", sk, hdev->name);
705 memset(&rp, 0, sizeof(rp));
707 bacpy(&rp.bdaddr, &hdev->bdaddr);
709 rp.version = hdev->hci_ver;
710 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
712 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
713 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
715 memcpy(rp.dev_class, hdev->dev_class, 3);
717 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
718 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
720 hci_dev_unlock(hdev);
722 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command's storage; the body is not visible in this
 * view (numbering gap), presumably freeing cmd->param and cmd — TODO
 * confirm against the full source.
 */
726 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and register a pending mgmt command for hdev, copying len
 * bytes of the request data so it survives until completion. Returns
 * the new entry, or (per the visible allocation checks' placement)
 * presumably NULL on allocation failure — TODO confirm.
 * NOTE(review): allocation-failure handling, cmd->sk assignment and
 * the final return are missing from this view.
 */
733 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
734 struct hci_dev *hdev, void *data,
737 struct pending_cmd *cmd;
739 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
743 cmd->opcode = opcode;
744 cmd->index = hdev->id;
746 cmd->param = kmalloc(len, GFP_KERNEL);
753 memcpy(cmd->param, data, len);
758 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke cb on every pending command matching opcode (opcode 0 matches
 * all). Safe iteration, so the callback may remove entries.
 * NOTE(review): the callback invocation line itself is missing from
 * this view.
 */
763 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
764 void (*cb)(struct pending_cmd *cmd,
768 struct pending_cmd *cmd, *tmp;
770 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
771 if (opcode > 0 && cmd->opcode != opcode)
/* Look up the first pending command with the given opcode on hdev.
 * NOTE(review): the return statements (matched entry / NULL) are
 * missing from this view.
 */
778 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
780 struct pending_cmd *cmd;
782 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
783 if (cmd->opcode == opcode)
/* Unlink a pending command from its device list and free it. */
790 static void mgmt_pending_remove(struct pending_cmd *cmd)
792 list_del(&cmd->list);
793 mgmt_pending_free(cmd);
796 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
798 __le32 settings = cpu_to_le32(get_current_settings(hdev));
800 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler. Validates the boolean, short-circuits
 * when auto-off is pending or the state already matches, rejects a
 * duplicate pending command, then queues power_on/power_off work.
 * NOTE(review): several lines are missing from this view, including
 * the hci_dev_lock, the busy-status constant, the pending-add failure
 * check and the final return.
 */
804 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
807 struct mgmt_mode *cp = data;
808 struct pending_cmd *cmd;
811 BT_DBG("request for %s", hdev->name);
813 if (cp->val != 0x00 && cp->val != 0x01)
814 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
815 MGMT_STATUS_INVALID_PARAMS);
/* Device was about to auto-power-off: cancel that and treat this
 * request as the authoritative power transition.
 */
819 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
820 cancel_delayed_work(&hdev->power_off);
823 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
825 err = mgmt_powered(hdev, 1);
/* Requested state already active — just echo current settings */
830 if (!!cp->val == hdev_is_powered(hdev)) {
831 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
835 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
836 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
841 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
848 queue_work(hdev->req_workqueue, &hdev->power_on);
850 queue_work(hdev->req_workqueue, &hdev->power_off.work);
855 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except skip_sk. The
 * index field carries hdev->id when hdev is given, MGMT_INDEX_NONE
 * otherwise (the selecting condition itself is missing from this
 * view). Returns 0 on success or a negative errno.
 */
859 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
860 struct sock *skip_sk)
863 struct mgmt_hdr *hdr;
865 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
869 hdr = (void *) skb_put(skb, sizeof(*hdr));
870 hdr->opcode = cpu_to_le16(event);
872 hdr->index = cpu_to_le16(hdev->id);
874 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
875 hdr->len = cpu_to_le16(data_len);
878 memcpy(skb_put(skb, data_len), data, data_len);
880 /* Time stamp */
881 __net_timestamp(skb);
883 hci_send_to_control(skb, skip_sk);
889 static int new_settings(struct hci_dev *hdev, struct sock *skip)
893 ev = cpu_to_le32(get_current_settings(hdev));
895 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler. Requires BR/EDR support, a boolean
 * value and a timeout only when enabling; requires the device to be
 * connectable. When powered off only the flag bit is toggled; when
 * powered, a Write Scan Enable is issued and the optional discoverable
 * timeout is (re)armed.
 * NOTE(review): numbering gaps show missing lines throughout, e.g. the
 * hci_dev_lock, the busy status constant, the scan-bit setup before
 * line 986 and the final return.
 */
898 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
901 struct mgmt_cp_set_discoverable *cp = data;
902 struct pending_cmd *cmd;
907 BT_DBG("request for %s", hdev->name);
909 if (!lmp_bredr_capable(hdev))
910 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
911 MGMT_STATUS_NOT_SUPPORTED);
913 if (cp->val != 0x00 && cp->val != 0x01)
914 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
915 MGMT_STATUS_INVALID_PARAMS);
/* A timeout is only meaningful when turning discoverable on */
917 timeout = __le16_to_cpu(cp->timeout);
918 if (!cp->val && timeout > 0)
919 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
920 MGMT_STATUS_INVALID_PARAMS);
924 if (!hdev_is_powered(hdev) && timeout > 0) {
925 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
926 MGMT_STATUS_NOT_POWERED);
930 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
931 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
932 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first */
937 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
938 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
939 MGMT_STATUS_REJECTED);
/* Powered off: only flip the flag bit and report settings */
943 if (!hdev_is_powered(hdev)) {
944 bool changed = false;
946 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
947 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
951 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
956 err = new_settings(hdev, sk);
/* Already in the requested state: just manage the timeout */
961 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
962 if (hdev->discov_timeout > 0) {
963 cancel_delayed_work(&hdev->discov_off);
964 hdev->discov_timeout = 0;
967 if (cp->val && timeout > 0) {
968 hdev->discov_timeout = timeout;
969 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
970 msecs_to_jiffies(hdev->discov_timeout * 1000));
973 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
977 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
986 scan |= SCAN_INQUIRY;
988 cancel_delayed_work(&hdev->discov_off);
990 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
992 mgmt_pending_remove(cmd);
995 hdev->discov_timeout = timeout;
998 hci_dev_unlock(hdev);
/* Add page-scan parameter commands to the request implementing fast
 * (interlaced scan, 160 ms interval) or standard (1.28 s interval)
 * connectable mode. No-op on pre-1.2 controllers; each HCI command is
 * only queued if the respective parameter actually changes.
 */
1002 static void write_fast_connectable(struct hci_request *req, bool enable)
1004 struct hci_dev *hdev = req->hdev;
1005 struct hci_cp_write_page_scan_activity acp;
1008 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1012 type = PAGE_SCAN_TYPE_INTERLACED;
1014 /* 160 msec page scan interval */
1015 acp.interval = __constant_cpu_to_le16(0x0100);
1017 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1019 /* default 1.28 sec page scan */
1020 acp.interval = __constant_cpu_to_le16(0x0800);
1023 acp.window = __constant_cpu_to_le16(0x0012);
1025 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1026 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1027 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1030 if (hdev->page_scan_type != type)
1031 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion callback for Set Connectable: replies with
 * the current settings to the originating socket and removes the
 * pending command, under the device lock.
 * NOTE(review): the hci_dev_lock and the not-found bailout between the
 * lookup and the reply are missing from this view.
 */
1034 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1036 struct pending_cmd *cmd;
1038 BT_DBG("status 0x%02x", status);
1042 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1046 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1048 mgmt_pending_remove(cmd);
1051 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler. Requires BR/EDR and a boolean.
 * Powered off: toggles the flag (clearing discoverable too when
 * disabling). Powered on: builds an HCI request writing the scan
 * enable and, when needed, resetting fast-connectable page-scan
 * parameters.
 * NOTE(review): missing lines in this view include the hci_dev_lock,
 * the scan value computation before line 1128 and the final return.
 */
1054 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1057 struct mgmt_mode *cp = data;
1058 struct pending_cmd *cmd;
1059 struct hci_request req;
1063 BT_DBG("request for %s", hdev->name);
1065 if (!lmp_bredr_capable(hdev))
1066 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1067 MGMT_STATUS_NOT_SUPPORTED);
1069 if (cp->val != 0x00 && cp->val != 0x01)
1070 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1071 MGMT_STATUS_INVALID_PARAMS);
1075 if (!hdev_is_powered(hdev)) {
1076 bool changed = false;
1078 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1082 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Dropping connectable implies dropping discoverable as well */
1084 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1085 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1088 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1093 err = new_settings(hdev, sk);
1098 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1099 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1100 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
/* Page scan already matches the requested state */
1105 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1106 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1110 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1121 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1122 hdev->discov_timeout > 0)
1123 cancel_delayed_work(&hdev->discov_off);
1126 hci_req_init(&req, hdev);
1128 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1130 /* If we're going from non-connectable to connectable or
1131 * vice-versa when fast connectable is enabled ensure that fast
1132 * connectable gets disabled. write_fast_connectable won't do
1133 * anything if the page scan parameters are already what they
1136 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1137 write_fast_connectable(&req, false);
1139 err = hci_req_run(&req, set_connectable_complete);
1141 mgmt_pending_remove(cmd);
1144 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: validates the boolean, flips the
 * HCI_PAIRABLE flag, replies with current settings and broadcasts New
 * Settings. A pure flag change — no HCI command is involved.
 * NOTE(review): the hci_dev_lock, the set/clear selection condition
 * and the final return are missing from this view.
 */
1148 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1151 struct mgmt_mode *cp = data;
1154 BT_DBG("request for %s", hdev->name);
1156 if (cp->val != 0x00 && cp->val != 0x01)
1157 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1158 MGMT_STATUS_INVALID_PARAMS);
1163 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1165 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1167 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1171 err = new_settings(hdev, sk);
1174 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler. Requires BR/EDR and a boolean.
 * Powered off: toggles the flag only. Powered on: sends Write
 * Authentication Enable unless the HCI_AUTH flag already matches.
 * NOTE(review): missing lines include the hci_dev_lock, the busy
 * status constant, the val assignment before line 1225 and the final
 * return.
 */
1178 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1181 struct mgmt_mode *cp = data;
1182 struct pending_cmd *cmd;
1186 BT_DBG("request for %s", hdev->name);
1188 if (!lmp_bredr_capable(hdev))
1189 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1190 MGMT_STATUS_NOT_SUPPORTED);
1192 if (cp->val != 0x00 && cp->val != 0x01)
1193 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1194 MGMT_STATUS_INVALID_PARAMS);
1198 if (!hdev_is_powered(hdev)) {
1199 bool changed = false;
1201 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1202 &hdev->dev_flags)) {
1203 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1207 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1212 err = new_settings(hdev, sk);
1217 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1218 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches the request */
1225 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1226 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1230 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1236 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1238 mgmt_pending_remove(cmd);
1243 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler. Requires SSP capability and a boolean.
 * Powered off: toggles the HCI_SSP_ENABLED flag only. Powered on:
 * sends Write Simple Pairing Mode unless the flag already matches.
 * NOTE(review): missing lines include the hci_dev_lock, the val
 * assignment, the busy status constant and the final return.
 */
1247 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1249 struct mgmt_mode *cp = data;
1250 struct pending_cmd *cmd;
1254 BT_DBG("request for %s", hdev->name);
1256 if (!lmp_ssp_capable(hdev))
1257 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1258 MGMT_STATUS_NOT_SUPPORTED);
1260 if (cp->val != 0x00 && cp->val != 0x01)
1261 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1262 MGMT_STATUS_INVALID_PARAMS);
1268 if (!hdev_is_powered(hdev)) {
1269 bool changed = false;
1271 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1272 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1276 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1281 err = new_settings(hdev, sk);
1286 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1287 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1292 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1293 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1297 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1303 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1305 mgmt_pending_remove(cmd);
1310 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggles the High Speed enabled flag. Purely
 * a host-side flag; no HCI command is sent.
 * NOTE(review): the capability condition guarding the NOT_SUPPORTED
 * status (before line 1321) and the set/clear selection condition are
 * missing from this view.
 */
1314 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1316 struct mgmt_mode *cp = data;
1318 BT_DBG("request for %s", hdev->name);
1321 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1322 MGMT_STATUS_NOT_SUPPORTED);
1324 if (cp->val != 0x00 && cp->val != 0x01)
1325 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1326 MGMT_STATUS_INVALID_PARAMS);
1329 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1331 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1333 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler. Requires LE capability and a boolean, and is
 * rejected on LE-only (non-BR/EDR) controllers where LE cannot be
 * toggled. When powered off or the host LE state already matches, only
 * the flag is toggled; otherwise a Write LE Host Supported command is
 * sent.
 * NOTE(review): missing lines include the hci_dev_lock, the val/enabled
 * declarations, the hci_cp.le assignment and the final return.
 */
1336 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1338 struct mgmt_mode *cp = data;
1339 struct hci_cp_write_le_host_supported hci_cp;
1340 struct pending_cmd *cmd;
1344 BT_DBG("request for %s", hdev->name);
1346 if (!lmp_le_capable(hdev))
1347 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1348 MGMT_STATUS_NOT_SUPPORTED);
1350 if (cp->val != 0x00 && cp->val != 0x01)
1351 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1352 MGMT_STATUS_INVALID_PARAMS);
1354 /* LE-only devices do not allow toggling LE on/off */
1355 if (!lmp_bredr_capable(hdev))
1356 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1357 MGMT_STATUS_REJECTED);
1362 enabled = lmp_host_le_capable(hdev);
1364 if (!hdev_is_powered(hdev) || val == enabled) {
1365 bool changed = false;
1367 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1368 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1372 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1377 err = new_settings(hdev, sk);
1382 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1383 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1388 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1394 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE+BR/EDR support follows the controller capability */
1398 hci_cp.simul = lmp_le_br_capable(hdev);
1401 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1404 mgmt_pending_remove(cmd);
1407 hci_dev_unlock(hdev);
1411 /* This is a helper function to test for pending mgmt commands that can
1412 * cause CoD or EIR HCI commands. We can only allow one such pending
1413 * mgmt command at a time since otherwise we cannot easily track what
1414 * the current values are, will be, and based on that calculate if a new
1415 * HCI command needs to be sent and if yes with what value.
/* NOTE(review): the return statements (true on match, false otherwise)
 * are missing from this view of the file.
 */
1417 static bool pending_eir_or_class(struct hci_dev *hdev)
1419 struct pending_cmd *cmd;
1421 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1422 switch (cmd->opcode) {
1423 case MGMT_OP_ADD_UUID:
1424 case MGMT_OP_REMOVE_UUID:
1425 case MGMT_OP_SET_DEV_CLASS:
1426 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (little-endian byte order); used by
 * get_uuid_size() to recognize 16/32-bit UUID aliases.
 */
1434 static const u8 bluetooth_base_uuid[] = {
1435 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1436 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as effectively 16, 32 or 128 bits wide by
 * comparing its low 12 bytes against the Bluetooth Base UUID and then
 * inspecting the 32-bit alias value.
 * NOTE(review): the return statements selecting 128/32/16 are missing
 * from this view.
 */
1439 static u8 get_uuid_size(const u8 *uuid)
/* Not derived from the base UUID: it is a full 128-bit UUID */
1443 if (memcmp(uuid, bluetooth_base_uuid, 12))
1446 val = get_unaligned_le32(&uuid[12]);
/* Common completion for class/EIR affecting commands (Add/Remove UUID,
 * Set Device Class): replies to the pending command's socket with the
 * converted status and the current 3-byte device class, then removes
 * the pending entry under the device lock.
 * NOTE(review): the hci_dev_lock and the not-found bailout are missing
 * from this view.
 */
1453 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1455 struct pending_cmd *cmd;
1459 cmd = mgmt_pending_find(mgmt_op, hdev);
1463 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1464 hdev->dev_class, 3);
1466 mgmt_pending_remove(cmd);
1469 hci_dev_unlock(hdev);
/* HCI request completion for Add UUID: defers to the shared class
 * completion handler above.
 */
1472 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1474 BT_DBG("status 0x%02x", status);
1476 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: registers a new UUID (rejecting while
 * another class/EIR command is pending), then runs an HCI request to
 * refresh class and EIR. -ENODATA from hci_req_run means nothing
 * needed sending, in which case the reply is completed immediately.
 * NOTE(review): missing lines include the hci_dev_lock, the kmalloc
 * failure check, the update_class/update_eir calls after req init,
 * the pending-add failure path and the final return.
 */
1479 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1481 struct mgmt_cp_add_uuid *cp = data;
1482 struct pending_cmd *cmd;
1483 struct hci_request req;
1484 struct bt_uuid *uuid;
1487 BT_DBG("request for %s", hdev->name);
1491 if (pending_eir_or_class(hdev)) {
1492 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1497 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1503 memcpy(uuid->uuid, cp->uuid, 16);
1504 uuid->svc_hint = cp->svc_hint;
1505 uuid->size = get_uuid_size(cp->uuid);
1507 list_add_tail(&uuid->list, &hdev->uuids);
1509 hci_req_init(&req, hdev);
1514 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA: request was empty, controller state already up to date */
1516 if (err != -ENODATA)
1519 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1520 hdev->dev_class, 3);
1524 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1533 hci_dev_unlock(hdev);
/* Arm the service-cache: sets HCI_SERVICE_CACHE and schedules the
 * delayed flush (service_cache_off) when the device is powered and
 * the flag was not already set.
 * NOTE(review): the return statements and the delay argument of
 * queue_delayed_work are missing from this view.
 */
1537 static bool enable_service_cache(struct hci_dev *hdev)
1539 if (!hdev_is_powered(hdev))
1542 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1543 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for Remove UUID: defers to the shared class
 * completion handler.
 */
1551 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1553 BT_DBG("status 0x%02x", status);
1555 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all, when the
 * request carries the all-zero wildcard UUID) and refresh EIR/class.
 */
1558 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1561 struct mgmt_cp_remove_uuid *cp = data;
1562 struct pending_cmd *cmd;
1563 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as "remove everything" wildcard */
1564 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1565 struct hci_request req;
1568 BT_DBG("request for %s", hdev->name);
1572 if (pending_eir_or_class(hdev)) {
1573 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1578 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1579 err = hci_uuids_clear(hdev);
/* If the service cache could be re-armed, answer immediately */
1581 if (enable_service_cache(hdev)) {
1582 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1583 0, hdev->dev_class, 3);
/* Otherwise search for the single matching UUID entry */
1592 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1593 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1596 list_del(&match->list);
/* No entry matched: UUID was never registered */
1602 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1603 MGMT_STATUS_INVALID_PARAMS);
1608 hci_req_init(&req, hdev);
1613 err = hci_req_run(&req, remove_uuid_complete);
/* -ENODATA: no HCI traffic needed, complete synchronously */
1615 if (err != -ENODATA)
1618 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1619 hdev->dev_class, 3);
1623 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1632 hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class(); delegates to the shared
 * class-update completion helper.
 */
1636 static void set_class_complete(struct hci_dev *hdev, u8 status)
1638 BT_DBG("status 0x%02x", status);
1640 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: update major/minor class of device.
 * BR/EDR only; rejected while another EIR/class update is pending.
 */
1643 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1646 struct mgmt_cp_set_dev_class *cp = data;
1647 struct pending_cmd *cmd;
1648 struct hci_request req;
1651 BT_DBG("request for %s", hdev->name);
1653 if (!lmp_bredr_capable(hdev))
1654 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1655 MGMT_STATUS_NOT_SUPPORTED);
1659 if (pending_eir_or_class(hdev)) {
1660 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Minor's low 2 bits and major's high 3 bits are reserved (CoD format) */
1665 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1666 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1667 MGMT_STATUS_INVALID_PARAMS);
1671 hdev->major_class = cp->major;
1672 hdev->minor_class = cp->minor;
/* Powered off: just store the values and reply immediately */
1674 if (!hdev_is_powered(hdev)) {
1675 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1676 hdev->dev_class, 3);
1680 hci_req_init(&req, hdev);
1682 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
/* Drop the lock while cancelling: the cache work takes it itself */
1683 hci_dev_unlock(hdev);
1684 cancel_delayed_work_sync(&hdev->service_cache);
1691 err = hci_req_run(&req, set_class_complete);
/* -ENODATA: no HCI command was queued, complete synchronously */
1693 if (err != -ENODATA)
1696 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1697 hdev->dev_class, 3);
1701 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1710 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the user-space supplied set and update the debug-keys policy.
 */
1714 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1717 struct mgmt_cp_load_link_keys *cp = data;
1718 u16 key_count, expected_len;
1721 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16 and key_count * sizeof(...) can
 * exceed 65535 for a large (attacker-chosen) key_count, wrapping the
 * length check — upstream later added an explicit key_count upper
 * bound; verify against the current tree.
 */
1723 expected_len = sizeof(*cp) + key_count *
1724 sizeof(struct mgmt_link_key_info);
1725 if (expected_len != len) {
1726 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1728 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1729 MGMT_STATUS_INVALID_PARAMS);
1732 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1733 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1734 MGMT_STATUS_INVALID_PARAMS);
1736 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the existing key store */
1739 for (i = 0; i < key_count; i++) {
1740 struct mgmt_link_key_info *key = &cp->keys[i];
1742 if (key->addr.type != BDADDR_BREDR)
1743 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1744 MGMT_STATUS_INVALID_PARAMS);
/* All-or-nothing: wipe old keys, then load the new set */
1749 hci_link_keys_clear(hdev);
1751 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1754 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1756 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1758 for (i = 0; i < key_count; i++) {
1759 struct mgmt_link_key_info *key = &cp->keys[i];
/* conn == NULL, new_key == 0: store without generating events */
1761 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1762 key->type, key->pin_len);
1765 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1767 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED to all mgmt sockets except
 * @skip_sk (the socket that issued the unpair request gets the
 * command response instead).
 */
1772 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1773 u8 addr_type, struct sock *skip_sk)
1775 struct mgmt_ev_device_unpaired ev;
1777 bacpy(&ev.addr.bdaddr, bdaddr);
1778 ev.addr.type = addr_type;
1780 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the link key (BR/EDR) or LTK
 * (LE) for a device and, when requested, also disconnect it. The
 * command completes asynchronously if a disconnect is initiated.
 */
1784 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1787 struct mgmt_cp_unpair_device *cp = data;
1788 struct mgmt_rp_unpair_device rp;
1789 struct hci_cp_disconnect dc;
1790 struct pending_cmd *cmd;
1791 struct hci_conn *conn;
/* Response always echoes back the target address */
1794 memset(&rp, 0, sizeof(rp));
1795 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1796 rp.addr.type = cp->addr.type;
1798 if (!bdaddr_type_is_valid(cp->addr.type))
1799 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1800 MGMT_STATUS_INVALID_PARAMS,
1803 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1804 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1805 MGMT_STATUS_INVALID_PARAMS,
1810 if (!hdev_is_powered(hdev)) {
1811 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1812 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Remove the key matching the address type */
1816 if (cp->addr.type == BDADDR_BREDR)
1817 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1819 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
/* Key removal failed => the device was not paired to begin with */
1822 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1823 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1827 if (cp->disconnect) {
1828 if (cp->addr.type == BDADDR_BREDR)
1829 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1832 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection (or no disconnect asked): finish synchronously */
1839 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1841 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1845 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1852 dc.handle = cpu_to_le16(conn->handle);
1853 dc.reason = 0x13; /* Remote User Terminated Connection */
1854 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1856 mgmt_pending_remove(cmd);
1859 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: initiate HCI Disconnect for an active
 * ACL or LE connection; completion is reported asynchronously when
 * the disconnect event arrives.
 */
1863 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1866 struct mgmt_cp_disconnect *cp = data;
1867 struct mgmt_rp_disconnect rp;
1868 struct hci_cp_disconnect dc;
1869 struct pending_cmd *cmd;
1870 struct hci_conn *conn;
1875 memset(&rp, 0, sizeof(rp));
1876 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1877 rp.addr.type = cp->addr.type;
1879 if (!bdaddr_type_is_valid(cp->addr.type))
1880 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1881 MGMT_STATUS_INVALID_PARAMS,
1886 if (!test_bit(HCI_UP, &hdev->flags)) {
1887 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1888 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be in flight per controller */
1892 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1893 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1894 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1898 if (cp->addr.type == BDADDR_BREDR)
1899 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1902 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1904 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1905 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1906 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1910 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1916 dc.handle = cpu_to_le16(conn->handle);
1917 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1919 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Sending failed: drop the pending entry so it can't leak */
1921 mgmt_pending_remove(cmd);
1924 hci_dev_unlock(hdev);
/* Map an HCI (link_type, addr_type) pair to the single mgmt BDADDR_*
 * address-type value used on the management interface.
 */
1928 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1930 switch (link_type) {
1932 switch (addr_type) {
1933 case ADDR_LE_DEV_PUBLIC:
1934 return BDADDR_LE_PUBLIC;
1937 /* Fallback to LE Random address type */
1938 return BDADDR_LE_RANDOM;
1942 /* Fallback to BR/EDR type */
1943 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible (ACL/LE) connections; SCO/eSCO links are filtered out.
 */
1947 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1950 struct mgmt_rp_get_connections *rp;
1960 if (!hdev_is_powered(hdev)) {
1961 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1962 MGMT_STATUS_NOT_POWERED);
/* First pass: count candidates to size the response allocation */
1967 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1968 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1972 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1973 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address list */
1980 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1981 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1983 bacpy(&rp->addr[i].bdaddr, &c->dst);
1984 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1985 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1990 rp->conn_count = cpu_to_le16(i);
1992 /* Recalculate length in case of filtered SCO connections, etc */
1993 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1995 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2001 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the
 * corresponding HCI negative reply for the given address.
 */
2005 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2006 struct mgmt_cp_pin_code_neg_reply *cp)
2008 struct pending_cmd *cmd;
2011 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2016 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2017 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* On send failure, drop the pending entry */
2019 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller. A connection demanding high security requires a full
 * 16-byte PIN; a shorter one triggers an automatic negative reply.
 */
2024 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2027 struct hci_conn *conn;
2028 struct mgmt_cp_pin_code_reply *cp = data;
2029 struct hci_cp_pin_code_reply reply;
2030 struct pending_cmd *cmd;
2037 if (!hdev_is_powered(hdev)) {
2038 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2039 MGMT_STATUS_NOT_POWERED);
/* PIN pairing is BR/EDR (ACL) only */
2043 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2045 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2046 MGMT_STATUS_NOT_CONNECTED);
2050 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2051 struct mgmt_cp_pin_code_neg_reply ncp;
2053 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2055 BT_ERR("PIN code is not 16 bytes long");
2057 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2059 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2060 MGMT_STATUS_INVALID_PARAMS);
2065 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2071 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2072 reply.pin_len = cp->pin_len;
2073 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2075 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2077 mgmt_pending_remove(cmd);
2080 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * subsequent pairing attempts. Purely local, always succeeds.
 */
2084 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2087 struct mgmt_cp_set_io_capability *cp = data;
2093 hdev->io_capability = cp->io_capability;
2095 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2096 hdev->io_capability);
2098 hci_dev_unlock(hdev);
2100 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data points at
 * @conn, or NULL if no pairing is in progress for this connection.
 */
2104 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2106 struct hci_dev *hdev = conn->hdev;
2107 struct pending_cmd *cmd;
2109 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2110 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2113 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the response, detach all
 * connection callbacks, drop the connection reference taken at
 * pairing start and remove the pending entry.
 */
2122 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2124 struct mgmt_rp_pair_device rp;
2125 struct hci_conn *conn = cmd->user_data;
2127 bacpy(&rp.addr.bdaddr, &conn->dst);
2128 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2130 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2133 /* So we don't get further callbacks for this connection */
2134 conn->connect_cfm_cb = NULL;
2135 conn->security_cfm_cb = NULL;
2136 conn->disconn_cfm_cb = NULL;
2138 hci_conn_drop(conn);
2140 mgmt_pending_remove(cmd);
/* Connection/security callback for BR/EDR pairing: resolve the
 * matching pending command and complete it with the HCI status.
 */
2143 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2145 struct pending_cmd *cmd;
2147 BT_DBG("status %u", status);
2149 cmd = find_pairing(conn);
2151 BT_DBG("Unable to find a pending command");
2153 pairing_complete(cmd, mgmt_status(status));
/* LE connect callback for pairing: unlike BR/EDR, a successful LE
 * connection alone does not complete the pairing (SMP follows); an
 * elided guard in this excerpt returns early on success.
 */
2156 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2158 struct pending_cmd *cmd;
2160 BT_DBG("status %u", status);
2165 cmd = find_pairing(conn);
2167 BT_DBG("Unable to find a pending command");
2169 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: connect to the remote device (ACL or
 * LE) and initiate pairing; completion is delivered through the
 * connection callbacks installed below.
 */
2172 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2175 struct mgmt_cp_pair_device *cp = data;
2176 struct mgmt_rp_pair_device rp;
2177 struct pending_cmd *cmd;
2178 u8 sec_level, auth_type;
2179 struct hci_conn *conn;
2184 memset(&rp, 0, sizeof(rp));
2185 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2186 rp.addr.type = cp->addr.type;
2188 if (!bdaddr_type_is_valid(cp->addr.type))
2189 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2190 MGMT_STATUS_INVALID_PARAMS,
2195 if (!hdev_is_powered(hdev)) {
2196 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2197 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection */
2201 sec_level = BT_SECURITY_MEDIUM;
2202 if (cp->io_cap == 0x03)
2203 auth_type = HCI_AT_DEDICATED_BONDING;
2205 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2207 if (cp->addr.type == BDADDR_BREDR)
2208 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2209 cp->addr.type, sec_level, auth_type);
2211 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2212 cp->addr.type, sec_level, auth_type);
/* hci_connect() returns ERR_PTR on failure (check elided here) */
2217 if (PTR_ERR(conn) == -EBUSY)
2218 status = MGMT_STATUS_BUSY;
2220 status = MGMT_STATUS_CONNECT_FAILED;
2222 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing owns this conn */
2228 if (conn->connect_cfm_cb) {
2229 hci_conn_drop(conn);
2230 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2231 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2235 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2238 hci_conn_drop(conn);
2242 /* For LE, just connecting isn't a proof that the pairing finished */
2243 if (cp->addr.type == BDADDR_BREDR)
2244 conn->connect_cfm_cb = pairing_complete_cb;
2246 conn->connect_cfm_cb = le_connect_complete_cb;
2248 conn->security_cfm_cb = pairing_complete_cb;
2249 conn->disconn_cfm_cb = pairing_complete_cb;
2250 conn->io_capability = cp->io_cap;
2251 cmd->user_data = conn;
/* Already connected and secure enough: complete immediately */
2253 if (conn->state == BT_CONNECTED &&
2254 hci_conn_security(conn, sec_level, auth_type))
2255 pairing_complete(cmd, 0);
2260 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress
 * PAIR_DEVICE for the given address with status CANCELLED.
 */
2264 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2267 struct mgmt_addr_info *addr = data;
2268 struct pending_cmd *cmd;
2269 struct hci_conn *conn;
2276 if (!hdev_is_powered(hdev)) {
2277 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2278 MGMT_STATUS_NOT_POWERED);
2282 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2284 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2285 MGMT_STATUS_INVALID_PARAMS);
2289 conn = cmd->user_data;
/* Address must match the device actually being paired */
2291 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2292 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2293 MGMT_STATUS_INVALID_PARAMS);
2297 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2299 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2300 addr, sizeof(*addr));
2302 hci_dev_unlock(hdev);
/* Common backend for all user confirmation / passkey (neg-)replies:
 * routes LE responses to SMP and BR/EDR responses to the matching
 * HCI command (@hci_op), keeping a pending entry for the latter.
 */
2306 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2307 struct mgmt_addr_info *addr, u16 mgmt_op,
2308 u16 hci_op, __le32 passkey)
2310 struct pending_cmd *cmd;
2311 struct hci_conn *conn;
2316 if (!hdev_is_powered(hdev)) {
2317 err = cmd_complete(sk, hdev->id, mgmt_op,
2318 MGMT_STATUS_NOT_POWERED, addr,
2323 if (addr->type == BDADDR_BREDR)
2324 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2326 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2329 err = cmd_complete(sk, hdev->id, mgmt_op,
2330 MGMT_STATUS_NOT_CONNECTED, addr,
2335 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2336 /* Continue with pairing via SMP. The hdev lock must be
2337 * released as SMP may try to reacquire it for crypto
2340 hci_dev_unlock(hdev);
2341 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2345 err = cmd_complete(sk, hdev->id, mgmt_op,
2346 MGMT_STATUS_SUCCESS, addr,
2349 err = cmd_complete(sk, hdev->id, mgmt_op,
2350 MGMT_STATUS_FAILED, addr,
2356 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2362 /* Continue with pairing via HCI */
2363 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2364 struct hci_cp_user_passkey_reply cp;
2366 bacpy(&cp.bdaddr, &addr->bdaddr);
2367 cp.passkey = passkey;
2368 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry only the bdaddr */
2370 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2374 mgmt_pending_remove(cmd);
2377 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler — thin wrapper over
 * user_pairing_resp() with the matching HCI opcode.
 */
2381 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2382 void *data, u16 len)
2384 struct mgmt_cp_pin_code_neg_reply *cp = data;
2388 return user_pairing_resp(sk, hdev, &cp->addr,
2389 MGMT_OP_PIN_CODE_NEG_REPLY,
2390 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler — validates the exact payload
 * size, then delegates to user_pairing_resp().
 */
2393 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2396 struct mgmt_cp_user_confirm_reply *cp = data;
2400 if (len != sizeof(*cp))
2401 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2402 MGMT_STATUS_INVALID_PARAMS);
2404 return user_pairing_resp(sk, hdev, &cp->addr,
2405 MGMT_OP_USER_CONFIRM_REPLY,
2406 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler — thin wrapper over
 * user_pairing_resp().
 */
2409 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2410 void *data, u16 len)
2412 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2416 return user_pairing_resp(sk, hdev, &cp->addr,
2417 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2418 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler — forwards the passkey via
 * user_pairing_resp().
 */
2421 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2424 struct mgmt_cp_user_passkey_reply *cp = data;
2428 return user_pairing_resp(sk, hdev, &cp->addr,
2429 MGMT_OP_USER_PASSKEY_REPLY,
2430 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler — thin wrapper over
 * user_pairing_resp().
 */
2433 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2434 void *data, u16 len)
2436 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2440 return user_pairing_resp(sk, hdev, &cp->addr,
2441 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2442 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue HCI Write Local Name with the currently stored dev_name as
 * part of an hci_request batch.
 */
2445 static void update_name(struct hci_request *req)
2447 struct hci_dev *hdev = req->hdev;
2448 struct hci_cp_write_local_name cp;
2450 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2452 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request callback for set_local_name(): complete the pending
 * mgmt command with either an error status or the accepted name.
 */
2455 static void set_name_complete(struct hci_dev *hdev, u8 status)
2457 struct mgmt_cp_set_local_name *cp;
2458 struct pending_cmd *cmd;
2460 BT_DBG("status 0x%02x", status);
2464 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2471 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2472 mgmt_status(status));
2474 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2477 mgmt_pending_remove(cmd);
2480 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the friendly name (and short
 * name) — pushed to the controller only when powered, otherwise just
 * stored and broadcast locally.
 */
2483 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2486 struct mgmt_cp_set_local_name *cp = data;
2487 struct pending_cmd *cmd;
2488 struct hci_request req;
2495 /* If the old values are the same as the new ones just return a
2496 * direct command complete event.
2498 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2499 !memcmp(hdev->short_name, cp->short_name,
2500 sizeof(hdev->short_name))) {
2501 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2506 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: store name, reply, and notify other mgmt sockets */
2508 if (!hdev_is_powered(hdev)) {
2509 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2511 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2516 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2522 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2528 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2530 hci_req_init(&req, hdev);
/* BR/EDR gets Write Local Name (+EIR); LE gets advertising data */
2532 if (lmp_bredr_capable(hdev)) {
2537 if (lmp_le_capable(hdev))
2538 hci_update_ad(&req);
2540 err = hci_req_run(&req, set_name_complete);
2542 mgmt_pending_remove(cmd);
2545 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the local SSP OOB
 * hash/randomizer from the controller; requires power, SSP support,
 * and no identical request already in flight.
 */
2549 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2550 void *data, u16 data_len)
2552 struct pending_cmd *cmd;
2555 BT_DBG("%s", hdev->name);
2559 if (!hdev_is_powered(hdev)) {
2560 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2561 MGMT_STATUS_NOT_POWERED);
2565 if (!lmp_ssp_capable(hdev)) {
2566 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2567 MGMT_STATUS_NOT_SUPPORTED);
2571 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2572 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2577 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2583 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2585 mgmt_pending_remove(cmd);
2588 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store a remote device's OOB
 * hash/randomizer for later SSP pairing. Completes synchronously.
 */
2592 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2593 void *data, u16 len)
2595 struct mgmt_cp_add_remote_oob_data *cp = data;
/* NOTE(review): trailing space in this debug format string looks
 * accidental (runtime string — left unchanged here).
 */
2599 BT_DBG("%s ", hdev->name);
2603 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2606 status = MGMT_STATUS_FAILED;
2608 status = MGMT_STATUS_SUCCESS;
2610 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2611 &cp->addr, sizeof(cp->addr));
2613 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete previously stored
 * remote OOB data; failure maps to INVALID_PARAMS (unknown address).
 */
2617 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2618 void *data, u16 len)
2620 struct mgmt_cp_remove_remote_oob_data *cp = data;
2624 BT_DBG("%s", hdev->name);
2628 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2630 status = MGMT_STATUS_INVALID_PARAMS;
2632 status = MGMT_STATUS_SUCCESS;
2634 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2635 status, &cp->addr, sizeof(cp->addr));
2637 hci_dev_unlock(hdev);
/* Kick the BR/EDR inquiry phase of an interleaved (BR/EDR + LE)
 * discovery; on failure the discovery state machine is reset.
 * Exported (non-static) — called from hci event handling.
 */
2641 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2645 BT_DBG("%s", hdev->name);
2649 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2651 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2653 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: begin BR/EDR inquiry, LE scan, or
 * interleaved discovery depending on the requested type. Each type
 * is gated on the matching controller capability.
 */
2658 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2659 void *data, u16 len)
2661 struct mgmt_cp_start_discovery *cp = data;
2662 struct pending_cmd *cmd;
2665 BT_DBG("%s", hdev->name);
2669 if (!hdev_is_powered(hdev)) {
2670 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2671 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and discovery are mutually exclusive */
2675 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2676 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2681 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2682 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2687 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2693 hdev->discovery.type = cp->type;
2695 switch (hdev->discovery.type) {
2696 case DISCOV_TYPE_BREDR:
2697 if (!lmp_bredr_capable(hdev)) {
2698 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2699 MGMT_STATUS_NOT_SUPPORTED);
2700 mgmt_pending_remove(cmd);
2704 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2707 case DISCOV_TYPE_LE:
2708 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2709 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2710 MGMT_STATUS_NOT_SUPPORTED);
2711 mgmt_pending_remove(cmd);
2715 err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
2716 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2719 case DISCOV_TYPE_INTERLEAVED:
2720 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2721 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2722 MGMT_STATUS_NOT_SUPPORTED);
2723 mgmt_pending_remove(cmd);
/* LE scan first; the BR/EDR phase follows via
 * mgmt_interleaved_discovery() when the scan times out.
 */
2727 err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
2728 LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
2732 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2733 MGMT_STATUS_INVALID_PARAMS);
2734 mgmt_pending_remove(cmd);
2739 mgmt_pending_remove(cmd);
2741 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2744 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: cancel an active inquiry/LE scan,
 * or a pending remote-name resolution, depending on the current
 * discovery state. Requested type must match the active one.
 */
2748 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2751 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2752 struct pending_cmd *cmd;
2753 struct hci_cp_remote_name_req_cancel cp;
2754 struct inquiry_entry *e;
2757 BT_DBG("%s", hdev->name);
2761 if (!hci_discovery_active(hdev)) {
2762 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2763 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2764 sizeof(mgmt_cp->type));
2768 if (hdev->discovery.type != mgmt_cp->type) {
2769 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2770 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2771 sizeof(mgmt_cp->type));
2775 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2781 switch (hdev->discovery.state) {
2782 case DISCOVERY_FINDING:
/* Cancel whichever scan mechanism is currently running */
2783 if (test_bit(HCI_INQUIRY, &hdev->flags))
2784 err = hci_cancel_inquiry(hdev);
2786 err = hci_cancel_le_scan(hdev);
2790 case DISCOVERY_RESOLVING:
2791 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* Nothing being resolved: discovery can stop immediately */
2794 mgmt_pending_remove(cmd);
2795 err = cmd_complete(sk, hdev->id,
2796 MGMT_OP_STOP_DISCOVERY, 0,
2798 sizeof(mgmt_cp->type));
2799 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2803 bacpy(&cp.bdaddr, &e->data.bdaddr);
2804 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2810 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2815 mgmt_pending_remove(cmd);
2817 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2820 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: user space tells the kernel whether a
 * discovered device's name is already known; unknown names are queued
 * for resolution during discovery.
 */
2824 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2827 struct mgmt_cp_confirm_name *cp = data;
2828 struct inquiry_entry *e;
2831 BT_DBG("%s", hdev->name);
2835 if (!hci_discovery_active(hdev)) {
2836 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2837 MGMT_STATUS_FAILED);
2841 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2843 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2844 MGMT_STATUS_INVALID_PARAMS);
2848 if (cp->name_known) {
2849 e->name_state = NAME_KNOWN;
2852 e->name_state = NAME_NEEDED;
2853 hci_inquiry_cache_update_resolve(hdev, e);
2856 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2860 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the controller's
 * blacklist; a failure to add maps to MGMT_STATUS_FAILED.
 */
2864 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2867 struct mgmt_cp_block_device *cp = data;
2871 BT_DBG("%s", hdev->name);
2873 if (!bdaddr_type_is_valid(cp->addr.type))
2874 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2875 MGMT_STATUS_INVALID_PARAMS,
2876 &cp->addr, sizeof(cp->addr));
2880 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2882 status = MGMT_STATUS_FAILED;
2884 status = MGMT_STATUS_SUCCESS;
2886 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2887 &cp->addr, sizeof(cp->addr));
2889 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the
 * blacklist; deletion failure maps to INVALID_PARAMS (not listed).
 */
2894 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2897 struct mgmt_cp_unblock_device *cp = data;
2901 BT_DBG("%s", hdev->name);
2903 if (!bdaddr_type_is_valid(cp->addr.type))
2904 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2905 MGMT_STATUS_INVALID_PARAMS,
2906 &cp->addr, sizeof(cp->addr));
2910 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2912 status = MGMT_STATUS_INVALID_PARAMS;
2914 status = MGMT_STATUS_SUCCESS;
2916 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2917 &cp->addr, sizeof(cp->addr));
2919 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store Device ID (DI) source/vendor/
 * product/version and refresh EIR so the new DI record is advertised.
 */
2924 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2927 struct mgmt_cp_set_device_id *cp = data;
2928 struct hci_request req;
2932 BT_DBG("%s", hdev->name);
2934 source = __le16_to_cpu(cp->source);
/* Valid DI sources: 0 = disabled, 1 = Bluetooth SIG, 2 = USB IF */
2936 if (source > 0x0002)
2937 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2938 MGMT_STATUS_INVALID_PARAMS);
2942 hdev->devid_source = source;
2943 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2944 hdev->devid_product = __le16_to_cpu(cp->product);
2945 hdev->devid_version = __le16_to_cpu(cp->version);
2947 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2949 hci_req_init(&req, hdev);
/* Fire-and-forget EIR update: no completion callback needed */
2951 hci_req_run(&req, NULL);
2953 hci_dev_unlock(hdev);
/* HCI request callback for set_fast_connectable(): on success flip
 * HCI_FAST_CONNECTABLE to match the requested value, answer with the
 * new settings and broadcast them to other mgmt sockets.
 */
2958 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2960 struct pending_cmd *cmd;
2962 BT_DBG("status 0x%02x", status);
2966 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2971 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2972 mgmt_status(status));
2974 struct mgmt_mode *cp = cmd->param;
2977 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2979 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2981 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2982 new_settings(hdev, cmd->sk);
2985 mgmt_pending_remove(cmd);
2988 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle interlaced page scan
 * (faster incoming connections). BR/EDR >= 1.2 only, must be powered
 * and connectable.
 */
2991 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2992 void *data, u16 len)
2994 struct mgmt_mode *cp = data;
2995 struct pending_cmd *cmd;
2996 struct hci_request req;
2999 BT_DBG("%s", hdev->name);
3001 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3002 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3003 MGMT_STATUS_NOT_SUPPORTED);
3005 if (cp->val != 0x00 && cp->val != 0x01)
3006 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3007 MGMT_STATUS_INVALID_PARAMS);
3009 if (!hdev_is_powered(hdev))
3010 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3011 MGMT_STATUS_NOT_POWERED);
3013 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3014 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3015 MGMT_STATUS_REJECTED);
3019 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3020 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested value already in effect: settings response, no HCI work */
3025 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3026 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3031 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3038 hci_req_init(&req, hdev);
3040 write_fast_connectable(&req, cp->val);
3042 err = hci_req_run(&req, fast_connectable_complete);
3044 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3045 MGMT_STATUS_FAILED);
3046 mgmt_pending_remove(cmd);
3050 hci_dev_unlock(hdev);
/* Validate one LTK entry from user space: boolean fields must be 0/1
 * and the address type must be an LE type.
 */
3055 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3057 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3059 if (key->master != 0x00 && key->master != 0x01)
3061 if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace all stored SMP LTKs
 * with the user-space supplied set (all entries validated first).
 */
3066 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3067 void *cp_data, u16 len)
3069 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3070 u16 key_count, expected_len;
3073 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): same u16 wrap hazard as load_link_keys —
 * key_count * sizeof(struct mgmt_ltk_info) can exceed 65535;
 * upstream later bounds key_count explicitly.
 */
3075 expected_len = sizeof(*cp) + key_count *
3076 sizeof(struct mgmt_ltk_info);
3077 if (expected_len != len) {
3078 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3080 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3081 MGMT_STATUS_INVALID_PARAMS);
3084 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate everything before clearing the existing key store */
3086 for (i = 0; i < key_count; i++) {
3087 struct mgmt_ltk_info *key = &cp->keys[i];
3089 if (!ltk_is_valid(key))
3090 return cmd_status(sk, hdev->id,
3091 MGMT_OP_LOAD_LONG_TERM_KEYS,
3092 MGMT_STATUS_INVALID_PARAMS);
3097 hci_smp_ltks_clear(hdev);
3099 for (i = 0; i < key_count; i++) {
3100 struct mgmt_ltk_info *key = &cp->keys[i];
/* Key role selection (master branch elided in this excerpt) */
3106 type = HCI_SMP_LTK_SLAVE;
3108 hci_add_ltk(hdev, &key->addr.bdaddr,
3109 bdaddr_to_le(key->addr.type),
3110 type, 0, key->authenticated, key->val,
3111 key->enc_size, key->ediv, key->rand);
3114 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3117 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands. The array index IS the opcode
 * (MGMT_OP_* values are sequential starting at 0x0001), so the entry
 * order must never be changed. var_len marks commands whose payload
 * may exceed the listed minimum size (key-list loads).
 */
3122 static const struct mgmt_handler {
3123 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3127 } mgmt_handlers[] = {
3128 { NULL }, /* 0x0000 (no command) */
3129 { read_version, false, MGMT_READ_VERSION_SIZE },
3130 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3131 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3132 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3133 { set_powered, false, MGMT_SETTING_SIZE },
3134 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3135 { set_connectable, false, MGMT_SETTING_SIZE },
3136 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3137 { set_pairable, false, MGMT_SETTING_SIZE },
3138 { set_link_security, false, MGMT_SETTING_SIZE },
3139 { set_ssp, false, MGMT_SETTING_SIZE },
3140 { set_hs, false, MGMT_SETTING_SIZE },
3141 { set_le, false, MGMT_SETTING_SIZE },
3142 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3143 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3144 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3145 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3146 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3147 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3148 { disconnect, false, MGMT_DISCONNECT_SIZE },
3149 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3150 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3151 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3152 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3153 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3154 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3155 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3156 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3157 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3158 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3159 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3160 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3161 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3162 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3163 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3164 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3165 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3166 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3167 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3168 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for raw mgmt commands arriving on an HCI control socket.
 * Copies the whole message from userspace into a kernel buffer, validates
 * the mgmt header (opcode / controller index / payload length) and then
 * dispatches to mgmt_handlers[opcode].
 *
 * NOTE(review): this excerpt elides several lines (buffer-free/cleanup and
 * some error paths); comments below describe only the visible checks.
 */
3172 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3176 struct mgmt_hdr *hdr;
3177 u16 opcode, index, len;
3178 struct hci_dev *hdev = NULL;
3179 const struct mgmt_handler *handler;
3182 BT_DBG("got %zu bytes", msglen);
/* Must at least contain a full mgmt header. */
3184 if (msglen < sizeof(*hdr))
3187 buf = kmalloc(msglen, GFP_KERNEL);
3191 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields are little-endian on the wire. */
3197 opcode = __le16_to_cpu(hdr->opcode);
3198 index = __le16_to_cpu(hdr->index);
3199 len = __le16_to_cpu(hdr->len);
/* The header's declared parameter length must match what was received. */
3201 if (len != msglen - sizeof(*hdr)) {
/* Resolve the controller index (if any); a bad index is reported back to
 * the caller rather than silently dropped. */
3206 if (index != MGMT_INDEX_NONE) {
3207 hdev = hci_dev_get(index);
3209 err = cmd_status(sk, index, opcode,
3210 MGMT_STATUS_INVALID_INDEX);
/* Opcode must be inside the dispatch table and actually implemented. */
3215 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3216 mgmt_handlers[opcode].func == NULL) {
3217 BT_DBG("Unknown op %u", opcode);
3218 err = cmd_status(sk, index, opcode,
3219 MGMT_STATUS_UNKNOWN_COMMAND);
/* Commands below MGMT_OP_READ_INFO are global (no controller), everything
 * from READ_INFO up requires a valid controller index. */
3223 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3224 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3225 err = cmd_status(sk, index, opcode,
3226 MGMT_STATUS_INVALID_INDEX);
3230 handler = &mgmt_handlers[opcode];
/* var_len handlers take data_len as a minimum, others need an exact match. */
3232 if ((handler->var_len && len < handler->data_len) ||
3233 (!handler->var_len && len != handler->data_len)) {
3234 err = cmd_status(sk, index, opcode,
3235 MGMT_STATUS_INVALID_PARAMS);
3240 mgmt_init_hdev(sk, hdev);
/* Parameters start right after the header. */
3242 cp = buf + sizeof(*hdr);
3244 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach() callback: fail the pending command with the status
 * pointed to by @data and remove it from the pending list. */
3258 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3262 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3263 mgmt_pending_remove(cmd);
/* Broadcast MGMT_EV_INDEX_ADDED when a new (mgmt-capable) controller
 * appears; controllers rejected by mgmt_valid_hdev() are skipped. */
3266 int mgmt_index_added(struct hci_dev *hdev)
3268 if (!mgmt_valid_hdev(hdev))
3271 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Controller going away: fail every pending mgmt command on it with
 * INVALID_INDEX (opcode 0 = match all), then broadcast INDEX_REMOVED. */
3274 int mgmt_index_removed(struct hci_dev *hdev)
3276 u8 status = MGMT_STATUS_INVALID_INDEX;
3278 if (!mgmt_valid_hdev(hdev))
3281 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3283 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3288 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending settings command with
 * the current settings, detach it from the pending list and free it.  The
 * first command's socket is saved (with a reference via sock_hold) in
 * match->sk so the caller can later skip that socket when broadcasting. */
3292 static void settings_rsp(struct pending_cmd *cmd, void *data)
3294 struct cmd_lookup *match = data;
3296 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3298 list_del(&cmd->list);
3300 if (match->sk == NULL) {
3301 match->sk = cmd->sk;
3302 sock_hold(match->sk);
/* Freed directly (not mgmt_pending_remove) since we already did list_del. */
3305 mgmt_pending_free(cmd);
/* Queue a WRITE_SCAN_ENABLE command reflecting the CONNECTABLE (page scan)
 * and DISCOVERABLE (inquiry scan) dev_flags onto the given request. */
3308 static void set_bredr_scan(struct hci_request *req)
3310 struct hci_dev *hdev = req->hdev;
3313 /* Ensure that fast connectable is disabled. This function will
3314 * not do anything if the page scan parameters are already what
3317 write_fast_connectable(req, false);
3319 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3321 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3322 scan |= SCAN_INQUIRY;
3325 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): respond to pending SET_POWERED commands and emit a
 * New Settings event (skipping the responding socket via match.sk). */
3328 static void powered_complete(struct hci_dev *hdev, u8 status)
3330 struct cmd_lookup match = { NULL, hdev };
3332 BT_DBG("status 0x%02x", status);
3336 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3338 new_settings(hdev, match.sk);
3340 hci_dev_unlock(hdev);
/* Build and run an HCI request that brings the freshly powered-on
 * controller's HCI state in line with the mgmt dev_flags: SSP mode,
 * LE host support, authentication (link security) and BR/EDR scan mode.
 * Returns the result of hci_req_run() (0 if commands were queued;
 * powered_complete() runs when they finish). */
3346 static int powered_update_hci(struct hci_dev *hdev)
3348 struct hci_request req;
3351 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host feature
 * bit is not yet set. */
3353 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3354 !lmp_host_ssp_capable(hdev)) {
3357 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3360 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3361 lmp_bredr_capable(hdev)) {
3362 struct hci_cp_write_le_host_supported cp;
3365 cp.simul = lmp_le_br_capable(hdev);
3367 /* Check first if we already have the right
3368 * host state (host features set)
3370 if (cp.le != lmp_host_le_capable(hdev) ||
3371 cp.simul != lmp_host_le_br_capable(hdev))
3372 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Only touch AUTH_ENABLE when the desired state differs from HCI_AUTH. */
3376 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3377 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3378 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3379 sizeof(link_sec), &link_sec);
3381 if (lmp_bredr_capable(hdev)) {
3382 set_bredr_scan(&req);
3388 return hci_req_run(&req, powered_complete);
/* Power-state change notification from the HCI core.  On power-up, kick
 * off powered_update_hci() (whose completion handles the responses); on
 * power-down (or if no HCI commands were needed), answer pending
 * SET_POWERED commands, fail all other pending commands with NOT_POWERED,
 * signal a zeroed class of device if one was set, and emit New Settings.
 * NOTE(review): some lines (e.g. the power-down branch boundaries) are
 * elided in this excerpt. */
3391 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3393 struct cmd_lookup match = { NULL, hdev };
3394 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3395 u8 zero_cod[] = { 0, 0, 0 };
3398 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* == 0 means the request was queued; responses come from powered_complete. */
3402 if (powered_update_hci(hdev) == 0)
3405 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3410 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3411 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3413 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3414 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3415 zero_cod, sizeof(zero_cod), NULL);
3418 err = new_settings(hdev, match.sk);
/* Power-on attempt failed: report a status (RFKILLED for -ERFKILL, FAILED
 * otherwise) to the pending SET_POWERED command, if any, and remove it. */
3426 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3428 struct pending_cmd *cmd;
3431 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3435 if (err == -ERFKILL)
3436 status = MGMT_STATUS_RFKILLED;
3438 status = MGMT_STATUS_FAILED;
3440 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3442 mgmt_pending_remove(cmd);
/* Discoverable state change from the HCI core: update HCI_DISCOVERABLE in
 * dev_flags (test_and_set/clear detects an actual change), answer pending
 * SET_DISCOVERABLE commands, and emit New Settings on change. */
3447 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3449 struct cmd_lookup match = { NULL, hdev };
3450 bool changed = false;
3454 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3457 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3461 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3465 err = new_settings(hdev, match.sk)
/* Connectable state change from the HCI core: update HCI_CONNECTABLE in
 * dev_flags and emit New Settings, skipping the socket of any pending
 * SET_CONNECTABLE command (that socket gets its own response). */
3473 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3475 struct pending_cmd *cmd;
3476 bool changed = false;
3480 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3483 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3487 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3490 err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* WRITE_SCAN_ENABLE failed: fail the pending SET_CONNECTABLE and/or
 * SET_DISCOVERABLE commands matching the scan bits that were requested. */
3495 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3497 u8 mgmt_err = mgmt_status(status);
3499 if (scan & SCAN_PAGE)
3500 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3501 cmd_status_rsp, &mgmt_err);
3503 if (scan & SCAN_INQUIRY)
3504 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3505 cmd_status_rsp, &mgmt_err);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key.
 * @persistent is passed through as the store hint for userspace. */
3510 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3513 struct mgmt_ev_new_link_key ev;
3515 memset(&ev, 0, sizeof(ev));
3517 ev.store_hint = persistent;
3518 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
/* Link keys are BR/EDR only. */
3519 ev.key.addr.type = BDADDR_BREDR;
3520 ev.key.type = key->type;
3521 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3522 ev.key.pin_len = key->pin_len;
3524 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new SMP long term key.  The LE
 * bdaddr_type is translated to a mgmt address type; key material (rand,
 * ediv, val) is copied into the event verbatim. */
3527 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3529 struct mgmt_ev_new_long_term_key ev;
3531 memset(&ev, 0, sizeof(ev));
3533 ev.store_hint = persistent;
3534 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3535 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3536 ev.key.authenticated = key->authenticated;
3537 ev.key.enc_size = key->enc_size;
3538 ev.key.ediv = key->ediv;
/* NOTE(review): the line setting ev.key.master for HCI_SMP_LTK is elided
 * in this excerpt — confirm against the full source. */
3540 if (key->type == HCI_SMP_LTK)
3543 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3544 memcpy(ev.key.val, key->val, sizeof(key->val));
3546 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_DEVICE_CONNECTED with EIR-encoded remote name and (when
 * non-zero) class of device appended to the variable-length tail. */
3550 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3551 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3555 struct mgmt_ev_device_connected *ev = (void *) buf;
3558 bacpy(&ev->addr.bdaddr, bdaddr);
3559 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3561 ev->flags = __cpu_to_le32(flags);
3564 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append CoD when it is non-zero (all-zero CoD carries no info). */
3567 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3568 eir_len = eir_append_data(ev->eir, eir_len,
3569 EIR_CLASS_OF_DEV, dev_class, 3);
3571 ev->eir_len = cpu_to_le16(eir_len);
3573 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3574 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback for pending DISCONNECT commands: send a
 * successful command-complete with the address from the original request,
 * publish the command's socket through @data, and drop the command. */
3577 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3579 struct mgmt_cp_disconnect *cp = cmd->param;
3580 struct sock **sk = data;
3581 struct mgmt_rp_disconnect rp;
3583 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3584 rp.addr.type = cp->addr.type;
3586 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3592 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback for pending UNPAIR_DEVICE commands:
 * emit the Device Unpaired event, complete the command with success and
 * remove it from the pending list. */
3595 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3597 struct hci_dev *hdev = data;
3598 struct mgmt_cp_unpair_device *cp = cmd->param;
3599 struct mgmt_rp_unpair_device rp;
3601 memset(&rp, 0, sizeof(rp));
3602 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3603 rp.addr.type = cp->addr.type;
3605 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3607 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3609 mgmt_pending_remove(cmd);
/* Connection gone: complete any pending DISCONNECT commands (capturing
 * the requester's socket so it is skipped in the broadcast), emit
 * MGMT_EV_DEVICE_DISCONNECTED, then flush pending UNPAIR_DEVICE commands. */
3612 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3613 u8 link_type, u8 addr_type, u8 reason)
3615 struct mgmt_ev_device_disconnected ev;
3616 struct sock *sk = NULL;
3619 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3621 bacpy(&ev.addr.bdaddr, bdaddr);
3622 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3625 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3631 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Disconnect attempt failed: flush pending UNPAIR_DEVICE commands, then
 * complete the pending DISCONNECT command (if any) with the translated
 * HCI status and the target address. */
3637 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3638 u8 link_type, u8 addr_type, u8 status)
3640 struct mgmt_rp_disconnect rp;
3641 struct pending_cmd *cmd;
3644 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3647 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3651 bacpy(&rp.addr.bdaddr, bdaddr);
3652 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3654 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3655 mgmt_status(status), &rp, sizeof(rp));
3657 mgmt_pending_remove(cmd);
/* Broadcast MGMT_EV_CONNECT_FAILED with the target address and the HCI
 * status translated to a mgmt status. */
3662 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3663 u8 addr_type, u8 status)
3665 struct mgmt_ev_connect_failed ev;
3667 bacpy(&ev.addr.bdaddr, bdaddr);
3668 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3669 ev.status = mgmt_status(status);
3671 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask userspace for a PIN code (BR/EDR only) via
 * MGMT_EV_PIN_CODE_REQUEST.  @secure indicates a 16-digit PIN is wanted
 * (its assignment into the event is elided in this excerpt). */
3674 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3676 struct mgmt_ev_pin_code_request ev;
3678 bacpy(&ev.addr.bdaddr, bdaddr);
3679 ev.addr.type = BDADDR_BREDR;
3682 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* Complete a pending PIN_CODE_REPLY command with the translated HCI
 * status and the (BR/EDR) peer address. */
3686 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3689 struct pending_cmd *cmd;
3690 struct mgmt_rp_pin_code_reply rp;
3693 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3697 bacpy(&rp.addr.bdaddr, bdaddr);
3698 rp.addr.type = BDADDR_BREDR;
3700 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3701 mgmt_status(status), &rp, sizeof(rp));
3703 mgmt_pending_remove(cmd);
/* Same as mgmt_pin_code_reply_complete() but for the negative reply
 * (PIN_CODE_NEG_REPLY) pending command. */
3708 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3711 struct pending_cmd *cmd;
3712 struct mgmt_rp_pin_code_reply rp;
3715 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3719 bacpy(&rp.addr.bdaddr, bdaddr);
3720 rp.addr.type = BDADDR_BREDR;
3722 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3723 mgmt_status(status), &rp, sizeof(rp));
3725 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a numeric comparison value during pairing via
 * MGMT_EV_USER_CONFIRM_REQUEST.  @confirm_hint tells userspace whether
 * only a yes/no confirmation is required. */
3730 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3731 u8 link_type, u8 addr_type, __le32 value,
3734 struct mgmt_ev_user_confirm_request ev;
3736 BT_DBG("%s", hdev->name);
3738 bacpy(&ev.addr.bdaddr, bdaddr);
3739 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3740 ev.confirm_hint = confirm_hint;
3743 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey during pairing via
 * MGMT_EV_USER_PASSKEY_REQUEST. */
3747 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3748 u8 link_type, u8 addr_type)
3750 struct mgmt_ev_user_passkey_request ev;
3752 BT_DBG("%s", hdev->name);
3754 bacpy(&ev.addr.bdaddr, bdaddr);
3755 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3757 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey (neg-)reply
 * commands: find the pending command for @opcode, complete it with the
 * translated status and peer address, and remove it. */
3761 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3762 u8 link_type, u8 addr_type, u8 status,
3765 struct pending_cmd *cmd;
3766 struct mgmt_rp_user_confirm_reply rp;
3769 cmd = mgmt_pending_find(opcode, hdev);
3773 bacpy(&rp.addr.bdaddr, bdaddr);
3774 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3775 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3778 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
3783 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3784 u8 link_type, u8 addr_type, u8 status)
3786 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3787 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
3790 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3791 u8 link_type, u8 addr_type, u8 status)
3793 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3795 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
3798 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3799 u8 link_type, u8 addr_type, u8 status)
3801 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3802 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
3805 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3806 u8 link_type, u8 addr_type, u8 status)
3808 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3810 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Broadcast MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey
 * the remote side must enter; @entered counts digits typed so far. */
3813 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3814 u8 link_type, u8 addr_type, u32 passkey,
3817 struct mgmt_ev_passkey_notify ev;
3819 BT_DBG("%s", hdev->name);
3821 bacpy(&ev.addr.bdaddr, bdaddr);
3822 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3823 ev.passkey = __cpu_to_le32(passkey);
3824 ev.entered = entered;
3826 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_AUTH_FAILED with the peer address and translated
 * HCI status. */
3829 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3830 u8 addr_type, u8 status)
3832 struct mgmt_ev_auth_failed ev;
3834 bacpy(&ev.addr.bdaddr, bdaddr);
3835 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3836 ev.status = mgmt_status(status);
3838 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* WRITE_AUTH_ENABLE finished.  On failure, fail pending SET_LINK_SECURITY
 * commands with the translated status; on success, sync the
 * HCI_LINK_SECURITY dev_flag with the controller's HCI_AUTH flag, answer
 * pending commands and emit New Settings when the flag changed. */
3841 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3843 struct cmd_lookup match = { NULL, hdev };
3844 bool changed = false;
3848 u8 mgmt_err = mgmt_status(status);
3849 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3850 cmd_status_rsp, &mgmt_err);
3854 if (test_bit(HCI_AUTH, &hdev->flags)) {
3855 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3858 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3862 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3866 err = new_settings(hdev, match.sk);
/* Queue a zeroed WRITE_EIR command (and clear the cached hdev->eir) on
 * controllers with extended inquiry response support; no-op otherwise. */
3874 static void clear_eir(struct hci_request *req)
3876 struct hci_dev *hdev = req->hdev;
3877 struct hci_cp_write_eir cp;
3879 if (!lmp_ext_inq_capable(hdev))
3882 memset(hdev->eir, 0, sizeof(hdev->eir));
3884 memset(&cp, 0, sizeof(cp));
3886 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* WRITE_SSP_MODE finished.  On failure, roll back the HCI_SSP_ENABLED
 * dev_flag (emitting New Settings if it had been set optimistically) and
 * fail pending SET_SSP commands.  On success, sync the flag, answer
 * pending commands, emit New Settings on change, and follow up with an
 * EIR update request (update when SSP on, clear when off — the update_eir
 * branch is partially elided in this excerpt). */
3889 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3891 struct cmd_lookup match = { NULL, hdev };
3892 struct hci_request req;
3893 bool changed = false;
3897 u8 mgmt_err = mgmt_status(status);
3899 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3901 err = new_settings(hdev, NULL);
3903 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3910 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3913 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3917 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3920 err = new_settings(hdev, match.sk);
3925 hci_req_init(&req, hdev);
3927 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3932 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: remember the first pending command's
 * socket in match->sk (taking a reference); does not touch the command. */
3937 static void sk_lookup(struct pending_cmd *cmd, void *data)
3939 struct cmd_lookup *match = data;
3941 if (match->sk == NULL) {
3942 match->sk = cmd->sk;
3943 sock_hold(match->sk);
/* Class-of-device write finished: collect the originating socket from any
 * pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command (so it can be skipped
 * in the broadcast) and emit MGMT_EV_CLASS_OF_DEV_CHANGED. */
3947 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3950 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3953 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3954 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3955 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3958 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Local name write finished: cache the new name in hdev->dev_name and
 * broadcast MGMT_EV_LOCAL_NAME_CHANGED (skipping the requester's socket).
 * Suppressed while a SET_POWERED command is pending, since power-on issues
 * its own name write. */
3967 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3969 struct mgmt_cp_set_local_name ev;
3970 struct pending_cmd *cmd;
3975 memset(&ev, 0, sizeof(ev));
3976 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3977 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3979 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3981 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3983 /* If this is a HCI command related to powering on the
3984 * HCI dev don't send any mgmt signals.
3986 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3990 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3991 cmd ? cmd->sk : NULL);
/* READ_LOCAL_OOB_DATA finished: on error, send a status response to the
 * pending command; on success, return the hash/randomizer pair in a
 * command-complete.  The pending command is removed either way. */
3994 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3995 u8 *randomizer, u8 status)
3997 struct pending_cmd *cmd;
4000 BT_DBG("%s status %u", hdev->name, status);
4002 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4007 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4008 mgmt_status(status));
4010 struct mgmt_rp_read_local_oob_data rp;
4012 memcpy(rp.hash, hash, sizeof(rp.hash));
4013 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4015 err = cmd_complete(cmd->sk, hdev->id,
4016 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4020 mgmt_pending_remove(cmd);
/* WRITE_LE_HOST_SUPPORTED finished.  Mirrors mgmt_auth_enable_complete():
 * on failure, roll back an optimistically-set HCI_LE_ENABLED flag and fail
 * pending SET_LE commands; on success, sync the flag, answer pending
 * commands and emit New Settings when it changed. */
4025 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4027 struct cmd_lookup match = { NULL, hdev };
4028 bool changed = false;
4032 u8 mgmt_err = mgmt_status(status);
4034 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
4036 err = new_settings(hdev, NULL);
4038 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
4045 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4048 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4052 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4055 err = new_settings(hdev, match.sk);
/* Report a device discovered during inquiry/scanning via
 * MGMT_EV_DEVICE_FOUND.  The raw EIR data is copied through, and a
 * class-of-device EIR field is appended when one was reported separately
 * and the EIR does not already contain one.  Oversized reports (event +
 * eir + CoD headroom > buf) are rejected. */
4063 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4064 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4065 ssp, u8 *eir, u16 eir_len)
4068 struct mgmt_ev_device_found *ev = (void *) buf;
4071 /* Leave 5 bytes for a potential CoD field */
4072 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4075 memset(buf, 0, sizeof(buf));
4077 bacpy(&ev->addr.bdaddr, bdaddr);
4078 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Flags tell userspace whether name confirmation is needed and whether
 * the device lacks SSP (legacy pairing). */
4081 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4083 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4086 memcpy(ev->eir, eir, eir_len);
4088 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4089 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4092 ev->eir_len = cpu_to_le16(eir_len);
4093 ev_size = sizeof(*ev) + eir_len;
4095 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a Device Found event whose EIR carries
 * only an EIR_NAME_COMPLETE field (buffer sized for the max name + the
 * 2-byte EIR field header). */
4098 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4099 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4101 struct mgmt_ev_device_found *ev;
4102 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4105 ev = (struct mgmt_ev_device_found *) buf;
4107 memset(buf, 0, sizeof(buf));
4109 bacpy(&ev->addr.bdaddr, bdaddr);
4110 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4113 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4116 ev->eir_len = cpu_to_le16(eir_len);
4118 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4119 sizeof(*ev) + eir_len, NULL);
/* Discovery could not be started: reset discovery state to STOPPED and
 * complete the pending START_DISCOVERY command with the translated status
 * and the requested discovery type. */
4122 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4124 struct pending_cmd *cmd;
4128 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4130 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4134 type = hdev->discovery.type;
4136 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4137 &type, sizeof(type));
4138 mgmt_pending_remove(cmd);
/* Discovery could not be stopped: complete the pending STOP_DISCOVERY
 * command with the translated status and the current discovery type. */
4143 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4145 struct pending_cmd *cmd;
4148 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4152 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4153 &hdev->discovery.type, sizeof(hdev->discovery.type));
4154 mgmt_pending_remove(cmd);
/* Discovery state changed: complete whichever START/STOP_DISCOVERY command
 * is pending (the lookup order appears tied to the new state; some lines
 * are elided here), then broadcast MGMT_EV_DISCOVERING with the current
 * discovery type and new on/off state. */
4159 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4161 struct mgmt_ev_discovering ev;
4162 struct pending_cmd *cmd;
4164 BT_DBG("%s discovering %u", hdev->name, discovering);
4167 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4169 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4172 u8 type = hdev->discovery.type;
4174 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4176 mgmt_pending_remove(cmd);
4179 memset(&ev, 0, sizeof(ev));
4180 ev.type = hdev->discovery.type;
4181 ev.discovering = discovering;
4183 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_DEVICE_BLOCKED, skipping the socket of the pending
 * BLOCK_DEVICE command (which gets its own command response). */
4186 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4188 struct pending_cmd *cmd;
4189 struct mgmt_ev_device_blocked ev;
4191 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4193 bacpy(&ev.addr.bdaddr, bdaddr);
4194 ev.addr.type = type;
4196 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4197 cmd ? cmd->sk : NULL);
/* Broadcast MGMT_EV_DEVICE_UNBLOCKED, skipping the socket of the pending
 * UNBLOCK_DEVICE command (which gets its own command response). */
4200 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4202 struct pending_cmd *cmd;
4203 struct mgmt_ev_device_unblocked ev;
4205 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4207 bacpy(&ev.addr.bdaddr, bdaddr);
4208 ev.addr.type = type;
4210 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4211 cmd ? cmd->sk : NULL);
/* Runtime-writable module parameter (mode 0644) gating High Speed support. */
4214 module_param(enable_hs, bool, 0644);
4215 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");