2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Table of mgmt opcodes supported by this kernel; reported to user
 * space verbatim by the Read Management Supported Commands command
 * (see read_commands() below).
 * NOTE(review): this extraction is fragmentary — several entries
 * (e.g. between original lines 41 and 44) and the closing "};" are
 * missing from this view.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Table of mgmt events this kernel can emit; reported alongside
 * mgmt_commands[] by read_commands().
 * NOTE(review): fragmentary extraction — several event entries and
 * the closing "};" are missing from this view.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* LE scan / BR inquiry tuning constants, per the comment below.
 * Windows/intervals are in HCI units; timeouts are in milliseconds.
 */
106 * These LE scan and inquiry parameters were chosen according to LE General
107 * Discovery Procedure specification.
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A controller is "powered" from mgmt's point of view only when HCI_UP
 * is set AND the device is not merely in the auto-power-off grace
 * period (HCI_AUTO_OFF).
 */
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): fragment of the pending-command bookkeeping struct
 * (struct pending_cmd in upstream); only its list linkage is visible
 * here — the remaining members (opcode, index, param, sk, ...) are
 * missing from this extraction.
 */
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; any code beyond the end of
 * this table maps to MGMT_STATUS_FAILED (see mgmt_status() below).
 * NOTE(review): first entry (index 0, "Success") and the closing "};"
 * are missing from this extraction.
 */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* mgmt_valid_hdev() - only BR/EDR controllers are exposed through the
 * mgmt interface; AMP controllers are filtered out.
 */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
/* mgmt_status() - translate an HCI status byte to the corresponding
 * MGMT status via mgmt_status_table[]; out-of-range codes fall back
 * to the generic MGMT_STATUS_FAILED.
 */
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
/* cmd_status() - build an MGMT_EV_CMD_STATUS event (hdr + ev) in a
 * freshly allocated skb and queue it on @sk's receive queue, reporting
 * @status for command @cmd on controller @index.
 * NOTE(review): the alloc-failure check, ev->status assignment and
 * error/return paths are missing from this extraction.
 */
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
/* cmd_complete() - like cmd_status() but builds MGMT_EV_CMD_COMPLETE,
 * appending @rp_len bytes of command-specific response data (@rp)
 * after the fixed event header.
 * NOTE(review): alloc check, ev->status assignment, the "if (rp)"
 * guard around memcpy and the return path are missing from this view.
 */
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
/* read_version() - MGMT_OP_READ_VERSION handler: replies with the
 * compile-time MGMT_VERSION/MGMT_REVISION pair on the index-less
 * controller (MGMT_INDEX_NONE).
 */
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* read_commands() - MGMT_OP_READ_COMMANDS handler: allocates a
 * variable-size reply holding the counts followed by the flattened
 * little-endian mgmt_commands[] then mgmt_events[] opcode lists.
 * The opcodes land in potentially-unaligned storage, hence
 * put_unaligned_le16().
 * NOTE(review): the kmalloc NULL check and the trailing kfree/return
 * are missing from this extraction.
 */
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* read_index_list() - MGMT_OP_READ_INDEX_LIST handler: under
 * hci_dev_list_lock, count eligible controllers, allocate the reply
 * (GFP_ATOMIC — we hold a read lock), then walk the list again filling
 * in controller ids.  Controllers still in HCI_SETUP or failing
 * mgmt_valid_hdev() are skipped, so the second pass may yield fewer
 * entries than the first — rp_len is recomputed from the final count.
 * NOTE(review): the first-pass count increment, kmalloc NULL check and
 * the trailing kfree/return are missing from this extraction.
 */
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 if (!mgmt_valid_hdev(d))
358 rp->index[count++] = cpu_to_le16(d->id);
359 BT_DBG("Added hci%u", d->id);
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
365 read_unlock(&hci_dev_list_lock);
367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* get_supported_settings() - settings bitmask a controller COULD
 * support, derived from its LMP feature bits: powered/pairable are
 * universal; SSP needs lmp_ssp_capable(); the BR/EDR cluster
 * (connectable, fast-connectable on >= 1.2, discoverable, link
 * security) needs lmp_bredr_capable(); LE needs lmp_le_capable().
 */
375 static u32 get_supported_settings(struct hci_dev *hdev)
379 settings |= MGMT_SETTING_POWERED;
380 settings |= MGMT_SETTING_PAIRABLE;
382 if (lmp_ssp_capable(hdev))
383 settings |= MGMT_SETTING_SSP;
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
388 settings |= MGMT_SETTING_FAST_CONNECTABLE;
389 settings |= MGMT_SETTING_DISCOVERABLE;
390 settings |= MGMT_SETTING_BREDR;
391 settings |= MGMT_SETTING_LINK_SECURITY;
395 settings |= MGMT_SETTING_HS;
397 if (lmp_le_capable(hdev))
398 settings |= MGMT_SETTING_LE;
/* get_current_settings() - settings bitmask reflecting the
 * controller's CURRENT state, read from hdev->dev_flags bits (plus
 * hdev_is_powered() and the BR/EDR LMP capability).
 */
403 static u32 get_current_settings(struct hci_dev *hdev)
407 if (hdev_is_powered(hdev))
408 settings |= MGMT_SETTING_POWERED;
410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_CONNECTABLE;
413 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_DISCOVERABLE;
416 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_PAIRABLE;
419 if (lmp_bredr_capable(hdev))
420 settings |= MGMT_SETTING_BREDR;
422 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_LE;
425 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LINK_SECURITY;
428 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
429 settings |= MGMT_SETTING_SSP;
431 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_HS;
437 #define PNP_INFO_SVCLASS_ID 0x1200
/* create_uuid16_list() - append an EIR field of 16-bit service UUIDs
 * to @data (at most @len bytes), lazily emitting the field header on
 * the first UUID.  Starts as EIR_UUID16_ALL and is downgraded to
 * EIR_UUID16_SOME if space runs out; uuids_start[0] tracks the field
 * length byte.  PnP Information (DI profile) is deliberately excluded.
 * Returns the advanced write pointer.
 * NOTE(review): the field-header emission lines and loop `continue`/
 * `break` statements are missing from this extraction.
 */
439 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
441 u8 *ptr = data, *uuids_start = NULL;
442 struct bt_uuid *uuid;
447 list_for_each_entry(uuid, &hdev->uuids, list) {
450 if (uuid->size != 16)
453 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
457 if (uuid16 == PNP_INFO_SVCLASS_ID)
463 uuids_start[1] = EIR_UUID16_ALL;
467 /* Stop if not enough space to put next UUID */
468 if ((ptr - data) + sizeof(u16) > len) {
469 uuids_start[1] = EIR_UUID16_SOME;
473 *ptr++ = (uuid16 & 0x00ff);
474 *ptr++ = (uuid16 & 0xff00) >> 8;
475 uuids_start[0] += sizeof(uuid16);
/* create_uuid32_list() - same pattern as create_uuid16_list() but for
 * 32-bit UUIDs (EIR_UUID32_ALL/_SOME); the 4 value bytes are copied
 * from offset 12 of the stored 128-bit form.
 */
481 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
483 u8 *ptr = data, *uuids_start = NULL;
484 struct bt_uuid *uuid;
489 list_for_each_entry(uuid, &hdev->uuids, list) {
490 if (uuid->size != 32)
496 uuids_start[1] = EIR_UUID32_ALL;
500 /* Stop if not enough space to put next UUID */
501 if ((ptr - data) + sizeof(u32) > len) {
502 uuids_start[1] = EIR_UUID32_SOME;
506 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
508 uuids_start[0] += sizeof(u32);
/* create_uuid128_list() - same pattern for full 128-bit UUIDs
 * (EIR_UUID128_ALL/_SOME); each entry consumes 16 bytes.
 */
514 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
516 u8 *ptr = data, *uuids_start = NULL;
517 struct bt_uuid *uuid;
522 list_for_each_entry(uuid, &hdev->uuids, list) {
523 if (uuid->size != 128)
529 uuids_start[1] = EIR_UUID128_ALL;
533 /* Stop if not enough space to put next UUID */
534 if ((ptr - data) + 16 > len) {
535 uuids_start[1] = EIR_UUID128_SOME;
539 memcpy(ptr, uuid->uuid, 16);
541 uuids_start[0] += 16;
/* create_eir() - serialize the controller's Extended Inquiry Response
 * payload into @data: local name (short or complete, length-capped),
 * TX power (when valid), Device ID record (when a source is set), and
 * finally the three UUID list fields, each bounded by the remaining
 * HCI_MAX_EIR_LENGTH space.
 * NOTE(review): the name-truncation branch, several length/type bytes
 * and pointer advances are missing from this extraction.
 */
547 static void create_eir(struct hci_dev *hdev, u8 *data)
552 name_len = strlen(hdev->dev_name);
558 ptr[1] = EIR_NAME_SHORT;
560 ptr[1] = EIR_NAME_COMPLETE;
562 /* EIR Data length */
563 ptr[0] = name_len + 1;
565 memcpy(ptr + 2, hdev->dev_name, name_len);
567 ptr += (name_len + 2);
570 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
572 ptr[1] = EIR_TX_POWER;
573 ptr[2] = (u8) hdev->inq_tx_power;
578 if (hdev->devid_source > 0) {
580 ptr[1] = EIR_DEVICE_ID;
582 put_unaligned_le16(hdev->devid_source, ptr + 2);
583 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
584 put_unaligned_le16(hdev->devid_product, ptr + 6);
585 put_unaligned_le16(hdev->devid_version, ptr + 8);
590 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
591 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
592 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* update_eir() - queue an HCI Write EIR command on @req when needed.
 * Bails out early when the controller is down, lacks extended inquiry,
 * has SSP disabled, or the service cache is active; also skips the
 * write when the freshly built EIR equals the cached hdev->eir.
 */
595 static void update_eir(struct hci_request *req)
597 struct hci_dev *hdev = req->hdev;
598 struct hci_cp_write_eir cp;
600 if (!hdev_is_powered(hdev))
603 if (!lmp_ext_inq_capable(hdev))
606 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
609 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
612 memset(&cp, 0, sizeof(cp));
614 create_eir(hdev, cp.data);
616 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
619 memcpy(hdev->eir, cp.data, sizeof(cp.data));
621 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* get_service_classes() - OR together the service-class hint bits of
 * every registered UUID; becomes the top byte of the Class of Device.
 */
624 static u8 get_service_classes(struct hci_dev *hdev)
626 struct bt_uuid *uuid;
629 list_for_each_entry(uuid, &hdev->uuids, list)
630 val |= uuid->svc_hint;
/* update_class() - queue an HCI Write Class of Device command on @req
 * built from minor/major class plus the service-class bits; skipped
 * while powered off, while the service cache is active, or when the
 * computed CoD matches the cached hdev->dev_class.
 */
635 static void update_class(struct hci_request *req)
637 struct hci_dev *hdev = req->hdev;
640 BT_DBG("%s", hdev->name);
642 if (!hdev_is_powered(hdev))
645 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
648 cod[0] = hdev->minor_class;
649 cod[1] = hdev->major_class;
650 cod[2] = get_service_classes(hdev);
652 if (memcmp(cod, hdev->dev_class, 3) == 0)
655 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* service_cache_off() - delayed-work handler that expires the service
 * cache: clears HCI_SERVICE_CACHE and (per the visible lines) builds
 * and runs an hci_request — presumably refreshing EIR/CoD now that
 * caching ended; the hci_req_add calls fall in a gap of this
 * extraction, so confirm against the full source.
 */
658 static void service_cache_off(struct work_struct *work)
660 struct hci_dev *hdev = container_of(work, struct hci_dev,
662 struct hci_request req;
664 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
667 hci_req_init(&req, hdev);
674 hci_dev_unlock(hdev);
676 hci_req_run(&req, NULL);
/* mgmt_init_hdev() - one-time switch of a controller into mgmt mode:
 * test_and_set_bit(HCI_MGMT) makes this idempotent; also wires up the
 * service-cache delayed work and clears HCI_PAIRABLE so user space
 * must opt in explicitly (see comment below).
 */
679 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
681 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
684 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
686 /* Non-mgmt controlled devices get this bit set
687 * implicitly so that pairing works for them, however
688 * for mgmt we require user-space to explicitly enable
691 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* read_controller_info() - MGMT_OP_READ_INFO handler: snapshot the
 * controller's address, HCI version/manufacturer, supported and
 * current settings, CoD and names into an on-stack reply while the
 * hdev lock is held (the matching hci_dev_lock falls in a gap of this
 * extraction), then complete the command.
 */
694 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
695 void *data, u16 data_len)
697 struct mgmt_rp_read_info rp;
699 BT_DBG("sock %p %s", sk, hdev->name);
703 memset(&rp, 0, sizeof(rp));
705 bacpy(&rp.bdaddr, &hdev->bdaddr);
707 rp.version = hdev->hci_ver;
708 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
710 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
711 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
713 memcpy(rp.dev_class, hdev->dev_class, 3);
715 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
716 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
718 hci_dev_unlock(hdev);
720 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* mgmt_pending_free() - release a pending command record; body falls
 * entirely in a gap of this extraction (upstream frees param/sock and
 * the cmd itself).
 */
724 static void mgmt_pending_free(struct pending_cmd *cmd)
/* mgmt_pending_add() - allocate and enqueue a pending-command record
 * on hdev->mgmt_pending, copying @len bytes of @data as the saved
 * parameters.  NOTE(review): the kmalloc NULL checks, sock refcount
 * handling and return statement are missing from this extraction.
 */
731 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
732 struct hci_dev *hdev, void *data,
735 struct pending_cmd *cmd;
737 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
741 cmd->opcode = opcode;
742 cmd->index = hdev->id;
744 cmd->param = kmalloc(len, GFP_KERNEL);
751 memcpy(cmd->param, data, len);
756 list_add(&cmd->list, &hdev->mgmt_pending);
/* mgmt_pending_foreach() - invoke @cb on every pending command whose
 * opcode matches (opcode 0 means "all"); _safe iteration so the
 * callback may remove entries.
 */
761 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
762 void (*cb)(struct pending_cmd *cmd,
766 struct pending_cmd *cmd, *tmp;
768 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
769 if (opcode > 0 && cmd->opcode != opcode)
/* mgmt_pending_find() - return the first pending command with the
 * given opcode, or NULL (the NULL return falls in a gap here).
 */
776 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
778 struct pending_cmd *cmd;
780 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
781 if (cmd->opcode == opcode)
/* mgmt_pending_remove() - unlink a pending command and free it. */
788 static void mgmt_pending_remove(struct pending_cmd *cmd)
790 list_del(&cmd->list);
791 mgmt_pending_free(cmd);
/* send_settings_rsp() - complete a settings-type command with the
 * current settings bitmask as the response payload.
 */
794 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
796 __le32 settings = cpu_to_le32(get_current_settings(hdev));
798 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* set_powered() - MGMT_OP_SET_POWERED handler.  Validates val is 0/1;
 * if the device was in the HCI_AUTO_OFF grace period, cancels the
 * auto power-off and records the pending command before calling
 * mgmt_powered() directly.  Otherwise: no-op if already in the
 * requested state, BUSY if a set_powered is already pending, else
 * record a pending command and punt the actual power change to the
 * power_on / power_off work on req_workqueue.
 * NOTE(review): hci_dev_lock, several goto/unlock paths and the
 * return are missing from this extraction.
 */
802 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
805 struct mgmt_mode *cp = data;
806 struct pending_cmd *cmd;
809 BT_DBG("request for %s", hdev->name);
811 if (cp->val != 0x00 && cp->val != 0x01)
812 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
813 MGMT_STATUS_INVALID_PARAMS);
817 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
818 cancel_delayed_work(&hdev->power_off);
821 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
823 err = mgmt_powered(hdev, 1);
828 if (!!cp->val == hdev_is_powered(hdev)) {
829 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
833 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
834 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
839 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
846 queue_work(hdev->req_workqueue, &hdev->power_on);
848 queue_work(hdev->req_workqueue, &hdev->power_off.work);
853 hci_dev_unlock(hdev);
/* mgmt_event() - broadcast a mgmt event to all control sockets except
 * @skip_sk: build hdr + payload in an skb, timestamp it, and hand it
 * to hci_send_to_control().  The index is hdev->id when a controller
 * is given, MGMT_INDEX_NONE otherwise (the if/else around the two
 * index assignments falls in a gap of this extraction, as do the
 * alloc check and return).
 */
857 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
858 struct sock *skip_sk)
861 struct mgmt_hdr *hdr;
863 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
867 hdr = (void *) skb_put(skb, sizeof(*hdr));
868 hdr->opcode = cpu_to_le16(event);
870 hdr->index = cpu_to_le16(hdev->id);
872 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
873 hdr->len = cpu_to_le16(data_len);
876 memcpy(skb_put(skb, data_len), data, data_len);
879 __net_timestamp(skb);
881 hci_send_to_control(skb, skip_sk);
/* new_settings() - emit MGMT_EV_NEW_SETTINGS carrying the current
 * settings bitmask to everyone except @skip.
 */
887 static int new_settings(struct hci_dev *hdev, struct sock *skip)
891 ev = cpu_to_le32(get_current_settings(hdev));
893 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* set_discoverable() - MGMT_OP_SET_DISCOVERABLE handler.  Parameter
 * checks: BR/EDR capability required, val must be 0/1, and a timeout
 * makes no sense when disabling.  Under the hdev lock (lock call falls
 * in a gap): timeout requires power; reject while a discoverable/
 * connectable change is pending; discoverable requires connectable.
 * Powered-off case just flips the flag and emits new_settings.
 * Powered case: if already in the requested state only (re)arm or
 * cancel the discov_off timer; otherwise record a pending command and
 * send Write Scan Enable (the scan-bit setup lines preceding 984 are
 * missing from this extraction).
 */
896 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
899 struct mgmt_cp_set_discoverable *cp = data;
900 struct pending_cmd *cmd;
905 BT_DBG("request for %s", hdev->name);
907 if (!lmp_bredr_capable(hdev))
908 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
909 MGMT_STATUS_NOT_SUPPORTED);
911 if (cp->val != 0x00 && cp->val != 0x01)
912 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
913 MGMT_STATUS_INVALID_PARAMS);
915 timeout = __le16_to_cpu(cp->timeout);
916 if (!cp->val && timeout > 0)
917 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
918 MGMT_STATUS_INVALID_PARAMS);
922 if (!hdev_is_powered(hdev) && timeout > 0) {
923 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
924 MGMT_STATUS_NOT_POWERED);
928 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
929 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
930 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
935 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
936 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
937 MGMT_STATUS_REJECTED);
941 if (!hdev_is_powered(hdev)) {
942 bool changed = false;
944 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
945 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
949 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
954 err = new_settings(hdev, sk);
959 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
960 if (hdev->discov_timeout > 0) {
961 cancel_delayed_work(&hdev->discov_off);
962 hdev->discov_timeout = 0;
965 if (cp->val && timeout > 0) {
966 hdev->discov_timeout = timeout;
967 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
968 msecs_to_jiffies(hdev->discov_timeout * 1000));
971 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
975 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
984 scan |= SCAN_INQUIRY;
986 cancel_delayed_work(&hdev->discov_off);
988 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
990 mgmt_pending_remove(cmd);
993 hdev->discov_timeout = timeout;
996 hci_dev_unlock(hdev);
/* set_connectable_complete() - hci_request completion callback for
 * Set Connectable: under the hdev lock, answer and drop the pending
 * command (status handling around line 1012 falls in a gap).
 */
1000 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1002 struct pending_cmd *cmd;
1004 BT_DBG("status 0x%02x", status);
1008 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1012 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1014 mgmt_pending_remove(cmd);
1017 hci_dev_unlock(hdev);
/* set_connectable() - MGMT_OP_SET_CONNECTABLE handler.  Requires a
 * BR/EDR controller and val 0/1.  Powered-off: flip HCI_CONNECTABLE
 * (disabling also clears HCI_DISCOVERABLE), reply and broadcast
 * new_settings.  Powered: reject if a connectable/discoverable change
 * is pending; no-op if page scan already matches; otherwise record a
 * pending command, build the scan byte (setup lines around 1087 are
 * partially missing here) and run Write Scan Enable with
 * set_connectable_complete as the callback.
 */
1020 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1023 struct mgmt_mode *cp = data;
1024 struct pending_cmd *cmd;
1025 struct hci_request req;
1029 BT_DBG("request for %s", hdev->name);
1031 if (!lmp_bredr_capable(hdev))
1032 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1033 MGMT_STATUS_NOT_SUPPORTED);
1035 if (cp->val != 0x00 && cp->val != 0x01)
1036 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1037 MGMT_STATUS_INVALID_PARAMS);
1041 if (!hdev_is_powered(hdev)) {
1042 bool changed = false;
1044 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1048 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1050 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1051 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1054 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1059 err = new_settings(hdev, sk);
1064 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1065 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1066 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1071 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1072 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1076 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1087 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1088 hdev->discov_timeout > 0)
1089 cancel_delayed_work(&hdev->discov_off);
1092 hci_req_init(&req, hdev);
1094 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1096 err = hci_req_run(&req, set_connectable_complete);
1098 mgmt_pending_remove(cmd);
1101 hci_dev_unlock(hdev);
/* set_pairable() - MGMT_OP_SET_PAIRABLE handler: pure flag flip of
 * HCI_PAIRABLE (no HCI traffic), followed by the settings reply and a
 * new_settings broadcast.  The if/else selecting set_bit vs clear_bit
 * falls partly in a gap of this extraction.
 */
1105 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1108 struct mgmt_mode *cp = data;
1111 BT_DBG("request for %s", hdev->name);
1113 if (cp->val != 0x00 && cp->val != 0x01)
1114 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1115 MGMT_STATUS_INVALID_PARAMS);
1120 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1122 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1124 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1128 err = new_settings(hdev, sk);
1131 hci_dev_unlock(hdev);
/* set_link_security() - MGMT_OP_SET_LINK_SECURITY handler.  Requires
 * BR/EDR and val 0/1.  Powered-off: flip HCI_LINK_SECURITY and emit
 * new_settings.  Powered: BUSY if already pending; no-op if the HCI
 * auth flag already matches; otherwise record a pending command and
 * send Write Authentication Enable.
 */
1135 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1138 struct mgmt_mode *cp = data;
1139 struct pending_cmd *cmd;
1143 BT_DBG("request for %s", hdev->name);
1145 if (!lmp_bredr_capable(hdev))
1146 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1147 MGMT_STATUS_NOT_SUPPORTED);
1149 if (cp->val != 0x00 && cp->val != 0x01)
1150 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1151 MGMT_STATUS_INVALID_PARAMS);
1155 if (!hdev_is_powered(hdev)) {
1156 bool changed = false;
1158 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1159 &hdev->dev_flags)) {
1160 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1164 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1169 err = new_settings(hdev, sk);
1174 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1175 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1182 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1183 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1187 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1193 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1195 mgmt_pending_remove(cmd);
1200 hci_dev_unlock(hdev);
/* set_ssp() - MGMT_OP_SET_SSP handler, same shape as
 * set_link_security(): requires SSP capability and val 0/1;
 * powered-off flips HCI_SSP_ENABLED locally, powered sends Write
 * Simple Pairing Mode after the pending/no-op checks.
 */
1204 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1206 struct mgmt_mode *cp = data;
1207 struct pending_cmd *cmd;
1211 BT_DBG("request for %s", hdev->name);
1213 if (!lmp_ssp_capable(hdev))
1214 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1215 MGMT_STATUS_NOT_SUPPORTED);
1217 if (cp->val != 0x00 && cp->val != 0x01)
1218 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1219 MGMT_STATUS_INVALID_PARAMS);
1225 if (!hdev_is_powered(hdev)) {
1226 bool changed = false;
1228 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1229 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1233 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1238 err = new_settings(hdev, sk);
1243 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1244 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1249 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1250 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1254 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1260 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1262 mgmt_pending_remove(cmd);
1267 hci_dev_unlock(hdev);
/* set_hs() - MGMT_OP_SET_HS handler: a pure host-side flag flip of
 * HCI_HS_ENABLED (no HCI command needed for High Speed).  The
 * capability condition guarding the NOT_SUPPORTED status at 1278 falls
 * in a gap of this extraction.
 */
1271 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1273 struct mgmt_mode *cp = data;
1275 BT_DBG("request for %s", hdev->name);
1278 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1279 MGMT_STATUS_NOT_SUPPORTED);
1281 if (cp->val != 0x00 && cp->val != 0x01)
1282 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1283 MGMT_STATUS_INVALID_PARAMS);
1286 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1288 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1290 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* set_le() - MGMT_OP_SET_LE handler.  Requires LE capability and val
 * 0/1.  When powered off, or when the requested state already matches
 * the controller's host-LE capability, only the HCI_LE_ENABLED flag is
 * flipped.  Otherwise (BUSY check first) a pending command is recorded
 * and Write LE Host Supported is sent, with simul derived from
 * lmp_le_br_capable() (the hci_cp.le assignment falls in a gap of this
 * extraction).
 */
1293 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1295 struct mgmt_mode *cp = data;
1296 struct hci_cp_write_le_host_supported hci_cp;
1297 struct pending_cmd *cmd;
1301 BT_DBG("request for %s", hdev->name);
1303 if (!lmp_le_capable(hdev))
1304 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1305 MGMT_STATUS_NOT_SUPPORTED);
1307 if (cp->val != 0x00 && cp->val != 0x01)
1308 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1309 MGMT_STATUS_INVALID_PARAMS);
1314 enabled = lmp_host_le_capable(hdev);
1316 if (!hdev_is_powered(hdev) || val == enabled) {
1317 bool changed = false;
1319 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1320 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1324 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1329 err = new_settings(hdev, sk);
1334 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1335 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1340 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1346 memset(&hci_cp, 0, sizeof(hci_cp));
1350 hci_cp.simul = lmp_le_br_capable(hdev);
1353 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1356 mgmt_pending_remove(cmd);
1359 hci_dev_unlock(hdev);
1363 /* This is a helper function to test for pending mgmt commands that can
1364 * cause CoD or EIR HCI commands. We can only allow one such pending
1365 * mgmt command at a time since otherwise we cannot easily track what
1366 * the current values are, will be, and based on that calculate if a new
1367 * HCI command needs to be sent and if yes with what value.
/* Returns true when any Add/Remove UUID, Set Dev Class or Set Powered
 * command is still pending (the "return true"/"return false" lines
 * fall in gaps of this extraction).
 */
1369 static bool pending_eir_or_class(struct hci_dev *hdev)
1371 struct pending_cmd *cmd;
1373 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1374 switch (cmd->opcode) {
1375 case MGMT_OP_ADD_UUID:
1376 case MGMT_OP_REMOVE_UUID:
1377 case MGMT_OP_SET_DEV_CLASS:
1378 case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases of this
 * with only bytes 12..15 differing.
 */
1386 static const u8 bluetooth_base_uuid[] = {
1387 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1388 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* get_uuid_size() - classify a stored 128-bit UUID as 16, 32 or 128
 * bits: anything not based on the Bluetooth Base UUID is 128-bit;
 * otherwise the 32-bit value at offset 12 decides (the size-returning
 * lines fall in gaps of this extraction).
 */
1391 static u8 get_uuid_size(const u8 *uuid)
1395 if (memcmp(uuid, bluetooth_base_uuid, 12))
1398 val = get_unaligned_le32(&uuid[12]);
/* mgmt_class_complete() - shared completion for the UUID/class
 * commands: answer the pending @mgmt_op with the (possibly updated)
 * dev_class and drop the pending record, under the hdev lock.
 */
1405 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1407 struct pending_cmd *cmd;
1411 cmd = mgmt_pending_find(mgmt_op, hdev);
1415 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1416 hdev->dev_class, 3);
1418 mgmt_pending_remove(cmd);
1421 hci_dev_unlock(hdev);
1424 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1426 BT_DBG("status 0x%02x", status);
1428 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* add_uuid() - MGMT_OP_ADD_UUID handler: reject while another
 * EIR/class-affecting command is pending, append the UUID to
 * hdev->uuids, then build and run an hci_request (the update_class/
 * update_eir calls fall in a gap of this view).  -ENODATA from
 * hci_req_run means no HCI command was needed, so complete
 * immediately; otherwise a pending command is recorded and
 * add_uuid_complete() answers later.
 */
1431 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1433 struct mgmt_cp_add_uuid *cp = data;
1434 struct pending_cmd *cmd;
1435 struct hci_request req;
1436 struct bt_uuid *uuid;
1439 BT_DBG("request for %s", hdev->name);
1443 if (pending_eir_or_class(hdev)) {
1444 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1449 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1455 memcpy(uuid->uuid, cp->uuid, 16);
1456 uuid->svc_hint = cp->svc_hint;
1457 uuid->size = get_uuid_size(cp->uuid);
1459 list_add_tail(&uuid->list, &hdev->uuids);
1461 hci_req_init(&req, hdev);
1466 err = hci_req_run(&req, add_uuid_complete);
1468 if (err != -ENODATA)
1471 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1472 hdev->dev_class, 3);
1476 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1485 hci_dev_unlock(hdev);
/* enable_service_cache() - (re)arm the service-cache window while
 * powered: sets HCI_SERVICE_CACHE and schedules service_cache_off;
 * the return statements fall in gaps of this extraction.
 */
1489 static bool enable_service_cache(struct hci_dev *hdev)
1491 if (!hdev_is_powered(hdev))
1494 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1495 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1503 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1505 BT_DBG("status 0x%02x", status);
1507 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* remove_uuid() - MGMT_OP_REMOVE_UUID handler.  Rejects while another
 * EIR/class command is pending.  An all-zero UUID means "clear all":
 * hci_uuids_clear(), and if the service cache could be re-armed we
 * complete immediately.  Otherwise delete every matching entry;
 * INVALID_PARAMS if none matched (the found-counter lines fall in a
 * gap).  Then the same hci_req_run / -ENODATA / pending-command dance
 * as add_uuid(), completed by remove_uuid_complete().
 */
1510 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1513 struct mgmt_cp_remove_uuid *cp = data;
1514 struct pending_cmd *cmd;
1515 struct bt_uuid *match, *tmp;
1516 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1517 struct hci_request req;
1520 BT_DBG("request for %s", hdev->name);
1524 if (pending_eir_or_class(hdev)) {
1525 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1530 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1531 err = hci_uuids_clear(hdev);
1533 if (enable_service_cache(hdev)) {
1534 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1535 0, hdev->dev_class, 3);
1544 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1545 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1548 list_del(&match->list);
1554 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1555 MGMT_STATUS_INVALID_PARAMS);
1560 hci_req_init(&req, hdev);
1565 err = hci_req_run(&req, remove_uuid_complete);
1567 if (err != -ENODATA)
1570 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1571 hdev->dev_class, 3);
1575 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1584 hci_dev_unlock(hdev);
1588 static void set_class_complete(struct hci_dev *hdev, u8 status)
1590 BT_DBG("status 0x%02x", status);
1592 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* set_dev_class() - MGMT_OP_SET_DEV_CLASS handler.  Requires BR/EDR;
 * rejects while another EIR/class command is pending; validates that
 * the reserved bits of minor (low 2) and major (high 3) are clear.
 * Stores major/minor, and when powered builds an hci_request — if the
 * service cache was active it is flushed first (note the unlock/
 * relock around cancel_delayed_work_sync to avoid deadlock with the
 * work item).  Completion follows the same -ENODATA-vs-pending
 * pattern as add_uuid(), via set_class_complete().
 */
1595 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1598 struct mgmt_cp_set_dev_class *cp = data;
1599 struct pending_cmd *cmd;
1600 struct hci_request req;
1603 BT_DBG("request for %s", hdev->name);
1605 if (!lmp_bredr_capable(hdev))
1606 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1607 MGMT_STATUS_NOT_SUPPORTED);
1611 if (pending_eir_or_class(hdev)) {
1612 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1617 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1618 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1619 MGMT_STATUS_INVALID_PARAMS);
1623 hdev->major_class = cp->major;
1624 hdev->minor_class = cp->minor;
1626 if (!hdev_is_powered(hdev)) {
1627 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1628 hdev->dev_class, 3);
1632 hci_req_init(&req, hdev);
1634 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1635 hci_dev_unlock(hdev);
1636 cancel_delayed_work_sync(&hdev->service_cache);
1643 err = hci_req_run(&req, set_class_complete);
1645 if (err != -ENODATA)
1648 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1649 hdev->dev_class, 3);
1653 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1662 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the list supplied by userspace. Variable-length
 * command: payload must be exactly sizeof(*cp) plus key_count entries.
 */
1666 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1669 struct mgmt_cp_load_link_keys *cp = data;
1670 u16 key_count, expected_len;
1673 key_count = __le16_to_cpu(cp->key_count);
/* Validate the wire length against the claimed key count before
 * touching any of the key entries. */
1675 expected_len = sizeof(*cp) + key_count *
1676 sizeof(struct mgmt_link_key_info);
1677 if (expected_len != len) {
1678 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1680 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1681 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a strict boolean on the wire. */
1684 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1685 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1686 MGMT_STATUS_INVALID_PARAMS);
1688 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every entry (BR/EDR addresses only) so the
 * operation is all-or-nothing. */
1691 for (i = 0; i < key_count; i++) {
1692 struct mgmt_link_key_info *key = &cp->keys[i];
1694 if (key->addr.type != BDADDR_BREDR)
1695 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1696 MGMT_STATUS_INVALID_PARAMS);
/* Replace, don't merge: clear all existing keys first. */
1701 hci_link_keys_clear(hdev);
1703 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1706 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1708 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: store the now-validated keys. */
1710 for (i = 0; i < key_count; i++) {
1711 struct mgmt_link_key_info *key = &cp->keys[i];
1713 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1714 key->type, key->pin_len);
1717 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1719 hci_dev_unlock(hdev);
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given remote address,
 * skipping delivery to @skip_sk (the socket that requested the unpair,
 * which already gets a command reply instead).
 */
1724 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1725 u8 addr_type, struct sock *skip_sk)
1727 struct mgmt_ev_device_unpaired ev;
1729 bacpy(&ev.addr.bdaddr, bdaddr);
1730 ev.addr.type = addr_type;
1732 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete stored pairing material for
 * one remote device (link key for BR/EDR, LTK for LE) and optionally
 * disconnect it. The reply always echoes the target address.
 */
1736 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1739 struct mgmt_cp_unpair_device *cp = data;
1740 struct mgmt_rp_unpair_device rp;
1741 struct hci_cp_disconnect dc;
1742 struct pending_cmd *cmd;
1743 struct hci_conn *conn;
1746 memset(&rp, 0, sizeof(rp));
1747 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1748 rp.addr.type = cp->addr.type;
1750 if (!bdaddr_type_is_valid(cp->addr.type))
1751 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1752 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a strict boolean on the wire. */
1755 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1756 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1757 MGMT_STATUS_INVALID_PARAMS,
1762 if (!hdev_is_powered(hdev)) {
1763 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1764 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Remove the key matching the transport: link key vs. LTK. */
1768 if (cp->addr.type == BDADDR_BREDR)
1769 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1771 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
/* No key found: the device was not paired in the first place. */
1774 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1775 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1779 if (cp->disconnect) {
1780 if (cp->addr.type == BDADDR_BREDR)
1781 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1784 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* Not connected (or no disconnect requested): reply now and
 * broadcast the unpaired event to other mgmt listeners. */
1791 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1793 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect path: defer the reply until the link drops. */
1797 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1804 dc.handle = cpu_to_le16(conn->handle);
1805 dc.reason = 0x13; /* Remote User Terminated Connection */
1806 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1808 mgmt_pending_remove(cmd);
1811 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: tear down the ACL (BR/EDR) or LE link
 * to one remote device. The final reply is deferred via a pending
 * command until the HCI disconnect completes.
 */
1815 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1818 struct mgmt_cp_disconnect *cp = data;
1819 struct mgmt_rp_disconnect rp;
1820 struct hci_cp_disconnect dc;
1821 struct pending_cmd *cmd;
1822 struct hci_conn *conn;
1827 memset(&rp, 0, sizeof(rp));
1828 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1829 rp.addr.type = cp->addr.type;
1831 if (!bdaddr_type_is_valid(cp->addr.type))
1832 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1833 MGMT_STATUS_INVALID_PARAMS,
1838 if (!test_bit(HCI_UP, &hdev->flags)) {
1839 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1840 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be in flight per adapter. */
1844 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1845 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1846 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1850 if (cp->addr.type == BDADDR_BREDR)
1851 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1854 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections have no live link to drop. */
1856 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1857 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1858 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1862 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1868 dc.handle = cpu_to_le16(conn->handle);
1869 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1871 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* If the HCI command could not be sent, drop the pending entry. */
1873 mgmt_pending_remove(cmd);
1876 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type to the mgmt BDADDR_* address
 * type used on the management interface wire format.
 */
1880 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1882 switch (link_type) {
1884 switch (addr_type) {
1885 case ADDR_LE_DEV_PUBLIC:
1886 return BDADDR_LE_PUBLIC;
1889 /* Fallback to LE Random address type */
1890 return BDADDR_LE_RANDOM;
1894 /* Fallback to BR/EDR type */
1895 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections. SCO/eSCO links are filtered out of the
 * reply, so the buffer length is recomputed after the copy loop.
 */
1899 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1902 struct mgmt_rp_get_connections *rp;
1912 if (!hdev_is_powered(hdev)) {
1913 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1914 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply allocation. */
1919 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1920 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1924 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1925 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: copy addresses, skipping SCO/eSCO entries. */
1932 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1933 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1935 bacpy(&rp->addr[i].bdaddr, &c->dst);
1936 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1937 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1942 rp->conn_count = cpu_to_le16(i);
1944 /* Recalculate length in case of filtered SCO connections, etc */
1945 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1947 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1953 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply to the controller on behalf of @sk,
 * tracking it as a pending mgmt command so the HCI completion can be
 * matched back to the requesting socket.
 */
1957 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1958 struct mgmt_cp_pin_code_neg_reply *cp)
1960 struct pending_cmd *cmd;
1963 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1968 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1969 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* On send failure, undo the pending-command bookkeeping. */
1971 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller for a legacy-pairing attempt. If high security was
 * requested but the PIN is not 16 digits, the pairing is rejected with
 * a negative reply instead.
 */
1976 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1979 struct hci_conn *conn;
1980 struct mgmt_cp_pin_code_reply *cp = data;
1981 struct hci_cp_pin_code_reply reply;
1982 struct pending_cmd *cmd;
1989 if (!hdev_is_powered(hdev)) {
1990 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1991 MGMT_STATUS_NOT_POWERED);
/* PIN pairing is BR/EDR only: look up the ACL link. */
1995 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1997 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1998 MGMT_STATUS_NOT_CONNECTED);
/* BT_SECURITY_HIGH requires a full 16-digit PIN; anything shorter
 * must be answered with a negative reply. */
2002 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2003 struct mgmt_cp_pin_code_neg_reply ncp;
2005 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2007 BT_ERR("PIN code is not 16 bytes long");
2009 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2011 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2012 MGMT_STATUS_INVALID_PARAMS);
2017 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2023 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2024 reply.pin_len = cp->pin_len;
2025 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2027 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2029 mgmt_pending_remove(cmd);
2032 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: record the adapter's IO
 * capability for future pairing; purely local state, no HCI traffic.
 */
2036 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2039 struct mgmt_cp_set_io_capability *cp = data;
2045 hdev->io_capability = cp->io_capability;
2047 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2048 hdev->io_capability);
2050 hci_dev_unlock(hdev);
2052 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at @conn, i.e. the pairing attempt this connection belongs to.
 * Returns NULL (fallthrough, outside visible lines) if none matches.
 */
2056 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2058 struct hci_dev *hdev = conn->hdev;
2059 struct pending_cmd *cmd;
2061 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2062 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2065 if (cmd->user_data != conn)
/* Finish a Pair Device operation: send the reply carrying the remote
 * address and @status, detach all connection callbacks so no further
 * events reach this pairing, and drop the pending command.
 */
2074 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2076 struct mgmt_rp_pair_device rp;
2077 struct hci_conn *conn = cmd->user_data;
2079 bacpy(&rp.addr.bdaddr, &conn->dst);
2080 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2082 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2085 /* So we don't get further callbacks for this connection */
2086 conn->connect_cfm_cb = NULL;
2087 conn->security_cfm_cb = NULL;
2088 conn->disconn_cfm_cb = NULL;
2092 mgmt_pending_remove(cmd);
/* Connection/security callback used for BR/EDR pairing: translate the
 * HCI status and complete the matching pending Pair Device command.
 */
2095 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2097 struct pending_cmd *cmd;
2099 BT_DBG("status %u", status);
2101 cmd = find_pairing(conn);
2103 BT_DBG("Unable to find a pending command");
2105 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing: unlike BR/EDR, a
 * successful LE connection alone does not finish pairing (SMP does),
 * so this path completes the pending command on status reported here.
 */
2108 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2110 struct pending_cmd *cmd;
2112 BT_DBG("status %u", status);
2117 cmd = find_pairing(conn);
2119 BT_DBG("Unable to find a pending command");
2121 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Creates (or reuses) the BR/EDR or LE connection, hooks the pairing
 * completion callbacks onto it, and defers the mgmt reply until the
 * security procedure finishes.
 */
2124 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2127 struct mgmt_cp_pair_device *cp = data;
2128 struct mgmt_rp_pair_device rp;
2129 struct pending_cmd *cmd;
2130 u8 sec_level, auth_type;
2131 struct hci_conn *conn;
2136 memset(&rp, 0, sizeof(rp));
2137 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2138 rp.addr.type = cp->addr.type;
2140 if (!bdaddr_type_is_valid(cp->addr.type))
2141 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2142 MGMT_STATUS_INVALID_PARAMS,
2147 if (!hdev_is_powered(hdev)) {
2148 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2149 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot do MITM protection; any
 * other capability requests MITM-protected dedicated bonding. */
2153 sec_level = BT_SECURITY_MEDIUM;
2154 if (cp->io_cap == 0x03)
2155 auth_type = HCI_AT_DEDICATED_BONDING;
2157 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2159 if (cp->addr.type == BDADDR_BREDR)
2160 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2161 cp->addr.type, sec_level, auth_type);
2163 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2164 cp->addr.type, sec_level, auth_type);
/* hci_connect() returns ERR_PTR on failure. */
2169 if (PTR_ERR(conn) == -EBUSY)
2170 status = MGMT_STATUS_BUSY;
2172 status = MGMT_STATUS_CONNECT_FAILED;
2174 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect_cfm_cb already set means another pairing owns this
 * connection. */
2180 if (conn->connect_cfm_cb) {
2182 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2183 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2187 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2194 /* For LE, just connecting isn't a proof that the pairing finished */
2195 if (cp->addr.type == BDADDR_BREDR)
2196 conn->connect_cfm_cb = pairing_complete_cb;
2198 conn->connect_cfm_cb = le_connect_complete_cb;
2200 conn->security_cfm_cb = pairing_complete_cb;
2201 conn->disconn_cfm_cb = pairing_complete_cb;
2202 conn->io_capability = cp->io_cap;
2203 cmd->user_data = conn;
/* Already connected and already at the needed security level:
 * complete immediately. */
2205 if (conn->state == BT_CONNECTED &&
2206 hci_conn_security(conn, sec_level, auth_type))
2207 pairing_complete(cmd, 0);
2212 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-progress Pair
 * Device command for the given address. The pairing itself is
 * completed with MGMT_STATUS_CANCELLED before acking the cancel.
 */
2216 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2219 struct mgmt_addr_info *addr = data;
2220 struct pending_cmd *cmd;
2221 struct hci_conn *conn;
2228 if (!hdev_is_powered(hdev)) {
2229 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2230 MGMT_STATUS_NOT_POWERED);
/* There must be a pending pairing to cancel. */
2234 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2236 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2237 MGMT_STATUS_INVALID_PARAMS);
2241 conn = cmd->user_data;
/* The supplied address must match the pairing's target. */
2243 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2244 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2245 MGMT_STATUS_INVALID_PARAMS);
2249 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2251 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2252 addr, sizeof(*addr));
2254 hci_dev_unlock(hdev);
/* Common backend for all user confirm/passkey (neg) replies.
 * @mgmt_op is the mgmt opcode being answered, @hci_op the HCI command
 * to issue for BR/EDR links. LE pairing responses are routed through
 * SMP instead of HCI. @passkey is only used for passkey replies.
 */
2258 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2259 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2260 u16 hci_op, __le32 passkey)
2262 struct pending_cmd *cmd;
2263 struct hci_conn *conn;
2268 if (!hdev_is_powered(hdev)) {
2269 err = cmd_status(sk, hdev->id, mgmt_op,
2270 MGMT_STATUS_NOT_POWERED);
2274 if (type == BDADDR_BREDR)
2275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2277 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2280 err = cmd_status(sk, hdev->id, mgmt_op,
2281 MGMT_STATUS_NOT_CONNECTED);
/* LE: hand the response to SMP; no HCI command is sent. */
2285 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2286 /* Continue with pairing via SMP */
2287 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2290 err = cmd_status(sk, hdev->id, mgmt_op,
2291 MGMT_STATUS_SUCCESS);
2293 err = cmd_status(sk, hdev->id, mgmt_op,
2294 MGMT_STATUS_FAILED);
2299 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2305 /* Continue with pairing via HCI */
/* Passkey replies carry an extra 32-bit value; all the other ops
 * send just the bdaddr. */
2306 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2307 struct hci_cp_user_passkey_reply cp;
2309 bacpy(&cp.bdaddr, bdaddr);
2310 cp.passkey = passkey;
2311 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2313 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2316 mgmt_pending_remove(cmd);
2319 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode (no passkey).
 */
2323 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2324 void *data, u16 len)
2326 struct mgmt_cp_pin_code_neg_reply *cp = data;
2330 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2331 MGMT_OP_PIN_CODE_NEG_REPLY,
2332 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the fixed length then
 * delegate to user_pairing_resp() (no passkey).
 */
2335 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2338 struct mgmt_cp_user_confirm_reply *cp = data;
2342 if (len != sizeof(*cp))
2343 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2344 MGMT_STATUS_INVALID_PARAMS);
2346 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2347 MGMT_OP_USER_CONFIRM_REPLY,
2348 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() (no passkey).
 */
2351 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2352 void *data, u16 len)
2354 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2358 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2359 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2360 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper delegating to
 * user_pairing_resp(), forwarding the user-entered passkey.
 */
2363 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2366 struct mgmt_cp_user_passkey_reply *cp = data;
2370 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2371 MGMT_OP_USER_PASSKEY_REPLY,
2372 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() (no passkey).
 */
2375 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2376 void *data, u16 len)
2378 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2382 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2383 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2384 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command into @req using the current
 * hdev->dev_name. Part of the batched request built by set_local_name.
 */
2387 static void update_name(struct hci_request *req)
2389 struct hci_dev *hdev = req->hdev;
2390 struct hci_cp_write_local_name cp;
2392 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2394 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion for Set Local Name: reply to the pending
 * mgmt command with either the translated HCI error or success.
 */
2397 static void set_name_complete(struct hci_dev *hdev, u8 status)
2399 struct mgmt_cp_set_local_name *cp;
2400 struct pending_cmd *cmd;
2402 BT_DBG("status 0x%02x", status);
2406 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2413 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2414 mgmt_status(status));
2416 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2419 mgmt_pending_remove(cmd);
2422 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's friendly name
 * and short name. Writes to the controller (and LE advertising data)
 * only when powered; otherwise just stores the values and emits a
 * Local Name Changed event.
 */
2425 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2428 struct mgmt_cp_set_local_name *cp = data;
2429 struct pending_cmd *cmd;
2430 struct hci_request req;
2437 /* If the old values are the same as the new ones just return a
2438 * direct command complete event.
2440 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2441 !memcmp(hdev->short_name, cp->short_name,
2442 sizeof(hdev->short_name))) {
2443 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2448 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: store the name and broadcast the change; the
 * controller will pick it up at power-on. */
2450 if (!hdev_is_powered(hdev)) {
2451 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2453 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2458 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2464 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2470 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2472 hci_req_init(&req, hdev);
/* BR/EDR gets Write Local Name (+EIR); LE gets updated adv data. */
2474 if (lmp_bredr_capable(hdev)) {
2479 if (lmp_le_capable(hdev))
2480 hci_update_ad(&req);
2482 err = hci_req_run(&req, set_name_complete);
2484 mgmt_pending_remove(cmd);
2487 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its
 * SSP out-of-band hash/randomizer. Requires power and SSP support;
 * only one such request may be pending at a time.
 */
2491 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2492 void *data, u16 data_len)
2494 struct pending_cmd *cmd;
2497 BT_DBG("%s", hdev->name);
2501 if (!hdev_is_powered(hdev)) {
2502 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2503 MGMT_STATUS_NOT_POWERED);
2507 if (!lmp_ssp_capable(hdev)) {
2508 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2509 MGMT_STATUS_NOT_SUPPORTED);
2513 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2514 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2519 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2525 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
/* On send failure, undo the pending-command bookkeeping. */
2527 mgmt_pending_remove(cmd);
2530 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store a remote device's SSP
 * OOB hash/randomizer for later pairing; replies synchronously with
 * success/failure and echoes the address.
 */
2534 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2535 void *data, u16 len)
2537 struct mgmt_cp_add_remote_oob_data *cp = data;
2541 BT_DBG("%s ", hdev->name);
2545 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2548 status = MGMT_STATUS_FAILED;
2550 status = MGMT_STATUS_SUCCESS;
2552 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2553 &cp->addr, sizeof(cp->addr));
2555 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB
 * data; a lookup failure maps to INVALID_PARAMS in the reply.
 */
2559 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2560 void *data, u16 len)
2562 struct mgmt_cp_remove_remote_oob_data *cp = data;
2566 BT_DBG("%s", hdev->name);
2570 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2572 status = MGMT_STATUS_INVALID_PARAMS;
2574 status = MGMT_STATUS_SUCCESS;
2576 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2577 status, &cp->addr, sizeof(cp->addr));
2579 hci_dev_unlock(hdev);
/* Second phase of interleaved (BR/EDR + LE) discovery: start the
 * BR/EDR inquiry after the LE scan, resetting discovery state to
 * STOPPED if the inquiry cannot be started.
 */
2583 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2587 BT_DBG("%s", hdev->name);
2591 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2593 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2595 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: kick off device discovery of the
 * requested type (BR/EDR inquiry, LE scan, or interleaved), after
 * checking power state, periodic-inquiry conflicts and that no other
 * discovery is active.
 */
2600 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2601 void *data, u16 len)
2603 struct mgmt_cp_start_discovery *cp = data;
2604 struct pending_cmd *cmd;
2607 BT_DBG("%s", hdev->name);
2611 if (!hdev_is_powered(hdev)) {
2612 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2613 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry owns the baseband; regular discovery would
 * conflict with it. */
2617 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2618 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2623 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2624 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2629 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2635 hdev->discovery.type = cp->type;
/* Dispatch on discovery type; each arm validates the required
 * controller capability before starting. */
2637 switch (hdev->discovery.type) {
2638 case DISCOV_TYPE_BREDR:
2639 if (!lmp_bredr_capable(hdev)) {
2640 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2641 MGMT_STATUS_NOT_SUPPORTED);
2642 mgmt_pending_remove(cmd);
2646 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2649 case DISCOV_TYPE_LE:
2650 if (!lmp_host_le_capable(hdev)) {
2651 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2652 MGMT_STATUS_NOT_SUPPORTED);
2653 mgmt_pending_remove(cmd);
2657 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2658 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
/* Interleaved: LE scan first; the BR/EDR inquiry follows via
 * mgmt_interleaved_discovery() when the scan times out. */
2661 case DISCOV_TYPE_INTERLEAVED:
2662 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2663 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2664 MGMT_STATUS_NOT_SUPPORTED);
2665 mgmt_pending_remove(cmd);
2669 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2670 LE_SCAN_TIMEOUT_BREDR_LE);
2674 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2675 MGMT_STATUS_INVALID_PARAMS);
2676 mgmt_pending_remove(cmd);
2681 mgmt_pending_remove(cmd);
2683 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2686 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: stop an active discovery of the
 * matching type. Depending on the current discovery state this
 * cancels an inquiry, an LE scan, or a pending remote-name request.
 */
2690 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2693 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2694 struct pending_cmd *cmd;
2695 struct hci_cp_remote_name_req_cancel cp;
2696 struct inquiry_entry *e;
2699 BT_DBG("%s", hdev->name);
2703 if (!hci_discovery_active(hdev)) {
2704 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2705 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2706 sizeof(mgmt_cp->type));
/* The requested type must match the discovery that is running. */
2710 if (hdev->discovery.type != mgmt_cp->type) {
2711 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2712 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2713 sizeof(mgmt_cp->type));
2717 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2723 switch (hdev->discovery.state) {
/* Still scanning: cancel whichever mechanism is active. */
2724 case DISCOVERY_FINDING:
2725 if (test_bit(HCI_INQUIRY, &hdev->flags))
2726 err = hci_cancel_inquiry(hdev);
2728 err = hci_cancel_le_scan(hdev);
/* Name resolution phase: cancel the outstanding remote-name
 * request, or finish immediately if none is pending. */
2732 case DISCOVERY_RESOLVING:
2733 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2736 mgmt_pending_remove(cmd);
2737 err = cmd_complete(sk, hdev->id,
2738 MGMT_OP_STOP_DISCOVERY, 0,
2740 sizeof(mgmt_cp->type));
2741 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2745 bacpy(&cp.bdaddr, &e->data.bdaddr);
2746 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2752 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2757 mgmt_pending_remove(cmd);
2759 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2762 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether it already
 * knows the name of a discovered device; unknown names are queued for
 * name resolution (NAME_NEEDED).
 */
2766 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2769 struct mgmt_cp_confirm_name *cp = data;
2770 struct inquiry_entry *e;
2773 BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery session is running. */
2777 if (!hci_discovery_active(hdev)) {
2778 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2779 MGMT_STATUS_FAILED);
2783 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2785 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2786 MGMT_STATUS_INVALID_PARAMS);
2790 if (cp->name_known) {
2791 e->name_state = NAME_KNOWN;
2794 e->name_state = NAME_NEEDED;
2795 hci_inquiry_cache_update_resolve(hdev, e);
2798 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2802 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the adapter's
 * blacklist; synchronous reply echoing the address.
 */
2806 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2809 struct mgmt_cp_block_device *cp = data;
2813 BT_DBG("%s", hdev->name);
2815 if (!bdaddr_type_is_valid(cp->addr.type))
2816 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2817 MGMT_STATUS_INVALID_PARAMS,
2818 &cp->addr, sizeof(cp->addr));
2822 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2824 status = MGMT_STATUS_FAILED;
2826 status = MGMT_STATUS_SUCCESS;
2828 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2829 &cp->addr, sizeof(cp->addr));
2831 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the
 * blacklist; a failed delete maps to INVALID_PARAMS (not blocked).
 */
2836 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2839 struct mgmt_cp_unblock_device *cp = data;
2843 BT_DBG("%s", hdev->name);
2845 if (!bdaddr_type_is_valid(cp->addr.type))
2846 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2847 MGMT_STATUS_INVALID_PARAMS,
2848 &cp->addr, sizeof(cp->addr));
2852 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2854 status = MGMT_STATUS_INVALID_PARAMS;
2856 status = MGMT_STATUS_SUCCESS;
2858 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2859 &cp->addr, sizeof(cp->addr));
2861 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI (Device ID) profile
 * record fields (source, vendor, product, version) and trigger a
 * controller update through an HCI request.
 */
2866 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2869 struct mgmt_cp_set_device_id *cp = data;
2870 struct hci_request req;
2874 BT_DBG("%s", hdev->name);
2876 source = __le16_to_cpu(cp->source);
/* Valid DI sources: 0x0000 disabled, 0x0001 SIG, 0x0002 USB-IF. */
2878 if (source > 0x0002)
2879 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2880 MGMT_STATUS_INVALID_PARAMS);
2884 hdev->devid_source = source;
2885 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2886 hdev->devid_product = __le16_to_cpu(cp->product);
2887 hdev->devid_version = __le16_to_cpu(cp->version);
/* Reply first; the HCI update runs fire-and-forget (NULL complete). */
2889 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2891 hci_req_init(&req, hdev);
2893 hci_req_run(&req, NULL);
2895 hci_dev_unlock(hdev);
/* HCI request completion for Set Fast Connectable: reply to the
 * pending command with the settings on success (and broadcast New
 * Settings), or with the translated HCI error on failure.
 */
2900 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2902 struct pending_cmd *cmd;
2904 BT_DBG("status 0x%02x", status);
2908 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2913 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2914 mgmt_status(status));
2916 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2917 new_settings(hdev, cmd->sk);
2920 mgmt_pending_remove(cmd);
2923 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: switch page scan between
 * interlaced/160 ms interval (fast) and standard/1.28 s (default) by
 * writing page scan activity and type. Requires BR/EDR, controller
 * version >= 1.2, power, and the Connectable setting.
 */
2926 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2927 void *data, u16 len)
2929 struct mgmt_mode *cp = data;
2930 struct hci_cp_write_page_scan_activity acp;
2931 struct pending_cmd *cmd;
2932 struct hci_request req;
2936 BT_DBG("%s", hdev->name);
/* Interlaced page scan needs Bluetooth 1.2 or later. */
2938 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
2939 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2940 MGMT_STATUS_NOT_SUPPORTED);
2942 if (cp->val != 0x00 && cp->val != 0x01)
2943 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2944 MGMT_STATUS_INVALID_PARAMS);
2946 if (!hdev_is_powered(hdev))
2947 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2948 MGMT_STATUS_NOT_POWERED);
2950 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2951 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2952 MGMT_STATUS_REJECTED);
2956 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
2957 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2963 type = PAGE_SCAN_TYPE_INTERLACED;
2965 /* 160 msec page scan interval */
2966 acp.interval = __constant_cpu_to_le16(0x0100);
2968 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2970 /* default 1.28 sec page scan */
2971 acp.interval = __constant_cpu_to_le16(0x0800);
2974 /* default 11.25 msec page scan window */
2975 acp.window = __constant_cpu_to_le16(0x0012);
2977 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
2984 hci_req_init(&req, hdev);
2986 hci_req_add(&req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), &acp);
2987 hci_req_add(&req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2989 err = hci_req_run(&req, fast_connectable_complete);
2991 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2992 MGMT_STATUS_FAILED);
2993 mgmt_pending_remove(cmd);
2997 hci_dev_unlock(hdev);
/* Validate one LTK entry from a Load Long Term Keys request:
 * authenticated and master must be strict booleans and the address
 * type must be an LE type.
 */
3002 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3004 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3006 if (key->master != 0x00 && key->master != 0x01)
3008 if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the adapter's stored
 * SMP long term keys with the supplied list. Variable-length command;
 * all entries are validated before the existing keys are cleared.
 */
3013 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3014 void *cp_data, u16 len)
3016 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3017 u16 key_count, expected_len;
3020 key_count = __le16_to_cpu(cp->key_count);
/* Wire length must match the claimed key count exactly. */
3022 expected_len = sizeof(*cp) + key_count *
3023 sizeof(struct mgmt_ltk_info);
3024 if (expected_len != len) {
3025 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3027 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3028 MGMT_STATUS_INVALID_PARAMS);
3031 BT_DBG("%s key_count %u", hdev->name, key_count);
/* First pass: validate every entry so the load is all-or-nothing. */
3033 for (i = 0; i < key_count; i++) {
3034 struct mgmt_ltk_info *key = &cp->keys[i];
3036 if (!ltk_is_valid(key))
3037 return cmd_status(sk, hdev->id,
3038 MGMT_OP_LOAD_LONG_TERM_KEYS,
3039 MGMT_STATUS_INVALID_PARAMS);
/* Replace, don't merge: clear the existing LTK store. */
3044 hci_smp_ltks_clear(hdev);
3046 for (i = 0; i < key_count; i++) {
3047 struct mgmt_ltk_info *key = &cp->keys[i];
3053 type = HCI_SMP_LTK_SLAVE;
3055 hci_add_ltk(hdev, &key->addr.bdaddr,
3056 bdaddr_to_le(key->addr.type),
3057 type, 0, key->authenticated, key->val,
3058 key->enc_size, key->ediv, key->rand);
3061 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3064 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed directly by opcode in
 * mgmt_control() — entry order MUST match the MGMT_OP_* numbering.
 * var_len=true means the payload may be longer than data_len
 * (data_len is then the minimum); var_len=false requires an exact
 * length match.
 */
3069 static const struct mgmt_handler {
3070 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3074 } mgmt_handlers[] = {
3075 { NULL }, /* 0x0000 (no command) */
3076 { read_version, false, MGMT_READ_VERSION_SIZE },
3077 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3078 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3079 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3080 { set_powered, false, MGMT_SETTING_SIZE },
3081 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3082 { set_connectable, false, MGMT_SETTING_SIZE },
3083 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3084 { set_pairable, false, MGMT_SETTING_SIZE },
3085 { set_link_security, false, MGMT_SETTING_SIZE },
3086 { set_ssp, false, MGMT_SETTING_SIZE },
3087 { set_hs, false, MGMT_SETTING_SIZE },
3088 { set_le, false, MGMT_SETTING_SIZE },
3089 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3090 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3091 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3092 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3093 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3094 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3095 { disconnect, false, MGMT_DISCONNECT_SIZE },
3096 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3097 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3098 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3099 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3100 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3101 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3102 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3103 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3104 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3105 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3106 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3107 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3108 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3109 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3110 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3111 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3112 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3113 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3114 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3115 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for Bluetooth Management commands arriving on an HCI control
 * socket.  Validates the mgmt_hdr, resolves the controller index, looks up
 * the handler in mgmt_handlers[] and checks parameter length before
 * dispatching.  NOTE(review): this listing elides several lines (error
 * labels, kfree, closing braces); comments cover only visible statements. */
3119 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3123 struct mgmt_hdr *hdr;
3124 u16 opcode, index, len;
3125 struct hci_dev *hdev = NULL;
3126 const struct mgmt_handler *handler;
3129 BT_DBG("got %zu bytes", msglen);
/* A message shorter than the fixed header cannot be valid. */
3131 if (msglen < sizeof(*hdr))
3134 buf = kmalloc(msglen, GFP_KERNEL);
3138 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields are little-endian on the wire. */
3144 opcode = __le16_to_cpu(hdr->opcode);
3145 index = __le16_to_cpu(hdr->index);
3146 len = __le16_to_cpu(hdr->len);
/* Declared parameter length must match what was actually received. */
3148 if (len != msglen - sizeof(*hdr)) {
3153 if (index != MGMT_INDEX_NONE) {
3154 hdev = hci_dev_get(index);
3156 err = cmd_status(sk, index, opcode,
3157 MGMT_STATUS_INVALID_INDEX);
3162 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3163 mgmt_handlers[opcode].func == NULL) {
3164 BT_DBG("Unknown op %u", opcode);
3165 err = cmd_status(sk, index, opcode,
3166 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below MGMT_OP_READ_INFO are global (no index); the rest
 * require a specific controller.  Reject mismatched combinations. */
3170 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3171 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3172 err = cmd_status(sk, index, opcode,
3173 MGMT_STATUS_INVALID_INDEX);
3177 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed-length
 * commands need exactly data_len bytes. */
3179 if ((handler->var_len && len < handler->data_len) ||
3180 (!handler->var_len && len != handler->data_len)) {
3181 err = cmd_status(sk, index, opcode,
3182 MGMT_STATUS_INVALID_PARAMS);
3187 mgmt_init_hdev(sk, hdev);
/* Command parameters follow immediately after the header. */
3189 cp = buf + sizeof(*hdr);
3191 err = handler->func(sk, hdev, cp, len);
/* Pending-command iterator callback: send a command-status reply carrying
 * *status (a u8, passed via the opaque data pointer — its declaration is
 * elided in this listing) and remove the pending command. */
3205 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3209 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3210 mgmt_pending_remove(cmd);
/* Announce a newly registered controller to mgmt userspace, but only for
 * controller types mgmt supports (mgmt_valid_hdev gate). */
3213 int mgmt_index_added(struct hci_dev *hdev)
3215 if (!mgmt_valid_hdev(hdev))
3218 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Controller is going away: fail every pending command (opcode 0 = match
 * all) with INVALID_INDEX, then emit the INDEX_REMOVED event. */
3221 int mgmt_index_removed(struct hci_dev *hdev)
3223 u8 status = MGMT_STATUS_INVALID_INDEX;
3225 if (!mgmt_valid_hdev(hdev))
3228 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3230 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3235 struct hci_dev *hdev;
/* Iterator callback for pending settings commands: reply with the current
 * settings, detach the command from the pending list, and remember the
 * first responder's socket in the cmd_lookup so the caller can skip it
 * when broadcasting the resulting settings-changed event. */
3239 static void settings_rsp(struct pending_cmd *cmd, void *data)
3241 struct cmd_lookup *match = data;
3243 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3245 list_del(&cmd->list);
3247 if (match->sk == NULL) {
3248 match->sk = cmd->sk;
/* Hold the socket; the caller releases it after new_settings(). */
3249 sock_hold(match->sk);
3252 mgmt_pending_free(cmd);
/* Queue a WRITE_SCAN_ENABLE derived from the current CONNECTABLE
 * (page scan) and DISCOVERABLE (inquiry scan) flags.  The SCAN_PAGE
 * assignment line is elided in this listing. */
3255 static void set_bredr_scan(struct hci_request *req)
3257 struct hci_dev *hdev = req->hdev;
3260 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3262 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3263 scan |= SCAN_INQUIRY;
3266 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion callback for the power-on initialization built in
 * powered_update_hci(): answer pending SET_POWERED commands and broadcast
 * the new settings (skipping match.sk, filled in by settings_rsp). */
3269 static void powered_complete(struct hci_dev *hdev, u8 status)
3271 struct cmd_lookup match = { NULL, hdev };
3273 BT_DBG("status 0x%02x", status);
3277 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3279 new_settings(hdev, match.sk);
3281 hci_dev_unlock(hdev);
/* Build and run an HCI request bringing the controller in line with the
 * mgmt dev_flags after power-on: SSP mode, LE host support, link-level
 * authentication and BR/EDR scan mode are programmed only when the
 * current hardware state differs from the desired flag state.
 * Returns the result of hci_req_run() (completion: powered_complete). */
3287 static int powered_update_hci(struct hci_dev *hdev)
3289 struct hci_request req;
3292 hci_req_init(&req, hdev);
/* Enable SSP only if requested and not already set on the controller. */
3294 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3295 !lmp_host_ssp_capable(hdev)) {
3298 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3301 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3302 struct hci_cp_write_le_host_supported cp;
3305 cp.simul = lmp_le_br_capable(hdev);
3307 /* Check first if we already have the right
3308 * host state (host features set)
3310 if (cp.le != lmp_host_le_capable(hdev) ||
3311 cp.simul != lmp_host_le_br_capable(hdev))
3312 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Sync authentication (link security) with the HCI_AUTH hardware flag. */
3316 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3317 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3318 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3319 sizeof(link_sec), &link_sec);
3321 if (lmp_bredr_capable(hdev)) {
3322 set_bredr_scan(&req);
3328 return hci_req_run(&req, powered_complete);
/* React to a controller power state change.  On power-on, defer the
 * userspace responses to powered_complete() if powered_update_hci()
 * successfully queued work.  On power-off, answer pending SET_POWERED
 * commands, fail all other pending commands with NOT_POWERED, and emit a
 * zeroed class-of-device event if the stored CoD was non-zero. */
3331 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3333 struct cmd_lookup match = { NULL, hdev };
3334 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3335 u8 zero_cod[] = { 0, 0, 0 };
3338 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3342 if (powered_update_hci(hdev) == 0)
3345 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3350 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3351 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3353 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3354 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3355 zero_cod, sizeof(zero_cod), NULL);
3358 err = new_settings(hdev, match.sk);
/* Sync the HCI_DISCOVERABLE flag with the controller-reported state,
 * answer pending SET_DISCOVERABLE commands and, if the flag actually
 * changed, broadcast new settings. */
3366 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3368 struct cmd_lookup match = { NULL, hdev };
3369 bool changed = false;
3373 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3376 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3380 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3384 err = new_settings(hdev, match.sk);
/* Sync the HCI_CONNECTABLE flag with the controller-reported state and
 * broadcast new settings; if a SET_CONNECTABLE command is pending, its
 * socket is excluded from the broadcast. */
3392 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3394 struct pending_cmd *cmd;
3395 bool changed = false;
3399 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3402 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3406 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3409 err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* A WRITE_SCAN_ENABLE command failed: propagate the translated status to
 * whichever pending command(s) requested the affected scan bits
 * (page scan -> SET_CONNECTABLE, inquiry scan -> SET_DISCOVERABLE). */
3414 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3416 u8 mgmt_err = mgmt_status(status);
3418 if (scan & SCAN_PAGE)
3419 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3420 cmd_status_rsp, &mgmt_err);
3422 if (scan & SCAN_INQUIRY)
3423 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3424 cmd_status_rsp, &mgmt_err);
/* Emit a NEW_LINK_KEY event for a BR/EDR link key; store_hint tells
 * userspace whether the key should be persisted. */
3429 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3432 struct mgmt_ev_new_link_key ev;
3434 memset(&ev, 0, sizeof(ev));
3436 ev.store_hint = persistent;
3437 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3438 ev.key.addr.type = BDADDR_BREDR;
3439 ev.key.type = key->type;
3440 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3441 ev.key.pin_len = key->pin_len;
3443 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_LONG_TERM_KEY event for an SMP LTK.  The master-flag
 * assignment guarded by the HCI_SMP_LTK check is elided in this listing. */
3446 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3448 struct mgmt_ev_new_long_term_key ev;
3450 memset(&ev, 0, sizeof(ev));
3452 ev.store_hint = persistent;
3453 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3454 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3455 ev.key.authenticated = key->authenticated;
3456 ev.key.enc_size = key->enc_size;
3457 ev.key.ediv = key->ediv;
3459 if (key->type == HCI_SMP_LTK)
3462 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3463 memcpy(ev.key.val, key->val, sizeof(key->val));
3465 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit a DEVICE_CONNECTED event.  The remote name (if present) and a
 * non-zero class of device are appended as EIR fields after the fixed
 * event header; the total event size is sizeof(*ev) + eir_len. */
3469 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3470 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3474 struct mgmt_ev_device_connected *ev = (void *) buf;
3477 bacpy(&ev->addr.bdaddr, bdaddr);
3478 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3480 ev->flags = __cpu_to_le32(flags);
3483 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append CoD when it is non-zero. */
3486 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3487 eir_len = eir_append_data(ev->eir, eir_len,
3488 EIR_CLASS_OF_DEV, dev_class, 3);
3490 ev->eir_len = cpu_to_le16(eir_len);
3492 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3493 sizeof(*ev) + eir_len, NULL);
/* Iterator callback: complete a pending DISCONNECT command with success,
 * remember the requesting socket via the sock** data pointer, and remove
 * the pending command. */
3496 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3498 struct mgmt_cp_disconnect *cp = cmd->param;
3499 struct sock **sk = data;
3500 struct mgmt_rp_disconnect rp;
3502 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3503 rp.addr.type = cp->addr.type;
3505 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3511 mgmt_pending_remove(cmd);
/* Iterator callback: finish a pending UNPAIR_DEVICE command — emit the
 * DEVICE_UNPAIRED event (skipping the requester's socket), send the
 * command-complete reply and remove the pending command. */
3514 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3516 struct hci_dev *hdev = data;
3517 struct mgmt_cp_unpair_device *cp = cmd->param;
3518 struct mgmt_rp_unpair_device rp;
3520 memset(&rp, 0, sizeof(rp));
3521 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3522 rp.addr.type = cp->addr.type;
3524 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3526 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3528 mgmt_pending_remove(cmd);
/* A connection dropped: answer any pending DISCONNECT command, emit the
 * DEVICE_DISCONNECTED event (excluding the requester's socket captured by
 * disconnect_rsp), then flush pending UNPAIR_DEVICE commands. */
3531 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3532 u8 link_type, u8 addr_type, u8 reason)
3534 struct mgmt_ev_device_disconnected ev;
3535 struct sock *sk = NULL;
3538 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3540 bacpy(&ev.addr.bdaddr, bdaddr);
3541 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3544 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3550 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: flush pending UNPAIR_DEVICE commands, then
 * complete the pending DISCONNECT command with the translated HCI status. */
3556 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3557 u8 link_type, u8 addr_type, u8 status)
3559 struct mgmt_rp_disconnect rp;
3560 struct pending_cmd *cmd;
3563 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3566 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3570 bacpy(&rp.addr.bdaddr, bdaddr);
3571 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3573 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3574 mgmt_status(status), &rp, sizeof(rp));
3576 mgmt_pending_remove(cmd);
/* Broadcast a CONNECT_FAILED event with the HCI status translated to a
 * mgmt status code. */
3581 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3582 u8 addr_type, u8 status)
3584 struct mgmt_ev_connect_failed ev;
3586 bacpy(&ev.addr.bdaddr, bdaddr);
3587 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3588 ev.status = mgmt_status(status);
3590 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask userspace for a PIN code (BR/EDR only).  The assignment of the
 * `secure` field into the event is elided in this listing. */
3593 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3595 struct mgmt_ev_pin_code_request ev;
3597 bacpy(&ev.addr.bdaddr, bdaddr);
3598 ev.addr.type = BDADDR_BREDR;
3601 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* Complete a pending PIN_CODE_REPLY with the translated HCI status. */
3605 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3608 struct pending_cmd *cmd;
3609 struct mgmt_rp_pin_code_reply rp;
3612 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3616 bacpy(&rp.addr.bdaddr, bdaddr);
3617 rp.addr.type = BDADDR_BREDR;
3619 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3620 mgmt_status(status), &rp, sizeof(rp));
3622 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY with the translated HCI status.
 * Mirrors mgmt_pin_code_reply_complete for the negative reply opcode. */
3627 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3630 struct pending_cmd *cmd;
3631 struct mgmt_rp_pin_code_reply rp;
3634 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3638 bacpy(&rp.addr.bdaddr, bdaddr);
3639 rp.addr.type = BDADDR_BREDR;
3641 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3642 mgmt_status(status), &rp, sizeof(rp));
3644 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a pairing value (SSP numeric comparison).
 * The assignment of `value` into the event is elided in this listing. */
3649 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3650 u8 link_type, u8 addr_type, __le32 value,
3653 struct mgmt_ev_user_confirm_request ev;
3655 BT_DBG("%s", hdev->name);
3657 bacpy(&ev.addr.bdaddr, bdaddr);
3658 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3659 ev.confirm_hint = confirm_hint;
3662 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey for the given remote address. */
3666 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3667 u8 link_type, u8 addr_type)
3669 struct mgmt_ev_user_passkey_request ev;
3671 BT_DBG("%s", hdev->name);
3673 bacpy(&ev.addr.bdaddr, bdaddr);
3674 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3676 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey (neg-)reply
 * opcodes: find the matching pending command, complete it with the
 * translated HCI status and remove it. */
3680 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3681 u8 link_type, u8 addr_type, u8 status,
3684 struct pending_cmd *cmd;
3685 struct mgmt_rp_user_confirm_reply rp;
3688 cmd = mgmt_pending_find(opcode, hdev);
3692 bacpy(&rp.addr.bdaddr, bdaddr);
3693 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3694 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3697 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
3702 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3703 u8 link_type, u8 addr_type, u8 status)
3705 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3706 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
3709 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3710 u8 link_type, u8 addr_type, u8 status)
3712 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3714 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
3717 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3718 u8 link_type, u8 addr_type, u8 status)
3720 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3721 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
3724 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3725 u8 link_type, u8 addr_type, u8 status)
3727 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3729 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Broadcast a PASSKEY_NOTIFY event so userspace can display the passkey;
 * `entered` counts keypresses on the remote side. */
3732 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3733 u8 link_type, u8 addr_type, u32 passkey,
3736 struct mgmt_ev_passkey_notify ev;
3738 BT_DBG("%s", hdev->name);
3740 bacpy(&ev.addr.bdaddr, bdaddr);
3741 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3742 ev.passkey = __cpu_to_le32(passkey);
3743 ev.entered = entered;
3745 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast an AUTH_FAILED event with the translated HCI status. */
3748 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3749 u8 addr_type, u8 status)
3751 struct mgmt_ev_auth_failed ev;
3753 bacpy(&ev.addr.bdaddr, bdaddr);
3754 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3755 ev.status = mgmt_status(status);
3757 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* WRITE_AUTH_ENABLE completed: on failure report the error to pending
 * SET_LINK_SECURITY commands; on success sync HCI_LINK_SECURITY with the
 * hardware HCI_AUTH flag, answer pending commands and, if the flag
 * changed, broadcast new settings. */
3760 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3762 struct cmd_lookup match = { NULL, hdev };
3763 bool changed = false;
3767 u8 mgmt_err = mgmt_status(status);
3768 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3769 cmd_status_rsp, &mgmt_err);
3773 if (test_bit(HCI_AUTH, &hdev->flags)) {
3774 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3777 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3781 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3785 err = new_settings(hdev, match.sk);
/* Queue a WRITE_EIR with an all-zero record, clearing both the cached
 * copy in hdev->eir and the controller's extended inquiry response.
 * No-op on controllers without extended inquiry support. */
3793 static void clear_eir(struct hci_request *req)
3795 struct hci_dev *hdev = req->hdev;
3796 struct hci_cp_write_eir cp;
3798 if (!lmp_ext_inq_capable(hdev))
3801 memset(hdev->eir, 0, sizeof(hdev->eir));
3803 memset(&cp, 0, sizeof(cp));
3805 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* WRITE_SSP_MODE completed: on failure roll back the HCI_SSP_ENABLED flag
 * (if we had optimistically set it) and report the error to pending
 * SET_SSP commands; on success sync the flag with `enable`, answer
 * pending commands, broadcast settings if changed, and refresh or clear
 * the EIR record accordingly (update_eir branch elided in this listing). */
3808 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3810 struct cmd_lookup match = { NULL, hdev };
3811 struct hci_request req;
3812 bool changed = false;
3816 u8 mgmt_err = mgmt_status(status);
3818 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3820 err = new_settings(hdev, NULL);
3822 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3829 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3832 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3836 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3839 err = new_settings(hdev, match.sk);
3844 hci_req_init(&req, hdev);
3846 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3851 hci_req_run(&req, NULL);
/* Iterator callback: capture (and hold) the first pending command's
 * socket into the cmd_lookup, without completing or removing commands. */
3856 static void sk_lookup(struct pending_cmd *cmd, void *data)
3858 struct cmd_lookup *match = data;
3860 if (match->sk == NULL) {
3861 match->sk = cmd->sk;
3862 sock_hold(match->sk);
/* Class-of-device write completed: find the socket that triggered it
 * (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) and broadcast the
 * CLASS_OF_DEV_CHANGED event.  Socket release is elided in this listing. */
3866 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3869 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3872 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3873 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3874 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3877 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Local name write completed: cache the new name in hdev->dev_name and
 * broadcast LOCAL_NAME_CHANGED — unless the write was part of the
 * power-on sequence (pending SET_POWERED), in which case stay silent. */
3886 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3888 struct mgmt_cp_set_local_name ev;
3889 struct pending_cmd *cmd;
3894 memset(&ev, 0, sizeof(ev));
3895 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3896 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3898 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3900 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3902 /* If this is a HCI command related to powering on the
3903 * HCI dev don't send any mgmt signals.
3905 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3909 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3910 cmd ? cmd->sk : NULL);
/* READ_LOCAL_OOB_DATA completed: on error reply with a command status,
 * otherwise return the hash/randomizer pair to the pending requester. */
3913 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3914 u8 *randomizer, u8 status)
3916 struct pending_cmd *cmd;
3919 BT_DBG("%s status %u", hdev->name, status);
3921 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3926 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3927 mgmt_status(status));
3929 struct mgmt_rp_read_local_oob_data rp;
3931 memcpy(rp.hash, hash, sizeof(rp.hash));
3932 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3934 err = cmd_complete(cmd->sk, hdev->id,
3935 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3939 mgmt_pending_remove(cmd);
/* WRITE_LE_HOST_SUPPORTED completed: on failure roll back HCI_LE_ENABLED
 * (if optimistically set) and report the error to pending SET_LE
 * commands; on success sync the flag with `enable`, answer pending
 * commands and broadcast settings if the flag changed. */
3944 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3946 struct cmd_lookup match = { NULL, hdev };
3947 bool changed = false;
3951 u8 mgmt_err = mgmt_status(status);
3953 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3955 err = new_settings(hdev, NULL);
3957 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3964 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3967 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3971 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3974 err = new_settings(hdev, match.sk);
/* Emit a DEVICE_FOUND event for a discovery result.  The caller's EIR
 * data is copied verbatim; a CoD field is appended only if the EIR does
 * not already contain one.  Oversized results (event + EIR + 5 spare
 * bytes for CoD exceeding the local buffer) are rejected. */
3982 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3983 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3984 ssp, u8 *eir, u16 eir_len)
3987 struct mgmt_ev_device_found *ev = (void *) buf;
3990 /* Leave 5 bytes for a potential CoD field */
3991 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3994 memset(buf, 0, sizeof(buf));
3996 bacpy(&ev->addr.bdaddr, bdaddr);
3997 ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Flag name-confirmation / legacy-pairing hints for userspace. */
4000 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4002 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4005 memcpy(ev->eir, eir, eir_len);
4007 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4008 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4011 ev->eir_len = cpu_to_le16(eir_len);
4012 ev_size = sizeof(*ev) + eir_len;
4014 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a DEVICE_FOUND event carrying only the resolved remote name as an
 * EIR_NAME_COMPLETE field (used after a remote name request). */
4017 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4018 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4020 struct mgmt_ev_device_found *ev;
4021 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4024 ev = (struct mgmt_ev_device_found *) buf;
4026 memset(buf, 0, sizeof(buf));
4028 bacpy(&ev->addr.bdaddr, bdaddr);
4029 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4032 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4035 ev->eir_len = cpu_to_le16(eir_len);
4037 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4038 sizeof(*ev) + eir_len, NULL);
/* Starting discovery failed: reset discovery state to STOPPED and
 * complete the pending START_DISCOVERY command, echoing the discovery
 * type with the translated HCI status. */
4041 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
4043 struct pending_cmd *cmd;
4047 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4049 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4053 type = hdev->discovery.type;
4055 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4056 &type, sizeof(type));
4057 mgmt_pending_remove(cmd);
/* Stopping discovery failed: complete the pending STOP_DISCOVERY command
 * with the translated HCI status and the current discovery type. */
4062 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
4064 struct pending_cmd *cmd;
4067 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4071 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4072 &hdev->discovery.type, sizeof(hdev->discovery.type));
4073 mgmt_pending_remove(cmd);
/* Discovery state toggled: complete whichever START/STOP_DISCOVERY
 * command is pending (selection logic partially elided in this listing),
 * then broadcast the DISCOVERING event with the current type. */
4078 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4080 struct mgmt_ev_discovering ev;
4081 struct pending_cmd *cmd;
4083 BT_DBG("%s discovering %u", hdev->name, discovering);
4086 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4088 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4091 u8 type = hdev->discovery.type;
4093 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4095 mgmt_pending_remove(cmd);
4098 memset(&ev, 0, sizeof(ev));
4099 ev.type = hdev->discovery.type;
4100 ev.discovering = discovering;
4102 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast DEVICE_BLOCKED, excluding the socket of the pending
 * BLOCK_DEVICE command (if any) that triggered it. */
4105 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4107 struct pending_cmd *cmd;
4108 struct mgmt_ev_device_blocked ev;
4110 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4112 bacpy(&ev.addr.bdaddr, bdaddr);
4113 ev.addr.type = type;
4115 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4116 cmd ? cmd->sk : NULL);
/* Broadcast DEVICE_UNBLOCKED, excluding the socket of the pending
 * UNBLOCK_DEVICE command (if any) that triggered it. */
4119 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4121 struct pending_cmd *cmd;
4122 struct mgmt_ev_device_unblocked ev;
4124 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4126 bacpy(&ev.addr.bdaddr, bdaddr);
4127 ev.addr.type = type;
4129 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4130 cmd ? cmd->sk : NULL);
/* Module parameter (mode 0644: root-writable via sysfs): toggles High
 * Speed (AMP) support advertised through the mgmt interface. */
4133 module_param(enable_hs, bool, 0644);
4134 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");