2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 2
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/*
 * These LE scan and inquiry parameters were chosen according to LE General
 * Discovery Procedure specification.
 */
109 #define LE_SCAN_TYPE 0x01
110 #define LE_SCAN_WIN 0x12
111 #define LE_SCAN_INT 0x12
112 #define LE_SCAN_TIMEOUT_LE_ONLY 10240 /* TGAP(gen_disc_scan_min) */
113 #define LE_SCAN_TIMEOUT_BREDR_LE 5120 /* TGAP(100)/2 */
115 #define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
116 #define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */
118 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
120 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
121 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
124 struct list_head list;
132 /* HCI to MGMT error code conversion table */
133 static u8 mgmt_status_table[] = {
135 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
136 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
137 MGMT_STATUS_FAILED, /* Hardware Failure */
138 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
139 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
140 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
141 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
142 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
143 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
144 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
145 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
146 MGMT_STATUS_BUSY, /* Command Disallowed */
147 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
148 MGMT_STATUS_REJECTED, /* Rejected Security */
149 MGMT_STATUS_REJECTED, /* Rejected Personal */
150 MGMT_STATUS_TIMEOUT, /* Host Timeout */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
152 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
153 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
154 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
155 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
156 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
157 MGMT_STATUS_BUSY, /* Repeated Attempts */
158 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
159 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
161 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
162 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
163 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
165 MGMT_STATUS_FAILED, /* Unspecified Error */
166 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
167 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
168 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
169 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
170 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
171 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
172 MGMT_STATUS_FAILED, /* Unit Link Key Used */
173 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
174 MGMT_STATUS_TIMEOUT, /* Instant Passed */
175 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
176 MGMT_STATUS_FAILED, /* Transaction Collision */
177 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
178 MGMT_STATUS_REJECTED, /* QoS Rejected */
179 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
180 MGMT_STATUS_REJECTED, /* Insufficient Security */
181 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
182 MGMT_STATUS_BUSY, /* Role Switch Pending */
183 MGMT_STATUS_FAILED, /* Slot Violation */
184 MGMT_STATUS_FAILED, /* Role Switch Failed */
185 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
186 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
187 MGMT_STATUS_BUSY, /* Host Busy Pairing */
188 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
189 MGMT_STATUS_BUSY, /* Controller Busy */
190 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
191 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
192 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
193 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
194 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
197 bool mgmt_valid_hdev(struct hci_dev *hdev)
199 return hdev->dev_type == HCI_BREDR;
202 static u8 mgmt_status(u8 hci_status)
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
207 return MGMT_STATUS_FAILED;
210 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
213 struct mgmt_hdr *hdr;
214 struct mgmt_ev_cmd_status *ev;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
219 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev));
229 ev = (void *) skb_put(skb, sizeof(*ev));
231 ev->opcode = cpu_to_le16(cmd);
233 err = sock_queue_rcv_skb(sk, skb);
240 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
241 void *rp, size_t rp_len)
244 struct mgmt_hdr *hdr;
245 struct mgmt_ev_cmd_complete *ev;
248 BT_DBG("sock %p", sk);
250 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 hdr = (void *) skb_put(skb, sizeof(*hdr));
256 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
260 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
261 ev->opcode = cpu_to_le16(cmd);
265 memcpy(ev->data, rp, rp_len);
267 err = sock_queue_rcv_skb(sk, skb);
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_version rp;
279 BT_DBG("sock %p", sk);
281 rp.version = MGMT_VERSION;
282 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
298 BT_DBG("sock %p", sk);
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
302 rp = kmalloc(rp_size, GFP_KERNEL);
306 rp->num_commands = __constant_cpu_to_le16(num_commands);
307 rp->num_events = __constant_cpu_to_le16(num_events);
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
325 struct mgmt_rp_read_index_list *rp;
331 BT_DBG("sock %p", sk);
333 read_lock(&hci_dev_list_lock);
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (!mgmt_valid_hdev(d))
343 rp_len = sizeof(*rp) + (2 * count);
344 rp = kmalloc(rp_len, GFP_ATOMIC);
346 read_unlock(&hci_dev_list_lock);
350 rp->num_controllers = cpu_to_le16(count);
353 list_for_each_entry(d, &hci_dev_list, list) {
354 if (test_bit(HCI_SETUP, &d->dev_flags))
357 if (!mgmt_valid_hdev(d))
360 rp->index[i++] = cpu_to_le16(d->id);
361 BT_DBG("Added hci%u", d->id);
364 read_unlock(&hci_dev_list_lock);
366 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
374 static u32 get_supported_settings(struct hci_dev *hdev)
378 settings |= MGMT_SETTING_POWERED;
379 settings |= MGMT_SETTING_CONNECTABLE;
380 settings |= MGMT_SETTING_FAST_CONNECTABLE;
381 settings |= MGMT_SETTING_DISCOVERABLE;
382 settings |= MGMT_SETTING_PAIRABLE;
384 if (lmp_ssp_capable(hdev))
385 settings |= MGMT_SETTING_SSP;
387 if (lmp_bredr_capable(hdev)) {
388 settings |= MGMT_SETTING_BREDR;
389 settings |= MGMT_SETTING_LINK_SECURITY;
393 settings |= MGMT_SETTING_HS;
395 if (lmp_le_capable(hdev))
396 settings |= MGMT_SETTING_LE;
401 static u32 get_current_settings(struct hci_dev *hdev)
405 if (hdev_is_powered(hdev))
406 settings |= MGMT_SETTING_POWERED;
408 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
409 settings |= MGMT_SETTING_CONNECTABLE;
411 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
412 settings |= MGMT_SETTING_DISCOVERABLE;
414 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
415 settings |= MGMT_SETTING_PAIRABLE;
417 if (lmp_bredr_capable(hdev))
418 settings |= MGMT_SETTING_BREDR;
420 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_LE;
423 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
424 settings |= MGMT_SETTING_LINK_SECURITY;
426 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
427 settings |= MGMT_SETTING_SSP;
429 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
430 settings |= MGMT_SETTING_HS;
435 #define PNP_INFO_SVCLASS_ID 0x1200
437 static u8 bluetooth_base_uuid[] = {
438 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
439 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
442 static u16 get_uuid16(u8 *uuid128)
447 for (i = 0; i < 12; i++) {
448 if (bluetooth_base_uuid[i] != uuid128[i])
452 val = get_unaligned_le32(&uuid128[12]);
/* Assemble the Extended Inquiry Response payload into @data: local name
 * (complete or shortened), TX power, Device ID record, and the list of
 * 16-bit service UUIDs (deduplicated, PnP entry skipped, truncated when
 * space runs out).
 * NOTE(review): many lines of this function (declarations of ptr/eir_len/
 * name_len/length, several closing braces and length updates) were lost
 * in the extraction — the fragments below are kept verbatim; restore
 * from the original file before compiling.
 */
459 static void create_eir(struct hci_dev *hdev, u8 *data)
463 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
464 int i, truncated = 0;
465 struct bt_uuid *uuid;
/* Local name: shortened vs complete tag chosen by the (missing) length
 * comparison above these lines.
 */
468 name_len = strlen(hdev->dev_name);
474 ptr[1] = EIR_NAME_SHORT;
476 ptr[1] = EIR_NAME_COMPLETE;
478 /* EIR Data length */
479 ptr[0] = name_len + 1;
481 memcpy(ptr + 2, hdev->dev_name, name_len);
483 eir_len += (name_len + 2);
484 ptr += (name_len + 2);
/* Optional inquiry TX power field */
487 if (hdev->inq_tx_power) {
489 ptr[1] = EIR_TX_POWER;
490 ptr[2] = (u8) hdev->inq_tx_power;
/* Optional Device ID record (source, vendor, product, version) */
496 if (hdev->devid_source > 0) {
498 ptr[1] = EIR_DEVICE_ID;
500 put_unaligned_le16(hdev->devid_source, ptr + 2);
501 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
502 put_unaligned_le16(hdev->devid_product, ptr + 6);
503 put_unaligned_le16(hdev->devid_version, ptr + 8);
509 memset(uuid16_list, 0, sizeof(uuid16_list));
511 /* Group all UUID16 types */
512 list_for_each_entry(uuid, &hdev->uuids, list) {
515 uuid16 = get_uuid16(uuid->uuid);
/* PnP Information service is deliberately excluded from the EIR */
522 if (uuid16 == PNP_INFO_SVCLASS_ID)
525 /* Stop if not enough space to put next UUID */
526 if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
531 /* Check for duplicates */
532 for (i = 0; uuid16_list[i] != 0; i++)
533 if (uuid16_list[i] == uuid16)
536 if (uuid16_list[i] == 0) {
537 uuid16_list[i] = uuid16;
538 eir_len += sizeof(u16);
/* Emit the UUID16 field; SOME vs ALL tag depends on truncation */
542 if (uuid16_list[0] != 0) {
546 ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
551 for (i = 0; uuid16_list[i] != 0; i++) {
552 *ptr++ = (uuid16_list[i] & 0x00ff);
553 *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
556 /* EIR Data length */
557 *length = (i * sizeof(u16)) + 1;
561 static int update_eir(struct hci_dev *hdev)
563 struct hci_cp_write_eir cp;
565 if (!hdev_is_powered(hdev))
568 if (!(hdev->features[6] & LMP_EXT_INQ))
571 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
574 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
577 memset(&cp, 0, sizeof(cp));
579 create_eir(hdev, cp.data);
581 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
584 memcpy(hdev->eir, cp.data, sizeof(cp.data));
586 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
589 static u8 get_service_classes(struct hci_dev *hdev)
591 struct bt_uuid *uuid;
594 list_for_each_entry(uuid, &hdev->uuids, list)
595 val |= uuid->svc_hint;
600 static int update_class(struct hci_dev *hdev)
605 BT_DBG("%s", hdev->name);
607 if (!hdev_is_powered(hdev))
610 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
613 cod[0] = hdev->minor_class;
614 cod[1] = hdev->major_class;
615 cod[2] = get_service_classes(hdev);
617 if (memcmp(cod, hdev->dev_class, 3) == 0)
620 err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
622 set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
627 static void service_cache_off(struct work_struct *work)
629 struct hci_dev *hdev = container_of(work, struct hci_dev,
632 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
640 hci_dev_unlock(hdev);
643 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
645 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
648 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
650 /* Non-mgmt controlled devices get this bit set
651 * implicitly so that pairing works for them, however
652 * for mgmt we require user-space to explicitly enable
655 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
658 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
659 void *data, u16 data_len)
661 struct mgmt_rp_read_info rp;
663 BT_DBG("sock %p %s", sk, hdev->name);
667 memset(&rp, 0, sizeof(rp));
669 bacpy(&rp.bdaddr, &hdev->bdaddr);
671 rp.version = hdev->hci_ver;
672 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
674 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
675 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
677 memcpy(rp.dev_class, hdev->dev_class, 3);
679 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
680 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
682 hci_dev_unlock(hdev);
684 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
688 static void mgmt_pending_free(struct pending_cmd *cmd)
695 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
696 struct hci_dev *hdev, void *data,
699 struct pending_cmd *cmd;
701 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
705 cmd->opcode = opcode;
706 cmd->index = hdev->id;
708 cmd->param = kmalloc(len, GFP_KERNEL);
715 memcpy(cmd->param, data, len);
720 list_add(&cmd->list, &hdev->mgmt_pending);
725 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
726 void (*cb)(struct pending_cmd *cmd,
730 struct list_head *p, *n;
732 list_for_each_safe(p, n, &hdev->mgmt_pending) {
733 struct pending_cmd *cmd;
735 cmd = list_entry(p, struct pending_cmd, list);
737 if (opcode > 0 && cmd->opcode != opcode)
744 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
746 struct pending_cmd *cmd;
748 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
749 if (cmd->opcode == opcode)
756 static void mgmt_pending_remove(struct pending_cmd *cmd)
758 list_del(&cmd->list);
759 mgmt_pending_free(cmd);
762 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
764 __le32 settings = cpu_to_le32(get_current_settings(hdev));
766 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
770 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
773 struct mgmt_mode *cp = data;
774 struct pending_cmd *cmd;
777 BT_DBG("request for %s", hdev->name);
781 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
782 cancel_delayed_work(&hdev->power_off);
785 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
786 mgmt_powered(hdev, 1);
791 if (!!cp->val == hdev_is_powered(hdev)) {
792 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
796 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
797 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
802 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
809 schedule_work(&hdev->power_on);
811 schedule_work(&hdev->power_off.work);
816 hci_dev_unlock(hdev);
820 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
821 struct sock *skip_sk)
824 struct mgmt_hdr *hdr;
826 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
830 hdr = (void *) skb_put(skb, sizeof(*hdr));
831 hdr->opcode = cpu_to_le16(event);
833 hdr->index = cpu_to_le16(hdev->id);
835 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
836 hdr->len = cpu_to_le16(data_len);
839 memcpy(skb_put(skb, data_len), data, data_len);
842 __net_timestamp(skb);
844 hci_send_to_control(skb, skip_sk);
850 static int new_settings(struct hci_dev *hdev, struct sock *skip)
854 ev = cpu_to_le32(get_current_settings(hdev));
856 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler: toggle inquiry scan, with an
 * optional timeout after which the discov_off delayed work turns
 * discoverability back off. Rejects a timeout together with val==0,
 * requires power for timed mode, requires connectable, and handles the
 * powered-off case by flipping only the flag.
 * NOTE(review): declarations (timeout/scan/err), hci_dev_lock(), goto
 * labels and several braces were lost in the extraction; the fragments
 * below are kept verbatim.
 */
859 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
862 struct mgmt_cp_set_discoverable *cp = data;
863 struct pending_cmd *cmd;
868 BT_DBG("request for %s", hdev->name);
/* A timeout is only meaningful when turning discoverability on */
870 timeout = __le16_to_cpu(cp->timeout);
871 if (!cp->val && timeout > 0)
872 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
873 MGMT_STATUS_INVALID_PARAMS);
877 if (!hdev_is_powered(hdev) && timeout > 0) {
878 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
879 MGMT_STATUS_NOT_POWERED);
883 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
884 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
885 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable */
890 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
891 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
892 MGMT_STATUS_REJECTED);
/* Powered off: just flip the flag and report new settings */
896 if (!hdev_is_powered(hdev)) {
897 bool changed = false;
899 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
900 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
904 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
909 err = new_settings(hdev, sk);
/* Already in the requested scan state: only (re)arm the timeout */
914 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
915 if (hdev->discov_timeout > 0) {
916 cancel_delayed_work(&hdev->discov_off);
917 hdev->discov_timeout = 0;
920 if (cp->val && timeout > 0) {
921 hdev->discov_timeout = timeout;
922 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
923 msecs_to_jiffies(hdev->discov_timeout * 1000));
926 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
930 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
939 scan |= SCAN_INQUIRY;
941 cancel_delayed_work(&hdev->discov_off);
943 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
945 mgmt_pending_remove(cmd);
948 hdev->discov_timeout = timeout;
951 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: toggle page scan. Turning
 * connectable off also clears discoverable (it depends on it). When
 * powered off only the flags change; otherwise a WRITE_SCAN_ENABLE is
 * sent and completion arrives asynchronously via the pending command.
 * NOTE(review): declarations (scan/err), hci_dev_lock(), goto labels
 * and several braces were lost in the extraction; fragments kept
 * verbatim.
 */
955 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
958 struct mgmt_mode *cp = data;
959 struct pending_cmd *cmd;
963 BT_DBG("request for %s", hdev->name);
967 if (!hdev_is_powered(hdev)) {
968 bool changed = false;
970 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
974 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
976 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Dropping connectable also drops discoverable */
977 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
980 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
985 err = new_settings(hdev, sk);
990 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
991 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
992 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
997 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
998 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1002 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
/* Going non-connectable cancels any running discoverable timeout */
1013 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1014 hdev->discov_timeout > 0)
1015 cancel_delayed_work(&hdev->discov_off);
1018 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1020 mgmt_pending_remove(cmd);
1023 hci_dev_unlock(hdev);
1027 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1030 struct mgmt_mode *cp = data;
1033 BT_DBG("request for %s", hdev->name);
1038 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1040 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1042 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1046 err = new_settings(hdev, sk);
1049 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: toggle HCI authentication
 * (WRITE_AUTH_ENABLE). Powered-off requests only flip the flag;
 * otherwise the command is tracked as pending until the HCI completion.
 * NOTE(review): declarations (val/err), hci_dev_lock(), goto labels and
 * braces were lost in the extraction; fragments kept verbatim.
 */
1053 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1056 struct mgmt_mode *cp = data;
1057 struct pending_cmd *cmd;
1061 BT_DBG("request for %s", hdev->name);
1065 if (!hdev_is_powered(hdev)) {
1066 bool changed = false;
1068 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1069 &hdev->dev_flags)) {
1070 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1074 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1079 err = new_settings(hdev, sk);
1084 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1085 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state */
1092 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1093 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1097 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1103 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1105 mgmt_pending_remove(cmd);
1110 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: toggle Simple Secure Pairing mode
 * (WRITE_SSP_MODE). Rejected when the controller is not SSP capable;
 * powered-off requests only flip the flag.
 * NOTE(review): declarations (val/err), hci_dev_lock(), goto labels and
 * braces were lost in the extraction; fragments kept verbatim.
 */
1114 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1116 struct mgmt_mode *cp = data;
1117 struct pending_cmd *cmd;
1121 BT_DBG("request for %s", hdev->name);
1125 if (!lmp_ssp_capable(hdev)) {
1126 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1127 MGMT_STATUS_NOT_SUPPORTED);
1133 if (!hdev_is_powered(hdev)) {
1134 bool changed = false;
1136 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1137 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1141 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1146 err = new_settings(hdev, sk);
1151 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1152 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1157 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1158 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1162 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1168 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1170 mgmt_pending_remove(cmd);
1175 hci_dev_unlock(hdev);
1179 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1181 struct mgmt_mode *cp = data;
1183 BT_DBG("request for %s", hdev->name);
1186 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1187 MGMT_STATUS_NOT_SUPPORTED);
1190 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1192 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1194 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler: toggle LE host support via
 * WRITE_LE_HOST_SUPPORTED. Rejected on non-LE controllers; when powered
 * off or already in the requested state only the flag changes.
 * NOTE(review): declarations (val/enabled/err), hci_dev_lock(), goto
 * labels, the hci_cp.le assignment and braces were lost in the
 * extraction; fragments kept verbatim.
 */
1197 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1199 struct mgmt_mode *cp = data;
1200 struct hci_cp_write_le_host_supported hci_cp;
1201 struct pending_cmd *cmd;
1205 BT_DBG("request for %s", hdev->name);
1209 if (!lmp_le_capable(hdev)) {
1210 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1211 MGMT_STATUS_NOT_SUPPORTED);
/* Current controller-side LE host support state */
1216 enabled = !!(hdev->host_features[0] & LMP_HOST_LE);
1218 if (!hdev_is_powered(hdev) || val == enabled) {
1219 bool changed = false;
1221 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1222 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1226 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1231 err = new_settings(hdev, sk);
1236 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1237 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1242 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1248 memset(&hci_cp, 0, sizeof(hci_cp));
/* Advertise simultaneous LE+BR/EDR only if the controller supports it */
1252 hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
1255 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1258 mgmt_pending_remove(cmd);
1261 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_UUID handler: register a 128-bit service UUID, then push
 * the updated Class of Device and EIR. Replies immediately unless a
 * class write is in flight (HCI_PENDING_CLASS), in which case the reply
 * is deferred via a pending command.
 * NOTE(review): err declaration, hci_dev_lock(), the kmalloc NULL check
 * and goto labels were lost in the extraction; fragments kept verbatim.
 */
1265 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1267 struct mgmt_cp_add_uuid *cp = data;
1268 struct pending_cmd *cmd;
1269 struct bt_uuid *uuid;
1272 BT_DBG("request for %s", hdev->name);
1276 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1277 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1282 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1288 memcpy(uuid->uuid, cp->uuid, 16);
1289 uuid->svc_hint = cp->svc_hint;
1291 list_add(&uuid->list, &hdev->uuids);
1293 err = update_class(hdev);
1297 err = update_eir(hdev);
/* No class write pending: reply synchronously with the new class */
1301 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1302 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1303 hdev->dev_class, 3);
1307 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1312 hci_dev_unlock(hdev);
1316 static bool enable_service_cache(struct hci_dev *hdev)
1318 if (!hdev_is_powered(hdev))
1321 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1322 schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID, or all of them when the
 * all-zero wildcard UUID is given (which also arms the service cache),
 * then push the updated class and EIR.
 * NOTE(review): the 'found' counter, err declaration, hci_dev_lock(),
 * goto labels and braces were lost in the extraction; fragments kept
 * verbatim.
 */
1329 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1332 struct mgmt_cp_remove_uuid *cp = data;
1333 struct pending_cmd *cmd;
1334 struct list_head *p, *n;
1335 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1338 BT_DBG("request for %s", hdev->name);
1342 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1343 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID is the "remove everything" wildcard */
1348 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1349 err = hci_uuids_clear(hdev);
1351 if (enable_service_cache(hdev)) {
1352 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1353 0, hdev->dev_class, 3);
1362 list_for_each_safe(p, n, &hdev->uuids) {
1363 struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
1365 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1368 list_del(&match->list);
/* No matching UUID found */
1373 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1374 MGMT_STATUS_INVALID_PARAMS);
1379 err = update_class(hdev);
1383 err = update_eir(hdev);
1387 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1388 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1389 hdev->dev_class, 3);
1393 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1398 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEV_CLASS handler: store the requested major/minor class
 * and write the resulting Class of Device. If the service cache was
 * active it is flushed synchronously first (lock dropped around
 * cancel_delayed_work_sync to avoid deadlock with the work item).
 * NOTE(review): err declaration, hci_dev_lock()/re-lock, update_eir()
 * call and goto labels were lost in the extraction; fragments kept
 * verbatim.
 */
1402 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1405 struct mgmt_cp_set_dev_class *cp = data;
1406 struct pending_cmd *cmd;
1409 BT_DBG("request for %s", hdev->name);
1413 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1414 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1419 hdev->major_class = cp->major;
1420 hdev->minor_class = cp->minor;
1422 if (!hdev_is_powered(hdev)) {
1423 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1424 hdev->dev_class, 3);
/* Flush a pending service cache before writing the class */
1428 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1429 hci_dev_unlock(hdev);
1430 cancel_delayed_work_sync(&hdev->service_cache);
1435 err = update_class(hdev);
1439 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1440 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1441 hdev->dev_class, 3);
1445 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1450 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list. The packet length is validated against
 * key_count before any state is touched (untrusted user-space input).
 * NOTE(review): loop index i, hci_dev_lock(), the debug_keys
 * conditional and braces were lost in the extraction; fragments kept
 * verbatim.
 */
1454 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1457 struct mgmt_cp_load_link_keys *cp = data;
1458 u16 key_count, expected_len;
1461 key_count = __le16_to_cpu(cp->key_count);
/* Reject mismatched length before trusting key_count */
1463 expected_len = sizeof(*cp) + key_count *
1464 sizeof(struct mgmt_link_key_info);
1465 if (expected_len != len) {
1466 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1468 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1469 MGMT_STATUS_INVALID_PARAMS);
1472 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1477 hci_link_keys_clear(hdev);
1479 set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
1482 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1484 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1486 for (i = 0; i < key_count; i++) {
1487 struct mgmt_link_key_info *key = &cp->keys[i];
1489 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1490 key->type, key->pin_len);
1493 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1495 hci_dev_unlock(hdev);
1500 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1501 u8 addr_type, struct sock *skip_sk)
1503 struct mgmt_ev_device_unpaired ev;
1505 bacpy(&ev.addr.bdaddr, bdaddr);
1506 ev.addr.type = addr_type;
1508 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored link key (BR/EDR) or
 * LTK (LE) for the address; optionally also disconnect the device, in
 * which case the reply is deferred until the disconnect completes.
 * NOTE(review): err declaration, hci_dev_lock(), goto labels, the
 * key-removal error check context and braces were lost in the
 * extraction; fragments kept verbatim.
 */
1512 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1515 struct mgmt_cp_unpair_device *cp = data;
1516 struct mgmt_rp_unpair_device rp;
1517 struct hci_cp_disconnect dc;
1518 struct pending_cmd *cmd;
1519 struct hci_conn *conn;
1524 memset(&rp, 0, sizeof(rp));
1525 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1526 rp.addr.type = cp->addr.type;
1528 if (!hdev_is_powered(hdev)) {
1529 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1530 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Remove the key matching the address type */
1534 if (cp->addr.type == BDADDR_BREDR)
1535 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1537 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1540 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1541 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1545 if (cp->disconnect) {
1546 if (cp->addr.type == BDADDR_BREDR)
1547 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1550 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection: reply now and announce the unpair */
1557 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1559 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1563 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1570 dc.handle = cpu_to_le16(conn->handle);
1571 dc.reason = 0x13; /* Remote User Terminated Connection */
1572 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1574 mgmt_pending_remove(cmd);
1577 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: look up the live ACL or LE connection for
 * the address and issue HCI_OP_DISCONNECT; the mgmt reply is deferred
 * via a pending command until the disconnect completes.
 * NOTE(review): err declaration, BT_DBG, hci_dev_lock(), goto labels
 * and braces were lost in the extraction; fragments kept verbatim.
 */
1581 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1584 struct mgmt_cp_disconnect *cp = data;
1585 struct hci_cp_disconnect dc;
1586 struct pending_cmd *cmd;
1587 struct hci_conn *conn;
1594 if (!test_bit(HCI_UP, &hdev->flags)) {
1595 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1596 MGMT_STATUS_NOT_POWERED);
/* Only one disconnect may be outstanding at a time */
1600 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1601 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1606 if (cp->addr.type == BDADDR_BREDR)
1607 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1610 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1612 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1613 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT,
1614 MGMT_STATUS_NOT_CONNECTED);
1618 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1624 dc.handle = cpu_to_le16(conn->handle);
1625 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1627 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1629 mgmt_pending_remove(cmd);
1632 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type to the corresponding mgmt
 * BDADDR_* address type used on the management interface.
 */
1636 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1638 switch (link_type) {
1640 switch (addr_type) {
1641 case ADDR_LE_DEV_PUBLIC:
1642 return BDADDR_LE_PUBLIC;
1645 /* Fallback to LE Random address type */
1646 return BDADDR_LE_RANDOM;
1650 /* Fallback to BR/EDR type */
1651 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: count the mgmt-visible connections,
 * allocate a response sized for that count, then fill it with the
 * address of each connected device.  SCO/eSCO links are skipped, so the
 * length is recalculated from the number actually copied.
 */
1655 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1658 struct mgmt_rp_get_connections *rp;
1668 if (!hdev_is_powered(hdev)) {
1669 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1670 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected */
1675 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1676 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1680 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1681 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: copy addresses, skipping SCO/eSCO links */
1688 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1689 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1691 bacpy(&rp->addr[i].bdaddr, &c->dst);
1692 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1693 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1698 rp->conn_count = cpu_to_le16(i);
1700 /* Recalculate length in case of filtered SCO connections, etc */
1701 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1703 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1709 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the HCI negative
 * PIN reply for the given address.  The pending command is removed
 * again if sending the HCI command fails.
 */
1713 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1714 struct mgmt_cp_pin_code_neg_reply *cp)
1716 struct pending_cmd *cmd;
1719 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1724 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1725 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
1727 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller.  A connection requesting high security requires a full
 * 16-byte PIN; a shorter one triggers an automatic negative reply and
 * an INVALID_PARAMS status back to the caller.
 */
1732 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
1735 struct hci_conn *conn;
1736 struct mgmt_cp_pin_code_reply *cp = data;
1737 struct hci_cp_pin_code_reply reply;
1738 struct pending_cmd *cmd;
1745 if (!hdev_is_powered(hdev)) {
1746 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1747 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only applies to BR/EDR (ACL) links */
1751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1753 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1754 MGMT_STATUS_NOT_CONNECTED);
/* Secure pairing demands a 16-digit PIN; reject shorter ones */
1758 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1759 struct mgmt_cp_pin_code_neg_reply ncp;
1761 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
1763 BT_ERR("PIN code is not 16 bytes long");
1765 err = send_pin_code_neg_reply(sk, hdev, &ncp);
1767 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
1768 MGMT_STATUS_INVALID_PARAMS);
1773 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
1779 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
1780 reply.pin_len = cp->pin_len;
1781 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
1783 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
1785 mgmt_pending_remove(cmd);
1788 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: record the requested IO capability
 * on the controller (used for subsequent pairing) and reply immediately
 * with success.
 */
1792 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1795 struct mgmt_cp_set_io_capability *cp = data;
1801 hdev->io_capability = cp->io_capability;
1803 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1804 hdev->io_capability);
1806 hci_dev_unlock(hdev);
1808 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data points at this
 * connection, or NULL-equivalent if no pairing is in progress for it
 * (the return on no-match is in elided lines).
 */
1812 static struct pending_cmd *find_pairing(struct hci_conn *conn)
1814 struct hci_dev *hdev = conn->hdev;
1815 struct pending_cmd *cmd;
1817 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1818 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1821 if (cmd->user_data != conn)
/* Finish a pairing attempt: report the result to the mgmt caller,
 * detach all pairing callbacks from the connection so no further
 * events are delivered for it, and drop the pending command.
 */
1830 static void pairing_complete(struct pending_cmd *cmd, u8 status)
1832 struct mgmt_rp_pair_device rp;
1833 struct hci_conn *conn = cmd->user_data;
1835 bacpy(&rp.addr.bdaddr, &conn->dst);
1836 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
1838 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
1841 /* So we don't get further callbacks for this connection */
1842 conn->connect_cfm_cb = NULL;
1843 conn->security_cfm_cb = NULL;
1844 conn->disconn_cfm_cb = NULL;
1848 mgmt_pending_remove(cmd);
/* hci_conn callback: a connect/security/disconnect event arrived for a
 * connection with an active pairing; complete the pending PAIR_DEVICE
 * command with the translated HCI status.
 */
1851 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1853 struct pending_cmd *cmd;
1855 BT_DBG("status %u", status);
1857 cmd = find_pairing(conn);
1859 BT_DBG("Unable to find a pending command");
1861 pairing_complete(cmd, mgmt_status(status));
/* LE variant of the connect callback.  For LE a successful connection
 * alone does not mean pairing finished (SMP continues afterwards);
 * the elided lines presumably return early on success — TODO confirm
 * against the full source.
 */
1864 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
1866 struct pending_cmd *cmd;
1868 BT_DBG("status %u", status);
1873 cmd = find_pairing(conn);
1875 BT_DBG("Unable to find a pending command");
1877 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate an ACL or LE connection with
 * dedicated-bonding authentication, attach pairing callbacks, and
 * defer the reply until the pairing outcome is known.  If the link is
 * already up and secure, pairing completes immediately.
 */
1880 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1883 struct mgmt_cp_pair_device *cp = data;
1884 struct mgmt_rp_pair_device rp;
1885 struct pending_cmd *cmd;
1886 u8 sec_level, auth_type;
1887 struct hci_conn *conn;
1894 if (!hdev_is_powered(hdev)) {
1895 err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1896 MGMT_STATUS_NOT_POWERED);
/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection */
1900 sec_level = BT_SECURITY_MEDIUM;
1901 if (cp->io_cap == 0x03)
1902 auth_type = HCI_AT_DEDICATED_BONDING;
1904 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1906 if (cp->addr.type == BDADDR_BREDR)
1907 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
1908 cp->addr.type, sec_level, auth_type);
1910 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
1911 cp->addr.type, sec_level, auth_type);
1913 memset(&rp, 0, sizeof(rp));
1914 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1915 rp.addr.type = cp->addr.type;
1920 if (PTR_ERR(conn) == -EBUSY)
1921 status = MGMT_STATUS_BUSY;
1923 status = MGMT_STATUS_CONNECT_FAILED;
1925 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A non-NULL connect_cfm_cb means another pairing owns this link */
1931 if (conn->connect_cfm_cb) {
1933 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1934 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1938 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
1945 /* For LE, just connecting isn't a proof that the pairing finished */
1946 if (cp->addr.type == BDADDR_BREDR)
1947 conn->connect_cfm_cb = pairing_complete_cb;
1949 conn->connect_cfm_cb = le_connect_complete_cb;
1951 conn->security_cfm_cb = pairing_complete_cb;
1952 conn->disconn_cfm_cb = pairing_complete_cb;
1953 conn->io_capability = cp->io_cap;
1954 cmd->user_data = conn;
/* Already connected and secure enough: finish right away */
1956 if (conn->state == BT_CONNECTED &&
1957 hci_conn_security(conn, sec_level, auth_type))
1958 pairing_complete(cmd, 0);
1963 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-progress pairing
 * for the given address.  The pending PAIR_DEVICE command is completed
 * with CANCELLED and the cancel request itself succeeds.
 */
1967 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1970 struct mgmt_addr_info *addr = data;
1971 struct pending_cmd *cmd;
1972 struct hci_conn *conn;
1979 if (!hdev_is_powered(hdev)) {
1980 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1981 MGMT_STATUS_NOT_POWERED);
1985 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
1987 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1988 MGMT_STATUS_INVALID_PARAMS);
1992 conn = cmd->user_data;
/* The address must match the pairing actually in progress */
1994 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
1995 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
1996 MGMT_STATUS_INVALID_PARAMS);
2000 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2002 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2003 addr, sizeof(*addr));
2005 hci_dev_unlock(hdev);
/* Common backend for the user confirm/passkey (negative) reply
 * commands.  LE addresses are routed to SMP; BR/EDR replies are
 * forwarded to the controller via the given HCI opcode, with the
 * passkey appended only for HCI_OP_USER_PASSKEY_REPLY.
 */
2009 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2010 bdaddr_t *bdaddr, u8 type, u16 mgmt_op,
2011 u16 hci_op, __le32 passkey)
2013 struct pending_cmd *cmd;
2014 struct hci_conn *conn;
2019 if (!hdev_is_powered(hdev)) {
2020 err = cmd_status(sk, hdev->id, mgmt_op,
2021 MGMT_STATUS_NOT_POWERED);
2025 if (type == BDADDR_BREDR)
2026 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2028 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
2031 err = cmd_status(sk, hdev->id, mgmt_op,
2032 MGMT_STATUS_NOT_CONNECTED);
2036 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2037 /* Continue with pairing via SMP */
2038 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2041 err = cmd_status(sk, hdev->id, mgmt_op,
2042 MGMT_STATUS_SUCCESS);
2044 err = cmd_status(sk, hdev->id, mgmt_op,
2045 MGMT_STATUS_FAILED);
2050 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
2056 /* Continue with pairing via HCI */
2057 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2058 struct hci_cp_user_passkey_reply cp;
2060 bacpy(&cp.bdaddr, bdaddr);
2061 cp.passkey = passkey;
2062 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2064 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
2067 mgmt_pending_remove(cmd);
2070 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the negative-PIN HCI opcode and no passkey.
 */
2074 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2075 void *data, u16 len)
2077 struct mgmt_cp_pin_code_neg_reply *cp = data;
2081 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2082 MGMT_OP_PIN_CODE_NEG_REPLY,
2083 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the exact parameter
 * size, then delegate to user_pairing_resp() (no passkey needed for a
 * numeric-comparison confirmation).
 */
2086 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2089 struct mgmt_cp_user_confirm_reply *cp = data;
2093 if (len != sizeof(*cp))
2094 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2095 MGMT_STATUS_INVALID_PARAMS);
2097 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2098 MGMT_OP_USER_CONFIRM_REPLY,
2099 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: user rejected the numeric
 * comparison; delegate to user_pairing_resp() with the negative opcode.
 */
2102 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2103 void *data, u16 len)
2105 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2109 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2110 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2111 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forward the user-entered passkey
 * through user_pairing_resp(), which appends it to the HCI command.
 */
2114 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2117 struct mgmt_cp_user_passkey_reply *cp = data;
2121 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2122 MGMT_OP_USER_PASSKEY_REPLY,
2123 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: user declined to enter a
 * passkey; delegate to user_pairing_resp() with the negative opcode.
 */
2126 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2127 void *data, u16 len)
2129 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2133 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
2134 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2135 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Send HCI_OP_WRITE_LOCAL_NAME with the given name.  Note the copy is
 * sizeof(cp.name) bytes, so the caller's buffer must be at least that
 * large (hdev->dev_name is — see set_local_name()).
 */
2138 static int update_name(struct hci_dev *hdev, const char *name)
2140 struct hci_cp_write_local_name cp;
2142 memcpy(cp.name, name, sizeof(cp.name));
2144 return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* MGMT_OP_SET_LOCAL_NAME handler: always store the short name.  When
 * the adapter is powered off, store the full name, reply immediately
 * and broadcast a LOCAL_NAME_CHANGED event; otherwise queue a pending
 * command and push the name to the controller via update_name().
 */
2147 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2150 struct mgmt_cp_set_local_name *cp = data;
2151 struct pending_cmd *cmd;
2158 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2160 if (!hdev_is_powered(hdev)) {
2161 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2163 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2168 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2174 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2180 err = update_name(hdev, cp->name);
2182 mgmt_pending_remove(cmd);
2185 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the local OOB hash and
 * randomizer from an SSP-capable, powered controller.  Only one such
 * request may be pending; the reply is deferred to the HCI completion.
 */
2189 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2190 void *data, u16 data_len)
2192 struct pending_cmd *cmd;
2195 BT_DBG("%s", hdev->name);
2199 if (!hdev_is_powered(hdev)) {
2200 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2201 MGMT_STATUS_NOT_POWERED);
/* OOB data only exists with Secure Simple Pairing support */
2205 if (!lmp_ssp_capable(hdev)) {
2206 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2207 MGMT_STATUS_NOT_SUPPORTED);
2211 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2212 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2217 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2223 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2225 mgmt_pending_remove(cmd);
2228 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store a remote device's OOB
 * hash/randomizer for later pairing and report success or FAILED.
 */
2232 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2233 void *data, u16 len)
2235 struct mgmt_cp_add_remote_oob_data *cp = data;
2239 BT_DBG("%s ", hdev->name);
2243 if (!hdev_is_powered(hdev)) {
2244 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
2245 MGMT_STATUS_NOT_POWERED, &cp->addr,
2250 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2253 status = MGMT_STATUS_FAILED;
2257 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2258 &cp->addr, sizeof(cp->addr));
2261 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for
 * the given address; a lookup miss maps to INVALID_PARAMS.
 */
2265 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2266 void *data, u16 len)
2268 struct mgmt_cp_remove_remote_oob_data *cp = data;
2272 BT_DBG("%s", hdev->name);
2276 if (!hdev_is_powered(hdev)) {
2277 err = cmd_complete(sk, hdev->id,
2278 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2279 MGMT_STATUS_NOT_POWERED, &cp->addr,
2284 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2286 status = MGMT_STATUS_INVALID_PARAMS;
2290 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2291 status, &cp->addr, sizeof(cp->addr));
2294 hci_dev_unlock(hdev);
/* Continue an interleaved (BR/EDR + LE) discovery by starting the
 * BR/EDR inquiry phase; on failure the discovery state is reset to
 * STOPPED.  Called from the HCI core once the LE scan phase ends.
 */
2298 int mgmt_interleaved_discovery(struct hci_dev *hdev)
2302 BT_DBG("%s", hdev->name);
2306 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
2308 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2310 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: kick off BR/EDR inquiry, LE scan or
 * both (interleaved) depending on the requested type and controller
 * capabilities.  The reply is deferred; on success the discovery state
 * moves to STARTING.
 */
2315 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2316 void *data, u16 len)
2318 struct mgmt_cp_start_discovery *cp = data;
2319 struct pending_cmd *cmd;
2322 BT_DBG("%s", hdev->name);
2326 if (!hdev_is_powered(hdev)) {
2327 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2328 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and discovery are mutually exclusive */
2332 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2333 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2338 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2339 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2344 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2350 hdev->discovery.type = cp->type;
2352 switch (hdev->discovery.type) {
2353 case DISCOV_TYPE_BREDR:
2354 if (lmp_bredr_capable(hdev))
2355 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2360 case DISCOV_TYPE_LE:
2361 if (lmp_host_le_capable(hdev))
2362 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2363 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
/* Interleaved: LE scan first, BR/EDR inquiry follows on timeout */
2368 case DISCOV_TYPE_INTERLEAVED:
2369 if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev))
2370 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2372 LE_SCAN_TIMEOUT_BREDR_LE);
2382 mgmt_pending_remove(cmd);
2384 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2387 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: cancel whatever discovery phase is
 * active — inquiry, LE scan, or name resolution (via Remote Name
 * Request Cancel).  The requested type must match the running one.
 */
2391 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2394 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2395 struct pending_cmd *cmd;
2396 struct hci_cp_remote_name_req_cancel cp;
2397 struct inquiry_entry *e;
2400 BT_DBG("%s", hdev->name);
2404 if (!hci_discovery_active(hdev)) {
2405 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2406 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2407 sizeof(mgmt_cp->type));
2411 if (hdev->discovery.type != mgmt_cp->type) {
2412 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2413 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2414 sizeof(mgmt_cp->type));
2418 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2424 switch (hdev->discovery.state) {
2425 case DISCOVERY_FINDING:
2426 if (test_bit(HCI_INQUIRY, &hdev->flags))
2427 err = hci_cancel_inquiry(hdev);
2429 err = hci_cancel_le_scan(hdev);
/* Name resolution phase: cancel the outstanding name request */
2433 case DISCOVERY_RESOLVING:
2434 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* Nothing being resolved: discovery can stop immediately */
2437 mgmt_pending_remove(cmd);
2438 err = cmd_complete(sk, hdev->id,
2439 MGMT_OP_STOP_DISCOVERY, 0,
2441 sizeof(mgmt_cp->type));
2442 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2446 bacpy(&cp.bdaddr, &e->data.bdaddr);
2447 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2453 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2458 mgmt_pending_remove(cmd);
2460 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2463 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether a found
 * device's name is already known.  Unknown names are queued for
 * resolution (NAME_NEEDED); known ones skip the name request phase.
 */
2467 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2470 struct mgmt_cp_confirm_name *cp = data;
2471 struct inquiry_entry *e;
2474 BT_DBG("%s", hdev->name);
2478 if (!hci_discovery_active(hdev)) {
2479 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2480 MGMT_STATUS_FAILED);
2484 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2486 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2487 MGMT_STATUS_INVALID_PARAMS);
2491 if (cp->name_known) {
2492 e->name_state = NAME_KNOWN;
2495 e->name_state = NAME_NEEDED;
2496 hci_inquiry_cache_update_resolve(hdev, e);
2502 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the controller's
 * blacklist; failure maps to MGMT_STATUS_FAILED.
 */
2506 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2509 struct mgmt_cp_block_device *cp = data;
2513 BT_DBG("%s", hdev->name);
2517 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2519 status = MGMT_STATUS_FAILED;
2523 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2524 &cp->addr, sizeof(cp->addr));
2526 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the
 * blacklist; a miss maps to INVALID_PARAMS.
 */
2531 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2534 struct mgmt_cp_unblock_device *cp = data;
2538 BT_DBG("%s", hdev->name);
2542 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2544 status = MGMT_STATUS_INVALID_PARAMS;
2548 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2549 &cp->addr, sizeof(cp->addr));
2551 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record
 * (source/vendor/product/version) on the adapter.  Source may only be
 * 0x0000-0x0002 (disabled / Bluetooth SIG / USB Forum).
 */
2556 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2559 struct mgmt_cp_set_device_id *cp = data;
2563 BT_DBG("%s", hdev->name);
2565 source = __le16_to_cpu(cp->source);
2567 if (source > 0x0002)
2568 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2569 MGMT_STATUS_INVALID_PARAMS);
2573 hdev->devid_source = source;
2574 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2575 hdev->devid_product = __le16_to_cpu(cp->product);
2576 hdev->devid_version = __le16_to_cpu(cp->version);
2578 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2582 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: switch page scan between
 * interlaced/160ms (fast) and standard/1.28s (default) via the Write
 * Page Scan Activity and Write Page Scan Type HCI commands.  Requires
 * a powered, connectable adapter.
 */
2587 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2588 void *data, u16 len)
2590 struct mgmt_mode *cp = data;
2591 struct hci_cp_write_page_scan_activity acp;
2595 BT_DBG("%s", hdev->name);
2597 if (!hdev_is_powered(hdev))
2598 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2599 MGMT_STATUS_NOT_POWERED);
2601 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2602 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2603 MGMT_STATUS_REJECTED);
2608 type = PAGE_SCAN_TYPE_INTERLACED;
2610 /* 160 msec page scan interval */
2611 acp.interval = __constant_cpu_to_le16(0x0100);
2613 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2615 /* default 1.28 sec page scan */
2616 acp.interval = __constant_cpu_to_le16(0x0800);
2619 /* default 11.25 msec page scan window */
2620 acp.window = __constant_cpu_to_le16(0x0012);
2622 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2625 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2626 MGMT_STATUS_FAILED);
2630 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
2632 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2633 MGMT_STATUS_FAILED);
2637 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0,
2640 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the adapter's stored
 * SMP long-term keys with the list supplied by userspace.  The total
 * length must exactly match the declared key_count (variable-length
 * command, validated here rather than by the dispatch table).
 */
2644 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2645 void *cp_data, u16 len)
2647 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2648 u16 key_count, expected_len;
2651 key_count = __le16_to_cpu(cp->key_count);
2653 expected_len = sizeof(*cp) + key_count *
2654 sizeof(struct mgmt_ltk_info);
2655 if (expected_len != len) {
2656 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2658 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2662 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Old keys are discarded wholesale before loading the new set */
2666 hci_smp_ltks_clear(hdev);
2668 for (i = 0; i < key_count; i++) {
2669 struct mgmt_ltk_info *key = &cp->keys[i];
2675 type = HCI_SMP_LTK_SLAVE;
2677 hci_add_ltk(hdev, &key->addr.bdaddr,
2678 bdaddr_to_le(key->addr.type),
2679 type, 0, key->authenticated, key->val,
2680 key->enc_size, key->ediv, key->rand);
2683 hci_dev_unlock(hdev);
/* Dispatch table for management commands.  The array index IS the
 * MGMT_OP_* opcode, so entries must stay in opcode order with no gaps.
 * The bool marks variable-length commands (len >= data_len accepted);
 * fixed-length commands must match data_len exactly (see mgmt_control).
 */
2688 static const struct mgmt_handler {
2689 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2693 } mgmt_handlers[] = {
2694 { NULL }, /* 0x0000 (no command) */
2695 { read_version, false, MGMT_READ_VERSION_SIZE },
2696 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
2697 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
2698 { read_controller_info, false, MGMT_READ_INFO_SIZE },
2699 { set_powered, false, MGMT_SETTING_SIZE },
2700 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
2701 { set_connectable, false, MGMT_SETTING_SIZE },
2702 { set_fast_connectable, false, MGMT_SETTING_SIZE },
2703 { set_pairable, false, MGMT_SETTING_SIZE },
2704 { set_link_security, false, MGMT_SETTING_SIZE },
2705 { set_ssp, false, MGMT_SETTING_SIZE },
2706 { set_hs, false, MGMT_SETTING_SIZE },
2707 { set_le, false, MGMT_SETTING_SIZE },
2708 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
2709 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
2710 { add_uuid, false, MGMT_ADD_UUID_SIZE },
2711 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
2712 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
2713 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
2714 { disconnect, false, MGMT_DISCONNECT_SIZE },
2715 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
2716 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
2717 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
2718 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
2719 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
2720 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
2721 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
2722 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
2723 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
2724 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
2725 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
2726 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
2727 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
2728 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
2729 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
2730 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
2731 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
2732 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
2733 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
2734 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for management commands arriving on an HCI control
 * socket: copy the message from userspace, validate the header,
 * opcode, index and payload length, then dispatch to the handler
 * table.  Index-less commands (< MGMT_OP_READ_INFO) must not carry a
 * controller index, and vice versa.
 */
2738 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2742 struct mgmt_hdr *hdr;
2743 u16 opcode, index, len;
2744 struct hci_dev *hdev = NULL;
2745 const struct mgmt_handler *handler;
2748 BT_DBG("got %zu bytes", msglen);
2750 if (msglen < sizeof(*hdr))
2753 buf = kmalloc(msglen, GFP_KERNEL);
2757 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
2763 opcode = __le16_to_cpu(hdr->opcode);
2764 index = __le16_to_cpu(hdr->index);
2765 len = __le16_to_cpu(hdr->len);
/* Header-declared length must match the actual payload */
2767 if (len != msglen - sizeof(*hdr)) {
2772 if (index != MGMT_INDEX_NONE) {
2773 hdev = hci_dev_get(index);
2775 err = cmd_status(sk, index, opcode,
2776 MGMT_STATUS_INVALID_INDEX);
2781 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
2782 mgmt_handlers[opcode].func == NULL) {
2783 BT_DBG("Unknown op %u", opcode);
2784 err = cmd_status(sk, index, opcode,
2785 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global commands take no index; per-adapter commands require one */
2789 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
2790 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
2791 err = cmd_status(sk, index, opcode,
2792 MGMT_STATUS_INVALID_INDEX);
2796 handler = &mgmt_handlers[opcode];
2798 if ((handler->var_len && len < handler->data_len) ||
2799 (!handler->var_len && len != handler->data_len)) {
2800 err = cmd_status(sk, index, opcode,
2801 MGMT_STATUS_INVALID_PARAMS);
2806 mgmt_init_hdev(sk, hdev);
2808 cp = buf + sizeof(*hdr);
2810 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status passed via *data and free it.
 */
2824 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
2828 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
2829 mgmt_pending_remove(cmd);
/* Broadcast MGMT_EV_INDEX_ADDED for a newly registered controller
 * (skipped for controllers the mgmt interface does not cover).
 */
2832 int mgmt_index_added(struct hci_dev *hdev)
2834 if (!mgmt_valid_hdev(hdev))
2837 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Controller unregistered: fail all pending commands (opcode 0 =
 * match any) with INVALID_INDEX, then broadcast INDEX_REMOVED.
 */
2840 int mgmt_index_removed(struct hci_dev *hdev)
2842 u8 status = MGMT_STATUS_INVALID_INDEX;
2844 if (!mgmt_valid_hdev(hdev))
2847 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2849 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
2854 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, remembering the first socket (with a
 * reference taken) so the caller can skip it when broadcasting
 * new_settings.
 */
2858 static void settings_rsp(struct pending_cmd *cmd, void *data)
2860 struct cmd_lookup *match = data;
2862 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
2864 list_del(&cmd->list);
2866 if (match->sk == NULL) {
2867 match->sk = cmd->sk;
2868 sock_hold(match->sk);
2871 mgmt_pending_free(cmd);
/* Called when the adapter's power state changes.  On power-on,
 * re-apply the mgmt-tracked configuration to the controller (scan
 * mode, SSP, LE host support, local name); on power-off, fail every
 * pending command with NOT_POWERED.  Finishes by emitting a
 * new-settings event.
 */
2874 int mgmt_powered(struct hci_dev *hdev, u8 powered)
2876 struct cmd_lookup match = { NULL, hdev };
2879 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2882 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
/* Restore page/inquiry scan according to the saved flags */
2887 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2889 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2890 scan |= SCAN_INQUIRY;
2893 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2895 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2898 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
2901 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2902 struct hci_cp_write_le_host_supported cp;
2905 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
2907 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2912 update_name(hdev, hdev->dev_name);
2915 u8 status = MGMT_STATUS_NOT_POWERED;
2916 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
2919 err = new_settings(hdev, match.sk);
/* Sync the HCI_DISCOVERABLE flag with the controller's reported state,
 * answer pending SET_DISCOVERABLE commands, and broadcast new settings
 * if the flag actually changed.
 */
2927 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
2929 struct cmd_lookup match = { NULL, hdev };
2930 bool changed = false;
2934 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2937 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2941 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
2945 err = new_settings(hdev, match.sk);
/* Sync the HCI_CONNECTABLE flag with the controller's reported state,
 * answer pending SET_CONNECTABLE commands, and broadcast new settings
 * if the flag actually changed.
 */
2953 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
2955 struct cmd_lookup match = { NULL, hdev };
2956 bool changed = false;
2960 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2963 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2967 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
2971 err = new_settings(hdev, match.sk);
/* Write Scan Enable failed: fail the pending connectable and/or
 * discoverable commands that requested it, with the translated HCI
 * error.
 */
2979 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
2981 u8 mgmt_err = mgmt_status(status);
2983 if (scan & SCAN_PAGE)
2984 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
2985 cmd_status_rsp, &mgmt_err);
2987 if (scan & SCAN_INQUIRY)
2988 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
2989 cmd_status_rsp, &mgmt_err);
/* Notify userspace of a new BR/EDR link key via MGMT_EV_NEW_LINK_KEY;
 * store_hint tells userspace whether to persist it.
 */
2994 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2997 struct mgmt_ev_new_link_key ev;
2999 memset(&ev, 0, sizeof(ev));
3001 ev.store_hint = persistent;
3002 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3003 ev.key.addr.type = BDADDR_BREDR;
3004 ev.key.type = key->type;
3005 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3006 ev.key.pin_len = key->pin_len;
3008 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Notify userspace of a new SMP long-term key via
 * MGMT_EV_NEW_LONG_TERM_KEY, translating the LE address type and
 * copying the key material (rand, ediv, value, enc_size).
 */
3011 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3013 struct mgmt_ev_new_long_term_key ev;
3015 memset(&ev, 0, sizeof(ev));
3017 ev.store_hint = persistent;
3018 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3019 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3020 ev.key.authenticated = key->authenticated;
3021 ev.key.enc_size = key->enc_size;
3022 ev.key.ediv = key->ediv;
3024 if (key->type == HCI_SMP_LTK)
3027 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3028 memcpy(ev.key.val, key->val, sizeof(key->val));
3030 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_DEVICE_CONNECTED with the remote address, flags and an
 * EIR blob carrying the remote name and (non-zero) class of device.
 * The event is variable length: header plus however much EIR was
 * appended.
 */
3034 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3035 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3039 struct mgmt_ev_device_connected *ev = (void *) buf;
3042 bacpy(&ev->addr.bdaddr, bdaddr);
3043 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3045 ev->flags = __cpu_to_le32(flags);
3048 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* All-zero class means "unknown" — omit it from the EIR data */
3051 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3052 eir_len = eir_append_data(ev->eir, eir_len,
3053 EIR_CLASS_OF_DEV, dev_class, 3);
3055 ev->eir_len = cpu_to_le16(eir_len);
3057 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3058 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with success, exporting its socket through *data so the
 * caller can skip that socket when broadcasting the disconnect event.
 */
3061 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3063 struct mgmt_cp_disconnect *cp = cmd->param;
3064 struct sock **sk = data;
3065 struct mgmt_rp_disconnect rp;
3067 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3068 rp.addr.type = cp->addr.type;
3070 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3076 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command once the disconnect completed — send the device_unpaired
 * event, complete the command with success, and free it.
 */
3079 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3081 struct hci_dev *hdev = data;
3082 struct mgmt_cp_unpair_device *cp = cmd->param;
3083 struct mgmt_rp_unpair_device rp;
3085 memset(&rp, 0, sizeof(rp));
3086 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3087 rp.addr.type = cp->addr.type;
3089 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3091 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3093 mgmt_pending_remove(cmd);
/* A connection went down: complete any pending DISCONNECT commands,
 * emit MGMT_EV_DEVICE_DISCONNECTED (skipping the requester's socket),
 * then finish any UNPAIR_DEVICE commands waiting on this disconnect.
 */
3096 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3097 u8 link_type, u8 addr_type, u8 reason)
3099 struct mgmt_ev_device_disconnected ev;
3100 struct sock *sk = NULL;
3103 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3105 bacpy(&ev.addr.bdaddr, bdaddr);
3106 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3109 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3115 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* An HCI disconnect we initiated failed: complete the pending
 * DISCONNECT command with the translated status, then flush any
 * UNPAIR_DEVICE commands waiting on it.
 */
3121 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3122 u8 link_type, u8 addr_type, u8 status)
3124 struct mgmt_rp_disconnect rp;
3125 struct pending_cmd *cmd;
3128 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3132 bacpy(&rp.addr.bdaddr, bdaddr);
3133 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3135 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3136 mgmt_status(status), &rp, sizeof(rp));
3138 mgmt_pending_remove(cmd);
3140 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Emit MGMT_EV_CONNECT_FAILED with the remote address and translated
 * HCI status.
 */
3145 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3146 u8 addr_type, u8 status)
3148 struct mgmt_ev_connect_failed ev;
3150 bacpy(&ev.addr.bdaddr, bdaddr);
3151 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3152 ev.status = mgmt_status(status);
3154 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask userspace for a PIN code via MGMT_EV_PIN_CODE_REQUEST (BR/EDR
 * only; `secure` indicates a 16-digit PIN is required).
 */
3157 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3159 struct mgmt_ev_pin_code_request ev;
3161 bacpy(&ev.addr.bdaddr, bdaddr);
3162 ev.addr.type = BDADDR_BREDR;
3165 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* HCI PIN Code Reply completed: finish the pending PIN_CODE_REPLY
 * command with the translated status.
 */
3169 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3172 struct pending_cmd *cmd;
3173 struct mgmt_rp_pin_code_reply rp;
3176 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3180 bacpy(&rp.addr.bdaddr, bdaddr);
3181 rp.addr.type = BDADDR_BREDR;
3183 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3184 mgmt_status(status), &rp, sizeof(rp));
3186 mgmt_pending_remove(cmd);
/* HCI PIN Code Negative Reply completed: finish the pending
 * PIN_CODE_NEG_REPLY command with the translated status.
 */
3191 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3194 struct pending_cmd *cmd;
3195 struct mgmt_rp_pin_code_reply rp;
3198 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3202 bacpy(&rp.addr.bdaddr, bdaddr);
3203 rp.addr.type = BDADDR_BREDR;
3205 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3206 mgmt_status(status), &rp, sizeof(rp));
3208 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a pairing numeric value by emitting
 * MGMT_EV_USER_CONFIRM_REQUEST.  confirm_hint tells the agent whether a
 * simple yes/no is enough.
 * NOTE(review): the line copying 'value' into the event is not visible
 * in this excerpt -- verify against the full source. */
3213 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3214 u8 link_type, u8 addr_type, __le32 value,
3217 struct mgmt_ev_user_confirm_request ev;
3219 BT_DBG("%s", hdev->name);
3221 bacpy(&ev.addr.bdaddr, bdaddr);
3222 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3223 ev.confirm_hint = confirm_hint;
3226 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey for the remote device by emitting
 * MGMT_EV_USER_PASSKEY_REQUEST. */
3230 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3231 u8 link_type, u8 addr_type)
3233 struct mgmt_ev_user_passkey_request ev;
3235 BT_DBG("%s", hdev->name);
3237 bacpy(&ev.addr.bdaddr, bdaddr);
3238 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3240 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey
 * (neg-)reply commands: find the pending command for 'opcode', complete
 * it with the peer address and translated status, and drop it.
 * NOTE(review): the !cmd guard is not visible in this excerpt. */
3244 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3245 u8 link_type, u8 addr_type, u8 status,
3248 struct pending_cmd *cmd;
3249 struct mgmt_rp_user_confirm_reply rp;
3252 cmd = mgmt_pending_find(opcode, hdev);
3256 bacpy(&rp.addr.bdaddr, bdaddr);
3257 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3258 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3261 mgmt_pending_remove(cmd);
/* Completion of a positive user-confirmation reply. */
3266 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3267 u8 link_type, u8 addr_type, u8 status)
3269 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3270 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion of a negative (rejecting) user-confirmation reply. */
3273 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3274 u8 link_type, u8 addr_type, u8 status)
3276 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3278 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion of a positive user-passkey reply. */
3281 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3282 u8 link_type, u8 addr_type, u8 status)
3284 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3285 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion of a negative (rejecting) user-passkey reply. */
3288 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3289 u8 link_type, u8 addr_type, u8 status)
3291 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3293 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Broadcast MGMT_EV_PASSKEY_NOTIFY so a UI can display the passkey the
 * remote side must type.  The passkey is converted to little-endian
 * wire order; 'entered' counts digits typed so far on the remote. */
3296 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3297 u8 link_type, u8 addr_type, u32 passkey,
3300 struct mgmt_ev_passkey_notify ev;
3302 BT_DBG("%s", hdev->name);
3304 bacpy(&ev.addr.bdaddr, bdaddr);
3305 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3306 ev.passkey = __cpu_to_le32(passkey);
3307 ev.entered = entered;
3309 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_AUTH_FAILED with the peer address and the HCI
 * error translated to a mgmt status. */
3312 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3313 u8 addr_type, u8 status)
3315 struct mgmt_ev_auth_failed ev;
3317 bacpy(&ev.addr.bdaddr, bdaddr);
3318 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3319 ev.status = mgmt_status(status);
3321 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* HCI Write_Authentication_Enable finished.  On failure, fail every
 * pending MGMT_OP_SET_LINK_SECURITY command with the translated status;
 * on success, sync the HCI_LINK_SECURITY dev_flag with the controller's
 * HCI_AUTH state, complete pending commands with the current settings,
 * and emit a New Settings event if the flag actually changed. */
3324 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3326 struct cmd_lookup match = { NULL, hdev };
3327 bool changed = false;
3331 u8 mgmt_err = mgmt_status(status);
3332 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3333 cmd_status_rsp, &mgmt_err);
3337 if (test_bit(HCI_AUTH, &hdev->flags)) {
3338 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3341 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3345 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3349 err = new_settings(hdev, match.sk);
/* Wipe the cached EIR data and, if the controller supports Extended
 * Inquiry Response (LMP_EXT_INQ feature bit), send an all-zero
 * HCI Write_Extended_Inquiry_Response to clear it on the controller. */
3357 static int clear_eir(struct hci_dev *hdev)
3359 struct hci_cp_write_eir cp;
3361 if (!(hdev->features[6] & LMP_EXT_INQ))
3364 memset(hdev->eir, 0, sizeof(hdev->eir));
3366 memset(&cp, 0, sizeof(cp));
3368 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* HCI Write_Simple_Pairing_Mode finished.  On failure, roll back the
 * optimistically-set HCI_SSP_ENABLED flag (emitting New Settings if it
 * was set) and fail pending MGMT_OP_SET_SSP commands; on success, sync
 * the flag with the requested 'enable', complete pending commands, and
 * emit New Settings if the flag changed. */
3371 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3373 struct cmd_lookup match = { NULL, hdev };
3374 bool changed = false;
3378 u8 mgmt_err = mgmt_status(status);
3380 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
3382 err = new_settings(hdev, NULL);
3384 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
3391 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3394 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3398 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
3401 err = new_settings(hdev, match.sk);
3406 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
/* mgmt_pending_foreach() callback: complete a pending class-related
 * command with the current device class, unlink and free it, and keep
 * a reference to the first command's socket in the lookup context so
 * the caller can skip it when broadcasting the follow-up event. */
3414 static void class_rsp(struct pending_cmd *cmd, void *data)
3416 struct cmd_lookup *match = data;
3418 cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
3419 match->hdev->dev_class, 3);
3421 list_del(&cmd->list);
3423 if (match->sk == NULL) {
3424 match->sk = cmd->sk;
3425 sock_hold(match->sk);
3428 mgmt_pending_free(cmd);
/* HCI Write_Class_of_Device finished: clear the pending-class flag,
 * complete every pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command via
 * class_rsp(), and broadcast MGMT_EV_CLASS_OF_DEV_CHANGED. */
3431 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3434 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3437 clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
3439 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match);
3440 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
3441 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
3444 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Local name update finished.  Store the new name if it differs from
 * the cached one, reply to the pending MGMT_OP_SET_LOCAL_NAME command
 * (status on failure, name+short_name on success), and broadcast
 * MGMT_EV_LOCAL_NAME_CHANGED to everyone except the command's socket. */
3453 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3455 struct pending_cmd *cmd;
3456 struct mgmt_cp_set_local_name ev;
3457 bool changed = false;
3460 if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) {
3461 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3465 memset(&ev, 0, sizeof(ev));
3466 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3467 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3469 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3473 /* Always assume that either the short or the complete name has
3474 * changed if there was a pending mgmt command */
3478 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3479 mgmt_status(status));
3483 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev,
3490 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3491 sizeof(ev), cmd ? cmd->sk : NULL);
3497 mgmt_pending_remove(cmd);
/* HCI Read_Local_OOB_Data finished: complete the pending
 * MGMT_OP_READ_LOCAL_OOB_DATA command, with just the translated status
 * on failure or with the hash/randomizer pair on success. */
3501 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
3502 u8 *randomizer, u8 status)
3504 struct pending_cmd *cmd;
3507 BT_DBG("%s status %u", hdev->name, status);
3509 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3514 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3515 mgmt_status(status));
3517 struct mgmt_rp_read_local_oob_data rp;
3519 memcpy(rp.hash, hash, sizeof(rp.hash));
3520 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
3522 err = cmd_complete(cmd->sk, hdev->id,
3523 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
3527 mgmt_pending_remove(cmd);
/* HCI LE enable write finished.  Mirrors mgmt_ssp_enable_complete():
 * on failure, roll back an optimistically-set HCI_LE_ENABLED flag and
 * fail pending MGMT_OP_SET_LE commands; on success, sync the flag with
 * 'enable', complete pending commands, and emit New Settings if the
 * flag changed. */
3532 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3534 struct cmd_lookup match = { NULL, hdev };
3535 bool changed = false;
3539 u8 mgmt_err = mgmt_status(status);
3541 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3543 err = new_settings(hdev, NULL);
3545 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3552 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3555 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3559 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
3562 err = new_settings(hdev, match.sk);
/* Build and broadcast MGMT_EV_DEVICE_FOUND for a discovery result.
 * The event is assembled in a stack buffer ('buf', declared on a line
 * not visible in this excerpt) holding the fixed header plus EIR data;
 * the size check reserves 5 extra bytes so a Class-of-Device EIR field
 * can be appended when the raw EIR lacks one.  cfm_name and legacy-SSP
 * state are reported via the event flags. */
3570 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3571 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
3572 ssp, u8 *eir, u16 eir_len)
3575 struct mgmt_ev_device_found *ev = (void *) buf;
3578 /* Leave 5 bytes for a potential CoD field */
3579 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
3582 memset(buf, 0, sizeof(buf));
3584 bacpy(&ev->addr.bdaddr, bdaddr);
3585 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3588 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3590 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3593 memcpy(ev->eir, eir, eir_len);
3595 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
3596 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3599 ev->eir_len = cpu_to_le16(eir_len);
3600 ev_size = sizeof(*ev) + eir_len;
3602 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a MGMT_EV_DEVICE_FOUND event whose
 * EIR payload carries only an EIR_NAME_COMPLETE field.  The stack
 * buffer is sized for the event header plus the maximum name and the
 * 2-byte EIR length/type header. */
3605 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3606 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
3608 struct mgmt_ev_device_found *ev;
3609 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
3612 ev = (struct mgmt_ev_device_found *) buf;
3614 memset(buf, 0, sizeof(buf));
3616 bacpy(&ev->addr.bdaddr, bdaddr);
3617 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3620 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3623 ev->eir_len = cpu_to_le16(eir_len);
3625 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3626 sizeof(*ev) + eir_len, NULL);
/* Starting discovery failed at the HCI level: reset the discovery
 * state machine to STOPPED and complete the pending
 * MGMT_OP_START_DISCOVERY command with the translated status and the
 * discovery type that was requested. */
3629 int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3631 struct pending_cmd *cmd;
3635 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3637 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3641 type = hdev->discovery.type;
3643 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3644 &type, sizeof(type));
3645 mgmt_pending_remove(cmd);
/* Stopping discovery failed at the HCI level: complete the pending
 * MGMT_OP_STOP_DISCOVERY command with the translated status and the
 * current discovery type. */
3650 int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3652 struct pending_cmd *cmd;
3655 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3659 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3660 &hdev->discovery.type, sizeof(hdev->discovery.type));
3661 mgmt_pending_remove(cmd);
/* Discovery state transitioned: complete whichever of the pending
 * START_DISCOVERY / STOP_DISCOVERY commands is outstanding (reporting
 * the discovery type), then broadcast MGMT_EV_DISCOVERING with the new
 * on/off state. */
3666 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
3668 struct mgmt_ev_discovering ev;
3669 struct pending_cmd *cmd;
3671 BT_DBG("%s discovering %u", hdev->name, discovering);
3674 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3676 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3679 u8 type = hdev->discovery.type;
3681 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
3683 mgmt_pending_remove(cmd);
3686 memset(&ev, 0, sizeof(ev));
3687 ev.type = hdev->discovery.type;
3688 ev.discovering = discovering;
3690 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_DEVICE_BLOCKED, skipping the socket that issued
 * the pending MGMT_OP_BLOCK_DEVICE command (it gets a direct reply
 * instead). */
3693 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3695 struct pending_cmd *cmd;
3696 struct mgmt_ev_device_blocked ev;
3698 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
3700 bacpy(&ev.addr.bdaddr, bdaddr);
3701 ev.addr.type = type;
3703 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
3704 cmd ? cmd->sk : NULL);
/* Broadcast MGMT_EV_DEVICE_UNBLOCKED, skipping the socket that issued
 * the pending MGMT_OP_UNBLOCK_DEVICE command. */
3707 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3709 struct pending_cmd *cmd;
3710 struct mgmt_ev_device_unblocked ev;
3712 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
3714 bacpy(&ev.addr.bdaddr, bdaddr);
3715 ev.addr.type = type;
3717 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
3718 cmd ? cmd->sk : NULL);
/* Module parameter toggling Bluetooth High Speed (AMP) support at
 * runtime; world-readable, root-writable (0644) via sysfs. */
3721 module_param(enable_hs, bool, 0644);
3722 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");