2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
39 #define MGMT_VERSION 1
40 #define MGMT_REVISION 9
42 static const u16 mgmt_commands[] = {
43 MGMT_OP_READ_INDEX_LIST,
46 MGMT_OP_SET_DISCOVERABLE,
47 MGMT_OP_SET_CONNECTABLE,
48 MGMT_OP_SET_FAST_CONNECTABLE,
50 MGMT_OP_SET_LINK_SECURITY,
54 MGMT_OP_SET_DEV_CLASS,
55 MGMT_OP_SET_LOCAL_NAME,
58 MGMT_OP_LOAD_LINK_KEYS,
59 MGMT_OP_LOAD_LONG_TERM_KEYS,
61 MGMT_OP_GET_CONNECTIONS,
62 MGMT_OP_PIN_CODE_REPLY,
63 MGMT_OP_PIN_CODE_NEG_REPLY,
64 MGMT_OP_SET_IO_CAPABILITY,
66 MGMT_OP_CANCEL_PAIR_DEVICE,
67 MGMT_OP_UNPAIR_DEVICE,
68 MGMT_OP_USER_CONFIRM_REPLY,
69 MGMT_OP_USER_CONFIRM_NEG_REPLY,
70 MGMT_OP_USER_PASSKEY_REPLY,
71 MGMT_OP_USER_PASSKEY_NEG_REPLY,
72 MGMT_OP_READ_LOCAL_OOB_DATA,
73 MGMT_OP_ADD_REMOTE_OOB_DATA,
74 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
75 MGMT_OP_START_DISCOVERY,
76 MGMT_OP_STOP_DISCOVERY,
79 MGMT_OP_UNBLOCK_DEVICE,
80 MGMT_OP_SET_DEVICE_ID,
81 MGMT_OP_SET_ADVERTISING,
83 MGMT_OP_SET_STATIC_ADDRESS,
84 MGMT_OP_SET_SCAN_PARAMS,
85 MGMT_OP_SET_SECURE_CONN,
86 MGMT_OP_SET_DEBUG_KEYS,
89 MGMT_OP_GET_CONN_INFO,
90 MGMT_OP_GET_CLOCK_INFO,
92 MGMT_OP_REMOVE_DEVICE,
93 MGMT_OP_LOAD_CONN_PARAM,
94 MGMT_OP_READ_UNCONF_INDEX_LIST,
95 MGMT_OP_READ_CONFIG_INFO,
96 MGMT_OP_SET_EXTERNAL_CONFIG,
97 MGMT_OP_SET_PUBLIC_ADDRESS,
98 MGMT_OP_START_SERVICE_DISCOVERY,
99 MGMT_OP_READ_EXT_INDEX_LIST,
102 static const u16 mgmt_events[] = {
103 MGMT_EV_CONTROLLER_ERROR,
105 MGMT_EV_INDEX_REMOVED,
106 MGMT_EV_NEW_SETTINGS,
107 MGMT_EV_CLASS_OF_DEV_CHANGED,
108 MGMT_EV_LOCAL_NAME_CHANGED,
109 MGMT_EV_NEW_LINK_KEY,
110 MGMT_EV_NEW_LONG_TERM_KEY,
111 MGMT_EV_DEVICE_CONNECTED,
112 MGMT_EV_DEVICE_DISCONNECTED,
113 MGMT_EV_CONNECT_FAILED,
114 MGMT_EV_PIN_CODE_REQUEST,
115 MGMT_EV_USER_CONFIRM_REQUEST,
116 MGMT_EV_USER_PASSKEY_REQUEST,
118 MGMT_EV_DEVICE_FOUND,
120 MGMT_EV_DEVICE_BLOCKED,
121 MGMT_EV_DEVICE_UNBLOCKED,
122 MGMT_EV_DEVICE_UNPAIRED,
123 MGMT_EV_PASSKEY_NOTIFY,
126 MGMT_EV_DEVICE_ADDED,
127 MGMT_EV_DEVICE_REMOVED,
128 MGMT_EV_NEW_CONN_PARAM,
129 MGMT_EV_UNCONF_INDEX_ADDED,
130 MGMT_EV_UNCONF_INDEX_REMOVED,
131 MGMT_EV_NEW_CONFIG_OPTIONS,
132 MGMT_EV_EXT_INDEX_ADDED,
133 MGMT_EV_EXT_INDEX_REMOVED,
136 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
138 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
139 "\x00\x00\x00\x00\x00\x00\x00\x00"
141 struct mgmt_pending_cmd {
142 struct list_head list;
149 int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
152 /* HCI to MGMT error code conversion table */
153 static u8 mgmt_status_table[] = {
155 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
156 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
157 MGMT_STATUS_FAILED, /* Hardware Failure */
158 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
159 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
160 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
161 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
162 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
163 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
164 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
165 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
166 MGMT_STATUS_BUSY, /* Command Disallowed */
167 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
168 MGMT_STATUS_REJECTED, /* Rejected Security */
169 MGMT_STATUS_REJECTED, /* Rejected Personal */
170 MGMT_STATUS_TIMEOUT, /* Host Timeout */
171 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
172 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
173 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
174 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
175 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
176 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
177 MGMT_STATUS_BUSY, /* Repeated Attempts */
178 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
179 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
180 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
181 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
182 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
183 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
184 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
185 MGMT_STATUS_FAILED, /* Unspecified Error */
186 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
187 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
188 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
189 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
190 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
191 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
192 MGMT_STATUS_FAILED, /* Unit Link Key Used */
193 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
194 MGMT_STATUS_TIMEOUT, /* Instant Passed */
195 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
196 MGMT_STATUS_FAILED, /* Transaction Collision */
197 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
198 MGMT_STATUS_REJECTED, /* QoS Rejected */
199 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
200 MGMT_STATUS_REJECTED, /* Insufficient Security */
201 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
202 MGMT_STATUS_BUSY, /* Role Switch Pending */
203 MGMT_STATUS_FAILED, /* Slot Violation */
204 MGMT_STATUS_FAILED, /* Role Switch Failed */
205 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
206 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
207 MGMT_STATUS_BUSY, /* Host Busy Pairing */
208 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
209 MGMT_STATUS_BUSY, /* Controller Busy */
210 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
211 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
212 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
213 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
214 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
217 static u8 mgmt_status(u8 hci_status)
219 if (hci_status < ARRAY_SIZE(mgmt_status_table))
220 return mgmt_status_table[hci_status];
222 return MGMT_STATUS_FAILED;
225 static int mgmt_send_event(u16 event, struct hci_dev *hdev,
226 unsigned short channel, void *data, u16 data_len,
227 struct sock *skip_sk)
230 struct mgmt_hdr *hdr;
232 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
236 hdr = (void *) skb_put(skb, sizeof(*hdr));
237 hdr->opcode = cpu_to_le16(event);
239 hdr->index = cpu_to_le16(hdev->id);
241 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
242 hdr->len = cpu_to_le16(data_len);
245 memcpy(skb_put(skb, data_len), data, data_len);
248 __net_timestamp(skb);
250 hci_send_to_channel(channel, skb, skip_sk);
256 static int mgmt_index_event(u16 event, struct hci_dev *hdev,
257 void *data, u16 data_len, int flag)
260 struct mgmt_hdr *hdr;
262 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
266 hdr = (void *) skb_put(skb, sizeof(*hdr));
267 hdr->opcode = cpu_to_le16(event);
268 hdr->index = cpu_to_le16(hdev->id);
269 hdr->len = cpu_to_le16(data_len);
272 memcpy(skb_put(skb, data_len), data, data_len);
275 __net_timestamp(skb);
277 hci_send_to_flagged_channel(HCI_CHANNEL_CONTROL, skb, flag);
283 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
284 struct sock *skip_sk)
286 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
290 static int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
293 struct mgmt_hdr *hdr;
294 struct mgmt_ev_cmd_status *ev;
297 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
299 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
303 hdr = (void *) skb_put(skb, sizeof(*hdr));
305 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
306 hdr->index = cpu_to_le16(index);
307 hdr->len = cpu_to_le16(sizeof(*ev));
309 ev = (void *) skb_put(skb, sizeof(*ev));
311 ev->opcode = cpu_to_le16(cmd);
313 err = sock_queue_rcv_skb(sk, skb);
320 static int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
321 void *rp, size_t rp_len)
324 struct mgmt_hdr *hdr;
325 struct mgmt_ev_cmd_complete *ev;
328 BT_DBG("sock %p", sk);
330 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
334 hdr = (void *) skb_put(skb, sizeof(*hdr));
336 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
337 hdr->index = cpu_to_le16(index);
338 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
340 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
341 ev->opcode = cpu_to_le16(cmd);
345 memcpy(ev->data, rp, rp_len);
347 err = sock_queue_rcv_skb(sk, skb);
354 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
357 struct mgmt_rp_read_version rp;
359 BT_DBG("sock %p", sk);
361 rp.version = MGMT_VERSION;
362 rp.revision = cpu_to_le16(MGMT_REVISION);
364 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
368 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
371 struct mgmt_rp_read_commands *rp;
372 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
373 const u16 num_events = ARRAY_SIZE(mgmt_events);
378 BT_DBG("sock %p", sk);
380 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
382 rp = kmalloc(rp_size, GFP_KERNEL);
386 rp->num_commands = cpu_to_le16(num_commands);
387 rp->num_events = cpu_to_le16(num_events);
389 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
390 put_unaligned_le16(mgmt_commands[i], opcode);
392 for (i = 0; i < num_events; i++, opcode++)
393 put_unaligned_le16(mgmt_events[i], opcode);
395 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
402 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
405 struct mgmt_rp_read_index_list *rp;
411 BT_DBG("sock %p", sk);
413 read_lock(&hci_dev_list_lock);
416 list_for_each_entry(d, &hci_dev_list, list) {
417 if (d->dev_type == HCI_BREDR &&
418 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
422 rp_len = sizeof(*rp) + (2 * count);
423 rp = kmalloc(rp_len, GFP_ATOMIC);
425 read_unlock(&hci_dev_list_lock);
430 list_for_each_entry(d, &hci_dev_list, list) {
431 if (hci_dev_test_flag(d, HCI_SETUP) ||
432 hci_dev_test_flag(d, HCI_CONFIG) ||
433 hci_dev_test_flag(d, HCI_USER_CHANNEL))
436 /* Devices marked as raw-only are neither configured
437 * nor unconfigured controllers.
439 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
442 if (d->dev_type == HCI_BREDR &&
443 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
444 rp->index[count++] = cpu_to_le16(d->id);
445 BT_DBG("Added hci%u", d->id);
449 rp->num_controllers = cpu_to_le16(count);
450 rp_len = sizeof(*rp) + (2 * count);
452 read_unlock(&hci_dev_list_lock);
454 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
462 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
463 void *data, u16 data_len)
465 struct mgmt_rp_read_unconf_index_list *rp;
471 BT_DBG("sock %p", sk);
473 read_lock(&hci_dev_list_lock);
476 list_for_each_entry(d, &hci_dev_list, list) {
477 if (d->dev_type == HCI_BREDR &&
478 hci_dev_test_flag(d, HCI_UNCONFIGURED))
482 rp_len = sizeof(*rp) + (2 * count);
483 rp = kmalloc(rp_len, GFP_ATOMIC);
485 read_unlock(&hci_dev_list_lock);
490 list_for_each_entry(d, &hci_dev_list, list) {
491 if (hci_dev_test_flag(d, HCI_SETUP) ||
492 hci_dev_test_flag(d, HCI_CONFIG) ||
493 hci_dev_test_flag(d, HCI_USER_CHANNEL))
496 /* Devices marked as raw-only are neither configured
497 * nor unconfigured controllers.
499 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
502 if (d->dev_type == HCI_BREDR &&
503 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
504 rp->index[count++] = cpu_to_le16(d->id);
505 BT_DBG("Added hci%u", d->id);
509 rp->num_controllers = cpu_to_le16(count);
510 rp_len = sizeof(*rp) + (2 * count);
512 read_unlock(&hci_dev_list_lock);
514 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
515 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
522 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
523 void *data, u16 data_len)
525 struct mgmt_rp_read_ext_index_list *rp;
531 BT_DBG("sock %p", sk);
533 read_lock(&hci_dev_list_lock);
536 list_for_each_entry(d, &hci_dev_list, list) {
537 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
541 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
542 rp = kmalloc(rp_len, GFP_ATOMIC);
544 read_unlock(&hci_dev_list_lock);
549 list_for_each_entry(d, &hci_dev_list, list) {
550 if (hci_dev_test_flag(d, HCI_SETUP) ||
551 hci_dev_test_flag(d, HCI_CONFIG) ||
552 hci_dev_test_flag(d, HCI_USER_CHANNEL))
555 /* Devices marked as raw-only are neither configured
556 * nor unconfigured controllers.
558 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
561 if (d->dev_type == HCI_BREDR) {
562 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
563 rp->entry[count].type = 0x01;
565 rp->entry[count].type = 0x00;
566 } else if (d->dev_type == HCI_AMP) {
567 rp->entry[count].type = 0x02;
572 rp->entry[count].bus = d->bus;
573 rp->entry[count++].index = cpu_to_le16(d->id);
574 BT_DBG("Added hci%u", d->id);
577 rp->num_controllers = cpu_to_le16(count);
578 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
580 read_unlock(&hci_dev_list_lock);
582 /* If this command is called at least once, then all the
583 * default index and unconfigured index events are disabled
584 * and from now on only extended index events are used.
586 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
587 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
588 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
590 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
591 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
598 static bool is_configured(struct hci_dev *hdev)
600 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
601 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
604 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
605 !bacmp(&hdev->public_addr, BDADDR_ANY))
611 static __le32 get_missing_options(struct hci_dev *hdev)
615 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 options |= MGMT_OPTION_EXTERNAL_CONFIG;
619 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
620 !bacmp(&hdev->public_addr, BDADDR_ANY))
621 options |= MGMT_OPTION_PUBLIC_ADDRESS;
623 return cpu_to_le32(options);
626 static int new_options(struct hci_dev *hdev, struct sock *skip)
628 __le32 options = get_missing_options(hdev);
630 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
631 sizeof(options), skip);
634 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
636 __le32 options = get_missing_options(hdev);
638 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
642 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
643 void *data, u16 data_len)
645 struct mgmt_rp_read_config_info rp;
648 BT_DBG("sock %p %s", sk, hdev->name);
652 memset(&rp, 0, sizeof(rp));
653 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
655 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
656 options |= MGMT_OPTION_EXTERNAL_CONFIG;
658 if (hdev->set_bdaddr)
659 options |= MGMT_OPTION_PUBLIC_ADDRESS;
661 rp.supported_options = cpu_to_le32(options);
662 rp.missing_options = get_missing_options(hdev);
664 hci_dev_unlock(hdev);
666 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
670 static u32 get_supported_settings(struct hci_dev *hdev)
674 settings |= MGMT_SETTING_POWERED;
675 settings |= MGMT_SETTING_BONDABLE;
676 settings |= MGMT_SETTING_DEBUG_KEYS;
677 settings |= MGMT_SETTING_CONNECTABLE;
678 settings |= MGMT_SETTING_DISCOVERABLE;
680 if (lmp_bredr_capable(hdev)) {
681 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
682 settings |= MGMT_SETTING_FAST_CONNECTABLE;
683 settings |= MGMT_SETTING_BREDR;
684 settings |= MGMT_SETTING_LINK_SECURITY;
686 if (lmp_ssp_capable(hdev)) {
687 settings |= MGMT_SETTING_SSP;
688 settings |= MGMT_SETTING_HS;
691 if (lmp_sc_capable(hdev))
692 settings |= MGMT_SETTING_SECURE_CONN;
695 if (lmp_le_capable(hdev)) {
696 settings |= MGMT_SETTING_LE;
697 settings |= MGMT_SETTING_ADVERTISING;
698 settings |= MGMT_SETTING_SECURE_CONN;
699 settings |= MGMT_SETTING_PRIVACY;
700 settings |= MGMT_SETTING_STATIC_ADDRESS;
703 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
705 settings |= MGMT_SETTING_CONFIGURATION;
710 static u32 get_current_settings(struct hci_dev *hdev)
714 if (hdev_is_powered(hdev))
715 settings |= MGMT_SETTING_POWERED;
717 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
718 settings |= MGMT_SETTING_CONNECTABLE;
720 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
721 settings |= MGMT_SETTING_FAST_CONNECTABLE;
723 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
724 settings |= MGMT_SETTING_DISCOVERABLE;
726 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
727 settings |= MGMT_SETTING_BONDABLE;
729 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
730 settings |= MGMT_SETTING_BREDR;
732 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
733 settings |= MGMT_SETTING_LE;
735 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
736 settings |= MGMT_SETTING_LINK_SECURITY;
738 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
739 settings |= MGMT_SETTING_SSP;
741 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
742 settings |= MGMT_SETTING_HS;
744 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
745 settings |= MGMT_SETTING_ADVERTISING;
747 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
748 settings |= MGMT_SETTING_SECURE_CONN;
750 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
751 settings |= MGMT_SETTING_DEBUG_KEYS;
753 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
754 settings |= MGMT_SETTING_PRIVACY;
756 /* The current setting for static address has two purposes. The
757 * first is to indicate if the static address will be used and
758 * the second is to indicate if it is actually set.
760 * This means if the static address is not configured, this flag
761 * will never bet set. If the address is configured, then if the
762 * address is actually used decides if the flag is set or not.
764 * For single mode LE only controllers and dual-mode controllers
765 * with BR/EDR disabled, the existence of the static address will
768 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
769 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
770 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
771 if (bacmp(&hdev->static_addr, BDADDR_ANY))
772 settings |= MGMT_SETTING_STATIC_ADDRESS;
778 #define PNP_INFO_SVCLASS_ID 0x1200
780 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
782 u8 *ptr = data, *uuids_start = NULL;
783 struct bt_uuid *uuid;
788 list_for_each_entry(uuid, &hdev->uuids, list) {
791 if (uuid->size != 16)
794 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
798 if (uuid16 == PNP_INFO_SVCLASS_ID)
804 uuids_start[1] = EIR_UUID16_ALL;
808 /* Stop if not enough space to put next UUID */
809 if ((ptr - data) + sizeof(u16) > len) {
810 uuids_start[1] = EIR_UUID16_SOME;
814 *ptr++ = (uuid16 & 0x00ff);
815 *ptr++ = (uuid16 & 0xff00) >> 8;
816 uuids_start[0] += sizeof(uuid16);
822 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
824 u8 *ptr = data, *uuids_start = NULL;
825 struct bt_uuid *uuid;
830 list_for_each_entry(uuid, &hdev->uuids, list) {
831 if (uuid->size != 32)
837 uuids_start[1] = EIR_UUID32_ALL;
841 /* Stop if not enough space to put next UUID */
842 if ((ptr - data) + sizeof(u32) > len) {
843 uuids_start[1] = EIR_UUID32_SOME;
847 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
849 uuids_start[0] += sizeof(u32);
855 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
857 u8 *ptr = data, *uuids_start = NULL;
858 struct bt_uuid *uuid;
863 list_for_each_entry(uuid, &hdev->uuids, list) {
864 if (uuid->size != 128)
870 uuids_start[1] = EIR_UUID128_ALL;
874 /* Stop if not enough space to put next UUID */
875 if ((ptr - data) + 16 > len) {
876 uuids_start[1] = EIR_UUID128_SOME;
880 memcpy(ptr, uuid->uuid, 16);
882 uuids_start[0] += 16;
888 static struct mgmt_pending_cmd *mgmt_pending_find(u16 opcode,
889 struct hci_dev *hdev)
891 struct mgmt_pending_cmd *cmd;
893 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
894 if (cmd->opcode == opcode)
901 static struct mgmt_pending_cmd *mgmt_pending_find_data(u16 opcode,
902 struct hci_dev *hdev,
905 struct mgmt_pending_cmd *cmd;
907 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
908 if (cmd->user_data != data)
910 if (cmd->opcode == opcode)
917 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
922 name_len = strlen(hdev->dev_name);
924 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
926 if (name_len > max_len) {
928 ptr[1] = EIR_NAME_SHORT;
930 ptr[1] = EIR_NAME_COMPLETE;
932 ptr[0] = name_len + 1;
934 memcpy(ptr + 2, hdev->dev_name, name_len);
936 ad_len += (name_len + 2);
937 ptr += (name_len + 2);
943 static void update_scan_rsp_data(struct hci_request *req)
945 struct hci_dev *hdev = req->hdev;
946 struct hci_cp_le_set_scan_rsp_data cp;
949 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
952 memset(&cp, 0, sizeof(cp));
954 len = create_scan_rsp_data(hdev, cp.data);
956 if (hdev->scan_rsp_data_len == len &&
957 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
960 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
961 hdev->scan_rsp_data_len = len;
965 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
968 static u8 get_adv_discov_flags(struct hci_dev *hdev)
970 struct mgmt_pending_cmd *cmd;
972 /* If there's a pending mgmt command the flags will not yet have
973 * their final values, so check for this first.
975 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
977 struct mgmt_mode *cp = cmd->param;
979 return LE_AD_GENERAL;
980 else if (cp->val == 0x02)
981 return LE_AD_LIMITED;
983 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
984 return LE_AD_LIMITED;
985 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
986 return LE_AD_GENERAL;
992 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
994 u8 ad_len = 0, flags = 0;
996 flags |= get_adv_discov_flags(hdev);
998 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
999 flags |= LE_AD_NO_BREDR;
1002 BT_DBG("adv flags 0x%02x", flags);
1012 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1014 ptr[1] = EIR_TX_POWER;
1015 ptr[2] = (u8) hdev->adv_tx_power;
1024 static void update_adv_data(struct hci_request *req)
1026 struct hci_dev *hdev = req->hdev;
1027 struct hci_cp_le_set_adv_data cp;
1030 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1033 memset(&cp, 0, sizeof(cp));
1035 len = create_adv_data(hdev, cp.data);
1037 if (hdev->adv_data_len == len &&
1038 memcmp(cp.data, hdev->adv_data, len) == 0)
1041 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1042 hdev->adv_data_len = len;
1046 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1049 int mgmt_update_adv_data(struct hci_dev *hdev)
1051 struct hci_request req;
1053 hci_req_init(&req, hdev);
1054 update_adv_data(&req);
1056 return hci_req_run(&req, NULL);
1059 static void create_eir(struct hci_dev *hdev, u8 *data)
1064 name_len = strlen(hdev->dev_name);
1068 if (name_len > 48) {
1070 ptr[1] = EIR_NAME_SHORT;
1072 ptr[1] = EIR_NAME_COMPLETE;
1074 /* EIR Data length */
1075 ptr[0] = name_len + 1;
1077 memcpy(ptr + 2, hdev->dev_name, name_len);
1079 ptr += (name_len + 2);
1082 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1084 ptr[1] = EIR_TX_POWER;
1085 ptr[2] = (u8) hdev->inq_tx_power;
1090 if (hdev->devid_source > 0) {
1092 ptr[1] = EIR_DEVICE_ID;
1094 put_unaligned_le16(hdev->devid_source, ptr + 2);
1095 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1096 put_unaligned_le16(hdev->devid_product, ptr + 6);
1097 put_unaligned_le16(hdev->devid_version, ptr + 8);
1102 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1103 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1104 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1107 static void update_eir(struct hci_request *req)
1109 struct hci_dev *hdev = req->hdev;
1110 struct hci_cp_write_eir cp;
1112 if (!hdev_is_powered(hdev))
1115 if (!lmp_ext_inq_capable(hdev))
1118 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1121 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1124 memset(&cp, 0, sizeof(cp));
1126 create_eir(hdev, cp.data);
1128 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1131 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1133 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1136 static u8 get_service_classes(struct hci_dev *hdev)
1138 struct bt_uuid *uuid;
1141 list_for_each_entry(uuid, &hdev->uuids, list)
1142 val |= uuid->svc_hint;
1147 static void update_class(struct hci_request *req)
1149 struct hci_dev *hdev = req->hdev;
1152 BT_DBG("%s", hdev->name);
1154 if (!hdev_is_powered(hdev))
1157 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1160 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1163 cod[0] = hdev->minor_class;
1164 cod[1] = hdev->major_class;
1165 cod[2] = get_service_classes(hdev);
1167 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1170 if (memcmp(cod, hdev->dev_class, 3) == 0)
1173 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1176 static bool get_connectable(struct hci_dev *hdev)
1178 struct mgmt_pending_cmd *cmd;
1180 /* If there's a pending mgmt command the flag will not yet have
1181 * it's final value, so check for this first.
1183 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1185 struct mgmt_mode *cp = cmd->param;
1189 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1192 static void disable_advertising(struct hci_request *req)
1196 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1199 static void enable_advertising(struct hci_request *req)
1201 struct hci_dev *hdev = req->hdev;
1202 struct hci_cp_le_set_adv_param cp;
1203 u8 own_addr_type, enable = 0x01;
1206 if (hci_conn_num(hdev, LE_LINK) > 0)
1209 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1210 disable_advertising(req);
1212 /* Clear the HCI_LE_ADV bit temporarily so that the
1213 * hci_update_random_address knows that it's safe to go ahead
1214 * and write a new random address. The flag will be set back on
1215 * as soon as the SET_ADV_ENABLE HCI command completes.
1217 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1219 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1222 connectable = get_connectable(hdev);
1224 /* Set require_privacy to true only when non-connectable
1225 * advertising is used. In that case it is fine to use a
1226 * non-resolvable private address.
1228 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1231 memset(&cp, 0, sizeof(cp));
1232 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1233 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1234 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1235 cp.own_address_type = own_addr_type;
1236 cp.channel_map = hdev->le_adv_channel_map;
1238 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1240 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1243 static void service_cache_off(struct work_struct *work)
1245 struct hci_dev *hdev = container_of(work, struct hci_dev,
1246 service_cache.work);
1247 struct hci_request req;
1249 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1252 hci_req_init(&req, hdev);
1259 hci_dev_unlock(hdev);
1261 hci_req_run(&req, NULL);
1264 static void rpa_expired(struct work_struct *work)
1266 struct hci_dev *hdev = container_of(work, struct hci_dev,
1268 struct hci_request req;
1272 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1274 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1277 /* The generation of a new RPA and programming it into the
1278 * controller happens in the enable_advertising() function.
1280 hci_req_init(&req, hdev);
1281 enable_advertising(&req);
1282 hci_req_run(&req, NULL);
1285 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1287 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1290 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1291 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1293 /* Non-mgmt controlled devices get this bit set
1294 * implicitly so that pairing works for them, however
1295 * for mgmt we require user-space to explicitly enable
1298 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1301 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1302 void *data, u16 data_len)
1304 struct mgmt_rp_read_info rp;
1306 BT_DBG("sock %p %s", sk, hdev->name);
1310 memset(&rp, 0, sizeof(rp));
1312 bacpy(&rp.bdaddr, &hdev->bdaddr);
1314 rp.version = hdev->hci_ver;
1315 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1317 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1318 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1320 memcpy(rp.dev_class, hdev->dev_class, 3);
1322 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1323 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1325 hci_dev_unlock(hdev);
1327 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1331 static void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
1338 static struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1339 struct hci_dev *hdev,
1340 void *data, u16 len)
1342 struct mgmt_pending_cmd *cmd;
1344 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1348 cmd->opcode = opcode;
1349 cmd->index = hdev->id;
1351 cmd->param = kmemdup(data, len, GFP_KERNEL);
1357 cmd->param_len = len;
1362 list_add(&cmd->list, &hdev->mgmt_pending);
1367 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1368 void (*cb)(struct mgmt_pending_cmd *cmd,
1372 struct mgmt_pending_cmd *cmd, *tmp;
1374 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1375 if (opcode > 0 && cmd->opcode != opcode)
1382 static void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
1384 list_del(&cmd->list);
1385 mgmt_pending_free(cmd);
1388 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1390 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1392 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1396 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1398 BT_DBG("%s status 0x%02x", hdev->name, status);
1400 if (hci_conn_count(hdev) == 0) {
1401 cancel_delayed_work(&hdev->power_off);
1402 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Append the HCI commands needed to stop any ongoing discovery (inquiry,
 * LE scan, or name resolution) to req. Returns whether anything was
 * queued (visible in callers as discov_stopped). NOTE(review): case
 * bodies are truncated in this extraction (missing break/return lines).
 */
1406 static bool hci_stop_discovery(struct hci_request *req)
1408 struct hci_dev *hdev = req->hdev;
1409 struct hci_cp_remote_name_req_cancel cp;
1410 struct inquiry_entry *e;
1412 switch (hdev->discovery.state) {
1413 case DISCOVERY_FINDING:
1414 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1415 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* LE scan case: stop the scan-disable timer and disable scanning now. */
1417 cancel_delayed_work(&hdev->le_scan_disable);
1418 hci_req_add_le_scan_disable(req);
1423 case DISCOVERY_RESOLVING:
1424 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1429 bacpy(&cp.bdaddr, &e->data.bdaddr);
1430 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1436 /* Passive scanning */
1437 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1438 hci_req_add_le_scan_disable(req);
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising and discovery,
 * then disconnect/cancel/reject every tracked connection. Returns the
 * hci_req_run() result; -ENODATA means nothing needed doing.
 * NOTE(review): switch case labels and some argument lines are missing
 * from this extraction.
 */
1448 static int clean_up_hci_state(struct hci_dev *hdev)
1450 struct hci_request req;
1451 struct hci_conn *conn;
1452 bool discov_stopped;
1455 hci_req_init(&req, hdev);
1457 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1458 test_bit(HCI_PSCAN, &hdev->flags)) {
1460 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1463 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1464 disable_advertising(&req);
1466 discov_stopped = hci_stop_discovery(&req);
1468 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1469 struct hci_cp_disconnect dc;
1470 struct hci_cp_reject_conn_req rej;
1472 switch (conn->state) {
/* Established link: disconnect with "power off" reason 0x15. */
1475 dc.handle = cpu_to_le16(conn->handle);
1476 dc.reason = 0x15; /* Terminated due to Power Off */
1477 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing connection still in progress: cancel per link type. */
1480 if (conn->type == LE_LINK)
1481 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1483 else if (conn->type == ACL_LINK)
1484 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it. */
1488 bacpy(&rej.bdaddr, &conn->dst);
1489 rej.reason = 0x15; /* Terminated due to Power Off */
1490 if (conn->type == ACL_LINK)
1491 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1493 else if (conn->type == SCO_LINK)
1494 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1500 err = hci_req_run(&req, clean_up_hci_complete);
1501 if (!err && discov_stopped)
1502 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validate the mode byte, reject a second
 * concurrent request, short-circuit when the power state already matches,
 * otherwise queue power_on work or begin the clean shutdown sequence.
 * NOTE(review): several branch bodies and the unlock/return tail are
 * truncated in this extraction.
 */
1507 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1510 struct mgmt_mode *cp = data;
1511 struct mgmt_pending_cmd *cmd;
1514 BT_DBG("request for %s", hdev->name);
/* Only 0x00 (off) and 0x01 (on) are valid. */
1516 if (cp->val != 0x00 && cp->val != 0x01)
1517 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1518 MGMT_STATUS_INVALID_PARAMS);
1522 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1523 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: cancel it and treat the device as already up. */
1528 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1529 cancel_delayed_work(&hdev->power_off);
1532 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1534 err = mgmt_powered(hdev, 1);
1539 if (!!cp->val == hdev_is_powered(hdev)) {
1540 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1544 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1551 queue_work(hdev->req_workqueue, &hdev->power_on);
1554 /* Disconnect connections, stop scans, etc */
1555 err = clean_up_hci_state(hdev);
/* Arm a fallback power-off in case the cleanup request stalls. */
1557 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1558 HCI_POWER_OFF_TIMEOUT);
1560 /* ENODATA means there were no HCI commands queued */
1561 if (err == -ENODATA) {
1562 cancel_delayed_work(&hdev->power_off);
1563 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1569 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings, skipping the
 * socket that triggered the change (it gets a command response instead).
 */
1573 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1577 ev = cpu_to_le32(get_current_settings(hdev));
1579 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Public wrapper: emit New Settings to all mgmt sockets. */
1582 int mgmt_new_settings(struct hci_dev *hdev)
1584 return new_settings(hdev, NULL);
1589 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer each pending command with the
 * current settings, remember the first socket in the cmd_lookup match
 * (holding a reference) and free the command.
 */
1593 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1595 struct cmd_lookup *match = data;
1597 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1599 list_del(&cmd->list);
1601 if (match->sk == NULL) {
1602 match->sk = cmd->sk;
1603 sock_hold(match->sk);
1606 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail the command with *status (a u8
 * pointed to by data — declaration line missing here) and remove it.
 */
1609 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1613 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1614 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own cmd_complete
 * handler when set; otherwise fall back to a plain status response.
 */
1617 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1619 if (cmd->cmd_complete) {
1622 cmd->cmd_complete(cmd, *status);
1623 mgmt_pending_remove(cmd);
1628 cmd_status_rsp(cmd, data);
/* Generic cmd_complete: echo back the stored request parameters. */
1631 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1633 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1634 cmd->param, cmd->param_len);
/* cmd_complete for address-based commands: reply with only the leading
 * mgmt_addr_info portion of the stored parameters.
 */
1637 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1639 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1640 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED if the radio
 * lacks BR/EDR, REJECTED if BR/EDR is disabled, SUCCESS otherwise.
 */
1643 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1645 if (!lmp_bredr_capable(hdev))
1646 return MGMT_STATUS_NOT_SUPPORTED;
1647 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1648 return MGMT_STATUS_REJECTED;
1650 return MGMT_STATUS_SUCCESS;
/* LE counterpart of mgmt_bredr_support(): same status mapping for the
 * LE feature and the HCI_LE_ENABLED flag.
 */
1653 static u8 mgmt_le_support(struct hci_dev *hdev)
1655 if (!lmp_le_capable(hdev))
1656 return MGMT_STATUS_NOT_SUPPORTED;
1657 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1658 return MGMT_STATUS_REJECTED;
1660 return MGMT_STATUS_SUCCESS;
/* HCI completion for Set Discoverable: on failure report the status and
 * drop HCI_LIMITED_DISCOVERABLE; on success flip HCI_DISCOVERABLE, arm
 * the discoverable timeout if one was requested, reply to the caller and
 * refresh class/page-scan state. NOTE(review): some branch/label lines
 * are missing from this extraction.
 */
1663 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1666 struct mgmt_pending_cmd *cmd;
1667 struct mgmt_mode *cp;
1668 struct hci_request req;
1671 BT_DBG("status 0x%02x", status);
1675 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1680 u8 mgmt_err = mgmt_status(status);
1681 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1682 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1688 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1690 if (hdev->discov_timeout > 0) {
1691 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1692 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1696 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1699 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1702 new_settings(hdev, cmd->sk);
1704 /* When the discoverable mode gets changed, make sure
1705 * that class of device has the limited discoverable
1706 * bit correctly set. Also update page scan based on whitelist
1709 hci_req_init(&req, hdev);
1710 __hci_update_page_scan(&req);
1712 hci_req_run(&req, NULL);
1715 mgmt_pending_remove(cmd);
1718 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited (limited requires a timeout, off forbids one). Handles
 * powered-off flag-only updates and timeout-only updates without HCI
 * traffic, else builds an IAC/scan-enable (BR/EDR) or advertising-data
 * (LE-only) request completed by set_discoverable_complete().
 * NOTE(review): numerous lines (goto targets, else branches, unlock
 * tail) are missing from this extraction.
 */
1721 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1724 struct mgmt_cp_set_discoverable *cp = data;
1725 struct mgmt_pending_cmd *cmd;
1726 struct hci_request req;
1731 BT_DBG("request for %s", hdev->name);
1733 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1734 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1735 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1736 MGMT_STATUS_REJECTED);
1738 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1740 MGMT_STATUS_INVALID_PARAMS);
1742 timeout = __le16_to_cpu(cp->timeout);
1744 /* Disabling discoverable requires that no timeout is set,
1745 * and enabling limited discoverable requires a timeout.
1747 if ((cp->val == 0x00 && timeout > 0) ||
1748 (cp->val == 0x02 && timeout == 0))
1749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1750 MGMT_STATUS_INVALID_PARAMS);
1754 if (!hdev_is_powered(hdev) && timeout > 0) {
1755 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1756 MGMT_STATUS_NOT_POWERED);
/* Only one scan-mode-changing command may be outstanding at a time. */
1760 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1761 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1762 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first. */
1767 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1768 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1769 MGMT_STATUS_REJECTED);
1773 if (!hdev_is_powered(hdev)) {
1774 bool changed = false;
1776 /* Setting limited discoverable when powered off is
1777 * not a valid operation since it requires a timeout
1778 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1780 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1781 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1785 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1790 err = new_settings(hdev, sk);
1795 /* If the current mode is the same, then just update the timeout
1796 * value with the new value. And if only the timeout gets updated,
1797 * then no need for any HCI transactions.
1799 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1800 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1801 HCI_LIMITED_DISCOVERABLE)) {
1802 cancel_delayed_work(&hdev->discov_off);
1803 hdev->discov_timeout = timeout;
1805 if (cp->val && hdev->discov_timeout > 0) {
1806 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1807 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1811 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1815 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1821 /* Cancel any potential discoverable timeout that might be
1822 * still active and store new timeout value. The arming of
1823 * the timeout happens in the complete handler.
1825 cancel_delayed_work(&hdev->discov_off);
1826 hdev->discov_timeout = timeout;
1828 /* Limited discoverable mode */
1829 if (cp->val == 0x02)
1830 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1832 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1834 hci_req_init(&req, hdev);
1836 /* The procedure for LE-only controllers is much simpler - just
1837 * update the advertising data.
1839 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1845 struct hci_cp_write_current_iac_lap hci_cp;
1847 if (cp->val == 0x02) {
1848 /* Limited discoverable mode */
1849 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9e8b00 and GIAC 0x9e8b33, little-endian byte order. */
1850 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1851 hci_cp.iac_lap[1] = 0x8b;
1852 hci_cp.iac_lap[2] = 0x9e;
1853 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1854 hci_cp.iac_lap[4] = 0x8b;
1855 hci_cp.iac_lap[5] = 0x9e;
1857 /* General discoverable mode */
1859 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1860 hci_cp.iac_lap[1] = 0x8b;
1861 hci_cp.iac_lap[2] = 0x9e;
1864 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1865 (hci_cp.num_iac * 3) + 1, &hci_cp);
1867 scan |= SCAN_INQUIRY;
1869 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1872 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1875 update_adv_data(&req);
1877 err = hci_req_run(&req, set_discoverable_complete);
1879 mgmt_pending_remove(cmd);
1882 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing "fast connectable":
 * interlaced scan with 160 ms interval when enabled, standard scan with
 * the 1.28 s default otherwise. Commands are only added when the values
 * actually change. No-op on LE-only or pre-1.2 controllers.
 */
1886 static void write_fast_connectable(struct hci_request *req, bool enable)
1888 struct hci_dev *hdev = req->hdev;
1889 struct hci_cp_write_page_scan_activity acp;
1892 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1895 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1899 type = PAGE_SCAN_TYPE_INTERLACED;
1901 /* 160 msec page scan interval */
1902 acp.interval = cpu_to_le16(0x0100);
1904 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1906 /* default 1.28 sec page scan */
1907 acp.interval = cpu_to_le16(0x0800);
/* 11.25 ms page scan window in both modes. */
1910 acp.window = cpu_to_le16(0x0012);
1912 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1913 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1914 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1917 if (hdev->page_scan_type != type)
1918 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI completion for Set Connectable: update HCI_CONNECTABLE (and, when
 * disabling, HCI_DISCOVERABLE), reply to the caller, and when something
 * changed broadcast new settings and refresh page/background scanning.
 * NOTE(review): the success/failure branch structure is truncated here.
 */
1921 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1924 struct mgmt_pending_cmd *cmd;
1925 struct mgmt_mode *cp;
1926 bool conn_changed, discov_changed;
1928 BT_DBG("status 0x%02x", status);
1932 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1937 u8 mgmt_err = mgmt_status(status);
1938 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1944 conn_changed = !hci_dev_test_and_set_flag(hdev,
1946 discov_changed = false;
1948 conn_changed = hci_dev_test_and_clear_flag(hdev,
1950 discov_changed = hci_dev_test_and_clear_flag(hdev,
1954 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1956 if (conn_changed || discov_changed) {
1957 new_settings(hdev, cmd->sk);
1958 hci_update_page_scan(hdev);
1960 mgmt_update_adv_data(hdev);
1961 hci_update_background_scan(hdev);
1965 mgmt_pending_remove(cmd);
1968 hci_dev_unlock(hdev);
/* Flag-only Set Connectable path (used when powered off or when no HCI
 * commands were needed): toggle HCI_CONNECTABLE — clearing discoverable
 * too when disabling — reply, and on change refresh scanning and emit
 * new settings.
 */
1971 static int set_connectable_update_settings(struct hci_dev *hdev,
1972 struct sock *sk, u8 val)
1974 bool changed = false;
1977 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1981 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1983 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Connectable off implies discoverable off as well. */
1984 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1987 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1992 hci_update_page_scan(hdev);
1993 hci_update_background_scan(hdev);
1994 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validate the mode byte, take the
 * flag-only path when powered off, reject concurrent scan-mode commands,
 * then build an HCI request (scan-enable for BR/EDR, advertising-data
 * update for LE-only) completed by set_connectable_complete().
 * NOTE(review): several branch/label lines are missing here.
 */
2000 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2003 struct mgmt_mode *cp = data;
2004 struct mgmt_pending_cmd *cmd;
2005 struct hci_request req;
2009 BT_DBG("request for %s", hdev->name);
2011 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2012 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2013 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2014 MGMT_STATUS_REJECTED);
2016 if (cp->val != 0x00 && cp->val != 0x01)
2017 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2018 MGMT_STATUS_INVALID_PARAMS);
2022 if (!hdev_is_powered(hdev)) {
2023 err = set_connectable_update_settings(hdev, sk, cp->val);
2027 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2028 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2029 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2034 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2040 hci_req_init(&req, hdev);
2042 /* If BR/EDR is not enabled and we disable advertising as a
2043 * by-product of disabling connectable, we need to update the
2044 * advertising flags.
2046 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2048 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2049 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2051 update_adv_data(&req);
2052 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2056 /* If we don't have any whitelist entries just
2057 * disable all scanning. If there are entries
2058 * and we had both page and inquiry scanning
2059 * enabled then fall back to only page scanning.
2060 * Otherwise no changes are needed.
2062 if (list_empty(&hdev->whitelist))
2063 scan = SCAN_DISABLED;
2064 else if (test_bit(HCI_ISCAN, &hdev->flags))
2067 goto no_scan_update;
2069 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2070 hdev->discov_timeout > 0)
2071 cancel_delayed_work(&hdev->discov_off);
2074 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2078 /* Update the advertising parameters if necessary */
2079 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2080 enable_advertising(&req);
2082 err = hci_req_run(&req, set_connectable_complete);
2084 mgmt_pending_remove(cmd);
/* Nothing queued: fall back to the flag-only settings update. */
2085 if (err == -ENODATA)
2086 err = set_connectable_update_settings(hdev, sk,
2092 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: pure host-side flag toggle — no HCI
 * traffic. Replies with settings and broadcasts when the flag changed.
 */
2096 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2099 struct mgmt_mode *cp = data;
2103 BT_DBG("request for %s", hdev->name);
2105 if (cp->val != 0x00 && cp->val != 0x01)
2106 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2107 MGMT_STATUS_INVALID_PARAMS);
2112 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2114 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2116 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2121 err = new_settings(hdev, sk);
2124 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR support. Powered
 * off → flag-only update; powered on → send Write Auth Enable via plain
 * hci_send_cmd (no request batch), skipping it when HCI_AUTH already
 * matches. NOTE(review): busy/short-circuit branch tails are truncated.
 */
2128 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2131 struct mgmt_mode *cp = data;
2132 struct mgmt_pending_cmd *cmd;
2136 BT_DBG("request for %s", hdev->name);
2138 status = mgmt_bredr_support(hdev);
2140 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2143 if (cp->val != 0x00 && cp->val != 0x01)
2144 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2145 MGMT_STATUS_INVALID_PARAMS);
2149 if (!hdev_is_powered(hdev)) {
2150 bool changed = false;
2152 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2153 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2157 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2162 err = new_settings(hdev, sk);
2167 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2168 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: reply immediately. */
2175 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2176 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2180 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2186 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2188 mgmt_pending_remove(cmd);
2193 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP-capable hardware.
 * Powered off → flag-only update (disabling SSP also clears HS); powered
 * on → Write SSP Mode, first turning off debug-key mode when SSP is
 * being disabled while HCI_USE_DEBUG_KEYS is set. NOTE(review): branch
 * bodies between visible lines are missing from this extraction.
 */
2197 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2199 struct mgmt_mode *cp = data;
2200 struct mgmt_pending_cmd *cmd;
2204 BT_DBG("request for %s", hdev->name);
2206 status = mgmt_bredr_support(hdev);
2208 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2210 if (!lmp_ssp_capable(hdev))
2211 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2212 MGMT_STATUS_NOT_SUPPORTED);
2214 if (cp->val != 0x00 && cp->val != 0x01)
2215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2216 MGMT_STATUS_INVALID_PARAMS);
2220 if (!hdev_is_powered(hdev)) {
2224 changed = !hci_dev_test_and_set_flag(hdev,
2227 changed = hci_dev_test_and_clear_flag(hdev,
2230 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP; clear it when SSP goes away. */
2233 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2236 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2241 err = new_settings(hdev, sk);
2246 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2247 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2252 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2253 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2257 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2263 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2264 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2265 sizeof(cp->val), &cp->val);
2267 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2269 mgmt_pending_remove(cmd);
2274 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed / AMP) handler: requires BR/EDR, SSP
 * hardware and SSP enabled. Host-side flag only — no HCI command is
 * sent; disabling while powered is rejected. NOTE(review): some branch
 * lines are missing from this extraction.
 */
2278 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2280 struct mgmt_mode *cp = data;
2285 BT_DBG("request for %s", hdev->name);
2287 status = mgmt_bredr_support(hdev);
2289 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2291 if (!lmp_ssp_capable(hdev))
2292 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2293 MGMT_STATUS_NOT_SUPPORTED);
2295 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2296 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2297 MGMT_STATUS_REJECTED);
2299 if (cp->val != 0x00 && cp->val != 0x01)
2300 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2301 MGMT_STATUS_INVALID_PARAMS);
2305 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2306 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2312 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2314 if (hdev_is_powered(hdev)) {
2315 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2316 MGMT_STATUS_REJECTED);
2320 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2323 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2328 err = new_settings(hdev, sk);
2331 hci_dev_unlock(hdev);
/* HCI completion for Set LE: on failure report status to all pending
 * SET_LE commands; on success answer them with settings, broadcast, and
 * — if LE ended up enabled — refresh advertising data, scan response
 * and background scanning in one request.
 */
2335 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2337 struct cmd_lookup match = { NULL, hdev };
2342 u8 mgmt_err = mgmt_status(status);
2344 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2349 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2351 new_settings(hdev, match.sk);
2356 /* Make sure the controller has a good default for
2357 * advertising data. Restrict the update to when LE
2358 * has actually been enabled. During power on, the
2359 * update in powered_update_hci will take care of it.
2361 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2362 struct hci_request req;
2364 hci_req_init(&req, hdev);
2365 update_adv_data(&req);
2366 update_scan_rsp_data(&req);
2367 __hci_update_background_scan(&req);
2368 hci_req_run(&req, NULL);
2372 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: rejected on non-LE and on LE-only (BR/EDR
 * disabled) controllers. Flag-only when powered off or when the host-LE
 * support already matches; otherwise sends Write LE Host Supported,
 * disabling advertising first when turning LE off. NOTE(review): lines
 * assigning val/hci_cp.le and some branch tails are missing here.
 */
2375 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2377 struct mgmt_mode *cp = data;
2378 struct hci_cp_write_le_host_supported hci_cp;
2379 struct mgmt_pending_cmd *cmd;
2380 struct hci_request req;
2384 BT_DBG("request for %s", hdev->name);
2386 if (!lmp_le_capable(hdev))
2387 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2388 MGMT_STATUS_NOT_SUPPORTED);
2390 if (cp->val != 0x00 && cp->val != 0x01)
2391 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2392 MGMT_STATUS_INVALID_PARAMS);
2394 /* LE-only devices do not allow toggling LE on/off */
2395 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2396 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2397 MGMT_STATUS_REJECTED);
2402 enabled = lmp_host_le_capable(hdev);
2404 if (!hdev_is_powered(hdev) || val == enabled) {
2405 bool changed = false;
2407 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2408 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Turning LE off also turns advertising off. */
2412 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2413 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2417 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2422 err = new_settings(hdev, sk);
2427 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2428 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2429 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2434 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2440 hci_req_init(&req, hdev);
2442 memset(&hci_cp, 0, sizeof(hci_cp));
2446 hci_cp.simul = 0x00;
2448 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2449 disable_advertising(&req);
2452 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2455 err = hci_req_run(&req, le_enable_complete);
2457 mgmt_pending_remove(cmd);
2460 hci_dev_unlock(hdev);
2464 /* This is a helper function to test for pending mgmt commands that can
2465 * cause CoD or EIR HCI commands. We can only allow one such pending
2466 * mgmt command at a time since otherwise we cannot easily track what
2467 * the current values are, will be, and based on that calculate if a new
2468 * HCI command needs to be sent and if yes with what value.
2470 static bool pending_eir_or_class(struct hci_dev *hdev)
2472 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that touches EIR/class state.
 * NOTE(review): the return statements are missing from this extraction.
 */
2474 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2475 switch (cmd->opcode) {
2476 case MGMT_OP_ADD_UUID:
2477 case MGMT_OP_REMOVE_UUID:
2478 case MGMT_OP_SET_DEV_CLASS:
2479 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID 00000000-0000-1000-8000-00805F9B34FB in
 * little-endian byte order; 16/32-bit UUIDs differ only in bytes 12-15.
 */
2487 static const u8 bluetooth_base_uuid[] = {
2488 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2489 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 16/32/128-bit based on the base-UUID
 * prefix and the value of its distinguishing 32 bits. NOTE(review): the
 * return lines mapping val to 16/32/128 are missing from this
 * extraction.
 */
2492 static u8 get_uuid_size(const u8 *uuid)
2496 if (memcmp(uuid, bluetooth_base_uuid, 12))
2499 val = get_unaligned_le32(&uuid[12]);
2506 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2508 struct mgmt_pending_cmd *cmd;
2512 cmd = mgmt_pending_find(mgmt_op, hdev);
2516 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2517 mgmt_status(status), hdev->dev_class, 3);
2519 mgmt_pending_remove(cmd);
2522 hci_dev_unlock(hdev);
/* HCI completion for Add UUID — delegates to mgmt_class_complete(). */
2525 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2527 BT_DBG("status 0x%02x", status);
2529 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record the UUID on hdev->uuids and refresh
 * class/EIR via an HCI request; replies immediately with the device
 * class when nothing needed sending (-ENODATA). NOTE(review): the
 * kmalloc failure path and update_class/update_eir request lines are
 * missing from this extraction.
 */
2532 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2534 struct mgmt_cp_add_uuid *cp = data;
2535 struct mgmt_pending_cmd *cmd;
2536 struct hci_request req;
2537 struct bt_uuid *uuid;
2540 BT_DBG("request for %s", hdev->name);
2544 if (pending_eir_or_class(hdev)) {
2545 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2550 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2556 memcpy(uuid->uuid, cp->uuid, 16);
2557 uuid->svc_hint = cp->svc_hint;
2558 uuid->size = get_uuid_size(cp->uuid);
2560 list_add_tail(&uuid->list, &hdev->uuids);
2562 hci_req_init(&req, hdev);
2567 err = hci_req_run(&req, add_uuid_complete);
2569 if (err != -ENODATA)
2572 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2573 hdev->dev_class, 3);
2577 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2586 hci_dev_unlock(hdev);
/* Arm the service-cache: when powered and not already set, queue the
 * delayed service_cache work. NOTE(review): return statements are
 * missing from this extraction.
 */
2590 static bool enable_service_cache(struct hci_dev *hdev)
2592 if (!hdev_is_powered(hdev))
2595 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2596 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI completion for Remove UUID — delegates to mgmt_class_complete(). */
2604 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2606 BT_DBG("status 0x%02x", status);
2608 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: an all-zero UUID clears the whole list
 * (deferring the class/EIR refresh via the service cache when possible);
 * otherwise remove matching entries, failing with INVALID_PARAMS when
 * none matched. Then refresh class/EIR like add_uuid(). NOTE(review):
 * found-counter and request-build lines are missing from this
 * extraction.
 */
2611 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2614 struct mgmt_cp_remove_uuid *cp = data;
2615 struct mgmt_pending_cmd *cmd;
2616 struct bt_uuid *match, *tmp;
2617 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2618 struct hci_request req;
2621 BT_DBG("request for %s", hdev->name);
2625 if (pending_eir_or_class(hdev)) {
2626 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2631 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2632 hci_uuids_clear(hdev);
2634 if (enable_service_cache(hdev)) {
2635 err = mgmt_cmd_complete(sk, hdev->id,
2636 MGMT_OP_REMOVE_UUID,
2637 0, hdev->dev_class, 3);
2646 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2647 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2650 list_del(&match->list);
2656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2657 MGMT_STATUS_INVALID_PARAMS);
2662 hci_req_init(&req, hdev);
2667 err = hci_req_run(&req, remove_uuid_complete);
2669 if (err != -ENODATA)
2672 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2673 hdev->dev_class, 3);
2677 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2686 hci_dev_unlock(hdev);
/* HCI completion for Set Device Class — delegates to
 * mgmt_class_complete().
 */
2690 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2692 BT_DBG("status 0x%02x", status);
2694 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: validates that reserved bits (low two
 * of minor, high three of major) are zero, stores major/minor, replies
 * immediately when powered off, else flushes the service cache and runs
 * an HCI request to push the new class (and EIR). NOTE(review): request
 * build lines and some tails are missing from this extraction.
 */
2697 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2700 struct mgmt_cp_set_dev_class *cp = data;
2701 struct mgmt_pending_cmd *cmd;
2702 struct hci_request req;
2705 BT_DBG("request for %s", hdev->name);
2707 if (!lmp_bredr_capable(hdev))
2708 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2709 MGMT_STATUS_NOT_SUPPORTED);
2713 if (pending_eir_or_class(hdev)) {
2714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2719 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2720 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2721 MGMT_STATUS_INVALID_PARAMS);
2725 hdev->major_class = cp->major;
2726 hdev->minor_class = cp->minor;
2728 if (!hdev_is_powered(hdev)) {
2729 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2730 hdev->dev_class, 3);
2734 hci_req_init(&req, hdev);
/* Drop the lock while synchronously cancelling the cache worker. */
2736 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2737 hci_dev_unlock(hdev);
2738 cancel_delayed_work_sync(&hdev->service_cache);
2745 err = hci_req_run(&req, set_class_complete);
2747 if (err != -ENODATA)
2750 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2751 hdev->dev_class, 3);
2755 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2764 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validates key_count against the U16
 * overflow bound and the exact payload length, validates every key's
 * address type and key type, then replaces the stored link keys —
 * skipping debug-combination keys — and updates HCI_KEEP_DEBUG_KEYS
 * per cp->debug_keys.
 */
2768 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2771 struct mgmt_cp_load_link_keys *cp = data;
/* Cap derived so expected_len below cannot overflow u16. */
2772 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2773 sizeof(struct mgmt_link_key_info));
2774 u16 key_count, expected_len;
2778 BT_DBG("request for %s", hdev->name);
2780 if (!lmp_bredr_capable(hdev))
2781 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2782 MGMT_STATUS_NOT_SUPPORTED);
2784 key_count = __le16_to_cpu(cp->key_count);
2785 if (key_count > max_key_count) {
2786 BT_ERR("load_link_keys: too big key_count value %u",
2788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2789 MGMT_STATUS_INVALID_PARAMS);
2792 expected_len = sizeof(*cp) + key_count *
2793 sizeof(struct mgmt_link_key_info);
2794 if (expected_len != len) {
2795 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2797 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2798 MGMT_STATUS_INVALID_PARAMS);
2801 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2803 MGMT_STATUS_INVALID_PARAMS);
2805 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate all entries before mutating any state. */
2808 for (i = 0; i < key_count; i++) {
2809 struct mgmt_link_key_info *key = &cp->keys[i];
2811 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2812 return mgmt_cmd_status(sk, hdev->id,
2813 MGMT_OP_LOAD_LINK_KEYS,
2814 MGMT_STATUS_INVALID_PARAMS);
2819 hci_link_keys_clear(hdev);
2822 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2824 changed = hci_dev_test_and_clear_flag(hdev,
2825 HCI_KEEP_DEBUG_KEYS);
2828 new_settings(hdev, NULL);
/* Second pass: store the keys. */
2830 for (i = 0; i < key_count; i++) {
2831 struct mgmt_link_key_info *key = &cp->keys[i];
2833 /* Always ignore debug keys and require a new pairing if
2834 * the user wants to use them.
2836 if (key->type == HCI_LK_DEBUG_COMBINATION)
2839 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2840 key->type, key->pin_len, NULL);
2843 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2845 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given address, skipping the
 * socket that initiated the unpair (it receives a command response).
 */
2850 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2851 u8 addr_type, struct sock *skip_sk)
2853 struct mgmt_ev_device_unpaired ev;
2855 bacpy(&ev.addr.bdaddr, bdaddr);
2856 ev.addr.type = addr_type;
2858 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove link keys (BR/EDR) or IRK+LTKs
 * (LE) for the address; conn is left non-NULL only when disconnection
 * was requested and the device is connected, in which case an HCI
 * Disconnect is issued and the reply is deferred to addr_cmd_complete.
 * NOTE(review): several error-path and goto lines are missing from this
 * extraction.
 */
2862 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2865 struct mgmt_cp_unpair_device *cp = data;
2866 struct mgmt_rp_unpair_device rp;
2867 struct hci_cp_disconnect dc;
2868 struct mgmt_pending_cmd *cmd;
2869 struct hci_conn *conn;
2872 memset(&rp, 0, sizeof(rp));
2873 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2874 rp.addr.type = cp->addr.type;
2876 if (!bdaddr_type_is_valid(cp->addr.type))
2877 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2878 MGMT_STATUS_INVALID_PARAMS,
2881 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2882 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2883 MGMT_STATUS_INVALID_PARAMS,
2888 if (!hdev_is_powered(hdev)) {
2889 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2890 MGMT_STATUS_NOT_POWERED, &rp,
2895 if (cp->addr.type == BDADDR_BREDR) {
2896 /* If disconnection is requested, then look up the
2897 * connection. If the remote device is connected, it
2898 * will be later used to terminate the link.
2900 * Setting it to NULL explicitly will cause no
2901 * termination of the link.
2904 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2909 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2913 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2916 /* Defer clearing up the connection parameters
2917 * until closing to give a chance of keeping
2918 * them if a repairing happens.
2920 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2922 /* If disconnection is not requested, then
2923 * clear the connection variable so that the
2924 * link is not terminated.
2926 if (!cp->disconnect)
2930 if (cp->addr.type == BDADDR_LE_PUBLIC)
2931 addr_type = ADDR_LE_DEV_PUBLIC;
2933 addr_type = ADDR_LE_DEV_RANDOM;
2935 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2937 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2941 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2942 MGMT_STATUS_NOT_PAIRED, &rp,
2947 /* If the connection variable is set, then termination of the
2948 * link is requested.
2951 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2953 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2957 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2964 cmd->cmd_complete = addr_cmd_complete;
2966 dc.handle = cpu_to_le16(conn->handle);
2967 dc.reason = 0x13; /* Remote User Terminated Connection */
2968 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2970 mgmt_pending_remove(cmd);
2973 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the address type, require the
 * device to be up, allow only one outstanding disconnect, look up the
 * ACL or LE connection and issue hci_disconnect(); the reply is sent
 * from generic_cmd_complete when the disconnect completes.
 */
2977 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2980 struct mgmt_cp_disconnect *cp = data;
2981 struct mgmt_rp_disconnect rp;
2982 struct mgmt_pending_cmd *cmd;
2983 struct hci_conn *conn;
2988 memset(&rp, 0, sizeof(rp));
2989 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2990 rp.addr.type = cp->addr.type;
2992 if (!bdaddr_type_is_valid(cp->addr.type))
2993 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2994 MGMT_STATUS_INVALID_PARAMS,
2999 if (!test_bit(HCI_UP, &hdev->flags)) {
3000 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3001 MGMT_STATUS_NOT_POWERED, &rp,
3006 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
3007 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3008 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3012 if (cp->addr.type == BDADDR_BREDR)
3013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3016 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
3018 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3019 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3020 MGMT_STATUS_NOT_CONNECTED, &rp,
3025 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3031 cmd->cmd_complete = generic_cmd_complete;
3033 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3035 mgmt_pending_remove(cmd);
3038 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type to the corresponding mgmt
 * BDADDR_* address-type constant used in management messages.
 * NOTE(review): some case labels are missing from this extract.
 */
3042 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3044 switch (link_type) {
3046 switch (addr_type) {
3047 case ADDR_LE_DEV_PUBLIC:
3048 return BDADDR_LE_PUBLIC;
3051 /* Fallback to LE Random address type */
3052 return BDADDR_LE_RANDOM;
3056 /* Fallback to BR/EDR type */
3057 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: build a variable-length response
 * listing the address of every connection flagged as MGMT-connected.
 * SCO/eSCO links are filtered out of the list (they share an address
 * with their ACL link), so the length is recomputed after filling.
 * NOTE(review): interior lines (error paths, kfree) are missing from
 * this extract.
 */
3061 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3064 struct mgmt_rp_get_connections *rp;
3074 if (!hdev_is_powered(hdev)) {
3075 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3076 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the response. */
3081 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3082 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3086 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3087 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in each address, skipping SCO/eSCO links. */
3094 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3095 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3097 bacpy(&rp->addr[i].bdaddr, &c->dst);
3098 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3099 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3104 rp->conn_count = cpu_to_le16(i);
3106 /* Recalculate length in case of filtered SCO connections, etc */
3107 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3109 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3115 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the
 * corresponding HCI negative reply (bdaddr only) to the controller.
 * The pending entry is removed again if sending fails.
 */
3119 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3120 struct mgmt_cp_pin_code_neg_reply *cp)
3122 struct mgmt_pending_cmd *cmd;
3125 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3130 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3131 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3133 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an existing ACL connection. A high-security
 * pairing requires a full 16-byte PIN; anything shorter is converted
 * into a negative reply instead.
 * NOTE(review): interior lines (goto labels, braces) are missing from
 * this extract.
 */
3138 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3141 struct hci_conn *conn;
3142 struct mgmt_cp_pin_code_reply *cp = data;
3143 struct hci_cp_pin_code_reply reply;
3144 struct mgmt_pending_cmd *cmd;
3151 if (!hdev_is_powered(hdev)) {
3152 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3153 MGMT_STATUS_NOT_POWERED);
3157 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3159 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3160 MGMT_STATUS_NOT_CONNECTED);
/* Secure pairing (BT_SECURITY_HIGH) mandates a 16-digit PIN. */
3164 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3165 struct mgmt_cp_pin_code_neg_reply ncp;
3167 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3169 BT_ERR("PIN code is not 16 bytes long");
3171 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3173 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3174 MGMT_STATUS_INVALID_PARAMS);
3179 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3185 cmd->cmd_complete = addr_cmd_complete;
/* Build the HCI reply from the user-space request. */
3187 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3188 reply.pin_len = cp->pin_len;
3189 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3191 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3193 mgmt_pending_remove(cmd);
3196 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validate the requested IO
 * capability value and store it on the adapter for use in future
 * pairing procedures. Completes immediately; no HCI traffic needed.
 */
3200 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3203 struct mgmt_cp_set_io_capability *cp = data;
/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability value. */
3207 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3208 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3209 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3213 hdev->io_capability = cp->io_capability;
3215 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3216 hdev->io_capability);
3218 hci_dev_unlock(hdev);
3220 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending PAIR_DEVICE command, if any, whose user_data points
 * at the given connection. Returns the matching command (return
 * statements fall outside this extract).
 */
3224 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3226 struct hci_dev *hdev = conn->hdev;
3227 struct mgmt_pending_cmd *cmd;
3229 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3230 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3233 if (cmd->user_data != conn)
/* Completion handler for PAIR_DEVICE: send the command-complete event
 * with the peer address, detach all pairing callbacks from the
 * connection so no further notifications arrive, and drop the
 * connection reference taken when pairing started.
 */
3242 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3244 struct mgmt_rp_pair_device rp;
3245 struct hci_conn *conn = cmd->user_data;
3248 bacpy(&rp.addr.bdaddr, &conn->dst);
3249 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3251 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3252 status, &rp, sizeof(rp));
3254 /* So we don't get further callbacks for this connection */
3255 conn->connect_cfm_cb = NULL;
3256 conn->security_cfm_cb = NULL;
3257 conn->disconn_cfm_cb = NULL;
3259 hci_conn_drop(conn);
3261 /* The device is paired so there is no need to remove
3262 * its connection parameters anymore.
3264 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called when SMP pairing finishes: translate the boolean outcome into
 * a mgmt status and complete/remove the pending PAIR_DEVICE command
 * associated with the connection, if one exists.
 */
3271 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3273 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3274 struct mgmt_pending_cmd *cmd;
3276 cmd = find_pairing(conn);
3278 cmd->cmd_complete(cmd, status);
3279 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback used during pairing:
 * locate the pending PAIR_DEVICE command for this connection and
 * complete it with the HCI status converted to a mgmt status.
 */
3283 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3285 struct mgmt_pending_cmd *cmd;
3287 BT_DBG("status %u", status);
3289 cmd = find_pairing(conn);
3291 BT_DBG("Unable to find a pending command");
3295 cmd->cmd_complete(cmd, mgmt_status(status));
3296 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb: complete the pending
 * PAIR_DEVICE command for this LE connection with the converted
 * status. NOTE(review): an early-exit condition between lines 3303 and
 * 3308 is missing from this extract.
 */
3299 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3301 struct mgmt_pending_cmd *cmd;
3303 BT_DBG("status %u", status);
3308 cmd = find_pairing(conn);
3310 BT_DBG("Unable to find a pending command");
3314 cmd->cmd_complete(cmd, mgmt_status(status));
3315 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the request, establish an ACL
 * (BR/EDR) or LE connection to the peer, install pairing callbacks and
 * initiate dedicated bonding at medium security. If the link is
 * already connected and secure, the command completes immediately.
 * NOTE(review): interior lines (error labels, else branches, braces)
 * are missing from this extract; comments cover visible code only.
 */
3318 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3321 struct mgmt_cp_pair_device *cp = data;
3322 struct mgmt_rp_pair_device rp;
3323 struct mgmt_pending_cmd *cmd;
3324 u8 sec_level, auth_type;
3325 struct hci_conn *conn;
/* Response echoes the peer address back to user space. */
3330 memset(&rp, 0, sizeof(rp));
3331 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3332 rp.addr.type = cp->addr.type;
3334 if (!bdaddr_type_is_valid(cp->addr.type))
3335 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3336 MGMT_STATUS_INVALID_PARAMS,
3339 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3340 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3341 MGMT_STATUS_INVALID_PARAMS,
3346 if (!hdev_is_powered(hdev)) {
3347 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3348 MGMT_STATUS_NOT_POWERED, &rp,
3353 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3354 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3355 MGMT_STATUS_ALREADY_PAIRED, &rp,
3360 sec_level = BT_SECURITY_MEDIUM;
3361 auth_type = HCI_AT_DEDICATED_BONDING;
3363 if (cp->addr.type == BDADDR_BREDR) {
3364 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3369 /* Convert from L2CAP channel address type to HCI address type
3371 if (cp->addr.type == BDADDR_LE_PUBLIC)
3372 addr_type = ADDR_LE_DEV_PUBLIC;
3374 addr_type = ADDR_LE_DEV_RANDOM;
3376 /* When pairing a new device, it is expected to remember
3377 * this device for future connections. Adding the connection
3378 * parameter information ahead of time allows tracking
3379 * of the slave preferred values and will speed up any
3380 * further connection establishment.
3382 * If connection parameters already exist, then they
3383 * will be kept and this function does nothing.
3385 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3387 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3388 sec_level, HCI_LE_CONN_TIMEOUT,
/* Map the connect error to the closest mgmt status code. */
3395 if (PTR_ERR(conn) == -EBUSY)
3396 status = MGMT_STATUS_BUSY;
3397 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3398 status = MGMT_STATUS_NOT_SUPPORTED;
3399 else if (PTR_ERR(conn) == -ECONNREFUSED)
3400 status = MGMT_STATUS_REJECTED;
3402 status = MGMT_STATUS_CONNECT_FAILED;
3404 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3405 status, &rp, sizeof(rp));
/* An existing connect callback means another pairing is active. */
3409 if (conn->connect_cfm_cb) {
3410 hci_conn_drop(conn);
3411 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3412 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3416 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3419 hci_conn_drop(conn);
3423 cmd->cmd_complete = pairing_complete;
3425 /* For LE, just connecting isn't a proof that the pairing finished */
3426 if (cp->addr.type == BDADDR_BREDR) {
3427 conn->connect_cfm_cb = pairing_complete_cb;
3428 conn->security_cfm_cb = pairing_complete_cb;
3429 conn->disconn_cfm_cb = pairing_complete_cb;
3431 conn->connect_cfm_cb = le_pairing_complete_cb;
3432 conn->security_cfm_cb = le_pairing_complete_cb;
3433 conn->disconn_cfm_cb = le_pairing_complete_cb;
3436 conn->io_capability = cp->io_cap;
3437 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: finish synchronously. */
3439 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3440 hci_conn_security(conn, sec_level, auth_type, true)) {
3441 cmd->cmd_complete(cmd, 0);
3442 mgmt_pending_remove(cmd);
3448 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: find the pending PAIR_DEVICE
 * command, verify the supplied address matches the connection being
 * paired, and complete the pairing with MGMT_STATUS_CANCELLED before
 * acknowledging the cancel request itself.
 */
3452 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3455 struct mgmt_addr_info *addr = data;
3456 struct mgmt_pending_cmd *cmd;
3457 struct hci_conn *conn;
3464 if (!hdev_is_powered(hdev)) {
3465 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3466 MGMT_STATUS_NOT_POWERED);
3470 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3472 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3473 MGMT_STATUS_INVALID_PARAMS);
3477 conn = cmd->user_data;
/* The cancel must target the same peer the pairing was started for. */
3479 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3480 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3481 MGMT_STATUS_INVALID_PARAMS);
3485 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3486 mgmt_pending_remove(cmd);
3488 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3489 addr, sizeof(*addr));
3491 hci_dev_unlock(hdev);
/* Common implementation for all user pairing responses (PIN negative
 * reply, user confirm/passkey replies and their negatives). LE
 * responses are routed through SMP; BR/EDR responses become an HCI
 * command (with passkey payload for HCI_OP_USER_PASSKEY_REPLY, bdaddr
 * only otherwise).
 * NOTE(review): interior lines (braces, error labels) are missing from
 * this extract.
 */
3495 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3496 struct mgmt_addr_info *addr, u16 mgmt_op,
3497 u16 hci_op, __le32 passkey)
3499 struct mgmt_pending_cmd *cmd;
3500 struct hci_conn *conn;
3505 if (!hdev_is_powered(hdev)) {
3506 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3507 MGMT_STATUS_NOT_POWERED, addr,
3512 if (addr->type == BDADDR_BREDR)
3513 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3515 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3518 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3519 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by the SMP layer. */
3524 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3525 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3527 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3528 MGMT_STATUS_SUCCESS, addr,
3531 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3532 MGMT_STATUS_FAILED, addr,
3538 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3544 cmd->cmd_complete = addr_cmd_complete;
3546 /* Continue with pairing via HCI */
3547 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3548 struct hci_cp_user_passkey_reply cp;
3550 bacpy(&cp.bdaddr, &addr->bdaddr);
3551 cp.passkey = passkey;
3552 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3554 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3558 mgmt_pending_remove(cmd);
3561 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper that delegates to
 * user_pairing_resp with the PIN-negative-reply HCI opcode (passkey
 * unused, hence 0).
 */
3565 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3566 void *data, u16 len)
3568 struct mgmt_cp_pin_code_neg_reply *cp = data;
3572 return user_pairing_resp(sk, hdev, &cp->addr,
3573 MGMT_OP_PIN_CODE_NEG_REPLY,
3574 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: check the exact parameter size
 * (this op carries no variable-length data) and delegate to
 * user_pairing_resp with the confirm-reply HCI opcode.
 */
3577 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3580 struct mgmt_cp_user_confirm_reply *cp = data;
3584 if (len != sizeof(*cp))
3585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3586 MGMT_STATUS_INVALID_PARAMS);
3588 return user_pairing_resp(sk, hdev, &cp->addr,
3589 MGMT_OP_USER_CONFIRM_REPLY,
3590 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp with the confirm-negative-reply HCI opcode.
 */
3593 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3594 void *data, u16 len)
3596 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3600 return user_pairing_resp(sk, hdev, &cp->addr,
3601 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3602 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper delegating to
 * user_pairing_resp, forwarding the user-entered passkey.
 */
3605 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3608 struct mgmt_cp_user_passkey_reply *cp = data;
3612 return user_pairing_resp(sk, hdev, &cp->addr,
3613 MGMT_OP_USER_PASSKEY_REPLY,
3614 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp with the passkey-negative-reply HCI opcode.
 */
3617 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3618 void *data, u16 len)
3620 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3624 return user_pairing_resp(sk, hdev, &cp->addr,
3625 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3626 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying the adapter's current
 * device name onto the given request.
 */
3629 static void update_name(struct hci_request *req)
3631 struct hci_dev *hdev = req->hdev;
3632 struct hci_cp_write_local_name cp;
3634 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3636 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for SET_LOCAL_NAME: report either a
 * status (on failure) or a command-complete event (on success) back to
 * the pending user-space command, then drop it.
 * NOTE(review): the status test and response payload lines are missing
 * from this extract.
 */
3639 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3641 struct mgmt_cp_set_local_name *cp;
3642 struct mgmt_pending_cmd *cmd;
3644 BT_DBG("status 0x%02x", status);
3648 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3655 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3656 mgmt_status(status));
3658 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3661 mgmt_pending_remove(cmd);
3664 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: short-circuit when both names are
 * unchanged; when powered off, just store the name and emit a
 * LOCAL_NAME_CHANGED event; otherwise run an HCI request that writes
 * the name and refreshes LE scan response data.
 * NOTE(review): interior lines (braces, error labels, the BR/EDR
 * update_name call around 3716-3721) are missing from this extract.
 */
3667 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3670 struct mgmt_cp_set_local_name *cp = data;
3671 struct mgmt_pending_cmd *cmd;
3672 struct hci_request req;
3679 /* If the old values are the same as the new ones just return a
3680 * direct command complete event.
3682 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3683 !memcmp(hdev->short_name, cp->short_name,
3684 sizeof(hdev->short_name))) {
3685 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3690 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3692 if (!hdev_is_powered(hdev)) {
3693 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3695 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3700 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3706 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3712 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3714 hci_req_init(&req, hdev);
3716 if (lmp_bredr_capable(hdev)) {
3721 /* The name is stored in the scan response data and so
3722 * no need to udpate the advertising data here.
3724 if (lmp_le_capable(hdev))
3725 update_scan_rsp_data(&req);
3727 err = hci_req_run(&req, set_name_complete);
3729 mgmt_pending_remove(cmd);
3732 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: require a powered, SSP-capable
 * adapter with no duplicate request pending, then ask the controller
 * for its OOB data — the extended (Secure Connections) variant when
 * BR/EDR SC is enabled, the legacy variant otherwise.
 */
3736 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3737 void *data, u16 data_len)
3739 struct mgmt_pending_cmd *cmd;
3742 BT_DBG("%s", hdev->name);
3746 if (!hdev_is_powered(hdev)) {
3747 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3748 MGMT_STATUS_NOT_POWERED);
3752 if (!lmp_ssp_capable(hdev)) {
3753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3754 MGMT_STATUS_NOT_SUPPORTED);
3758 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3759 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3764 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3770 if (bredr_sc_enabled(hdev))
3771 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3774 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3777 mgmt_pending_remove(cmd);
3780 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote out-of-band
 * pairing data for a peer. Two payload sizes are accepted — the legacy
 * form with a single P-192 hash/randomizer pair (BR/EDR only), and the
 * extended form carrying both P-192 and P-256 values, where any
 * all-zero pair disables OOB for that curve.
 * NOTE(review): interior lines (braces, else branches, NULL
 * assignments for disabled pairs) are missing from this extract.
 */
3784 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3785 void *data, u16 len)
3787 struct mgmt_addr_info *addr = data;
3790 BT_DBG("%s ", hdev->name);
3792 if (!bdaddr_type_is_valid(addr->type))
3793 return mgmt_cmd_complete(sk, hdev->id,
3794 MGMT_OP_ADD_REMOTE_OOB_DATA,
3795 MGMT_STATUS_INVALID_PARAMS,
3796 addr, sizeof(*addr));
/* Legacy payload: P-192 hash + randomizer, BR/EDR addresses only. */
3800 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3801 struct mgmt_cp_add_remote_oob_data *cp = data;
3804 if (cp->addr.type != BDADDR_BREDR) {
3805 err = mgmt_cmd_complete(sk, hdev->id,
3806 MGMT_OP_ADD_REMOTE_OOB_DATA,
3807 MGMT_STATUS_INVALID_PARAMS,
3808 &cp->addr, sizeof(cp->addr));
3812 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3813 cp->addr.type, cp->hash,
3814 cp->rand, NULL, NULL);
3816 status = MGMT_STATUS_FAILED;
3818 status = MGMT_STATUS_SUCCESS;
3820 err = mgmt_cmd_complete(sk, hdev->id,
3821 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3822 &cp->addr, sizeof(cp->addr));
3823 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3824 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3825 u8 *rand192, *hash192, *rand256, *hash256;
3828 if (bdaddr_type_is_le(cp->addr.type)) {
3829 /* Enforce zero-valued 192-bit parameters as
3830 * long as legacy SMP OOB isn't implemented.
3832 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3833 memcmp(cp->hash192, ZERO_KEY, 16)) {
3834 err = mgmt_cmd_complete(sk, hdev->id,
3835 MGMT_OP_ADD_REMOTE_OOB_DATA,
3836 MGMT_STATUS_INVALID_PARAMS,
3837 addr, sizeof(*addr));
3844 /* In case one of the P-192 values is set to zero,
3845 * then just disable OOB data for P-192.
3847 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3848 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3852 rand192 = cp->rand192;
3853 hash192 = cp->hash192;
3857 /* In case one of the P-256 values is set to zero, then just
3858 * disable OOB data for P-256.
3860 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3861 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3865 rand256 = cp->rand256;
3866 hash256 = cp->hash256;
3869 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3870 cp->addr.type, hash192, rand192,
3873 status = MGMT_STATUS_FAILED;
3875 status = MGMT_STATUS_SUCCESS;
3877 err = mgmt_cmd_complete(sk, hdev->id,
3878 MGMT_OP_ADD_REMOTE_OOB_DATA,
3879 status, &cp->addr, sizeof(cp->addr));
/* Any other payload size is a malformed request. */
3881 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3882 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3883 MGMT_STATUS_INVALID_PARAMS);
3887 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB
 * data for one BR/EDR peer, or clear all entries when the wildcard
 * address BDADDR_ANY is given.
 */
3891 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3892 void *data, u16 len)
3894 struct mgmt_cp_remove_remote_oob_data *cp = data;
3898 BT_DBG("%s", hdev->name);
3900 if (cp->addr.type != BDADDR_BREDR)
3901 return mgmt_cmd_complete(sk, hdev->id,
3902 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3903 MGMT_STATUS_INVALID_PARAMS,
3904 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: flush the whole OOB store. */
3908 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3909 hci_remote_oob_data_clear(hdev);
3910 status = MGMT_STATUS_SUCCESS;
3914 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3916 status = MGMT_STATUS_INVALID_PARAMS;
3918 status = MGMT_STATUS_SUCCESS;
3921 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3922 status, &cp->addr, sizeof(cp->addr));
3924 hci_dev_unlock(hdev);
/* Build the HCI request that starts device discovery according to
 * hdev->discovery.type: a BR/EDR general inquiry, an LE active scan,
 * or both (interleaved). On failure the mgmt status is written to
 * *status; the boolean return lines fall outside this extract.
 * NOTE(review): interior lines (braces, returns, scan-window checks)
 * are missing from this extract.
 */
3928 static bool trigger_discovery(struct hci_request *req, u8 *status)
3930 struct hci_dev *hdev = req->hdev;
3931 struct hci_cp_le_set_scan_param param_cp;
3932 struct hci_cp_le_set_scan_enable enable_cp;
3933 struct hci_cp_inquiry inq_cp;
3934 /* General inquiry access code (GIAC) */
3935 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3939 switch (hdev->discovery.type) {
3940 case DISCOV_TYPE_BREDR:
3941 *status = mgmt_bredr_support(hdev);
3945 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3946 *status = MGMT_STATUS_BUSY;
3950 hci_inquiry_cache_flush(hdev);
3952 memset(&inq_cp, 0, sizeof(inq_cp));
3953 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3954 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3955 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3958 case DISCOV_TYPE_LE:
3959 case DISCOV_TYPE_INTERLEAVED:
3960 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR enabled as well as LE. */
3964 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3965 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
3966 *status = MGMT_STATUS_NOT_SUPPORTED;
3970 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
3971 /* Don't let discovery abort an outgoing
3972 * connection attempt that's using directed
3975 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3977 *status = MGMT_STATUS_REJECTED;
3981 disable_advertising(req);
3984 /* If controller is scanning, it means the background scanning
3985 * is running. Thus, we should temporarily stop it in order to
3986 * set the discovery scanning parameters.
3988 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3989 hci_req_add_le_scan_disable(req);
3991 memset(&param_cp, 0, sizeof(param_cp));
3993 /* All active scans will be done with either a resolvable
3994 * private address (when privacy feature has been enabled)
3995 * or non-resolvable private address.
3997 err = hci_update_random_address(req, true, &own_addr_type);
3999 *status = MGMT_STATUS_FAILED;
/* Active scan so that scan responses (device names) are received. */
4003 param_cp.type = LE_SCAN_ACTIVE;
4004 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
4005 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4006 param_cp.own_address_type = own_addr_type;
4007 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4010 memset(&enable_cp, 0, sizeof(enable_cp));
4011 enable_cp.enable = LE_SCAN_ENABLE;
4012 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4013 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type. */
4018 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback for both START_DISCOVERY and
 * START_SERVICE_DISCOVERY: complete the pending command, set the
 * discovery state, and — for LE/interleaved scans — schedule the
 * delayed le_scan_disable work that will end the scan.
 * NOTE(review): interior lines (braces, goto labels) are missing from
 * this extract.
 */
4025 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4028 struct mgmt_pending_cmd *cmd;
4029 unsigned long timeout;
4031 BT_DBG("status %d", status);
/* Either op may have triggered this request; find whichever pends. */
4035 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4037 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4040 cmd->cmd_complete(cmd, mgmt_status(status));
4041 mgmt_pending_remove(cmd);
4045 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4049 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4051 /* If the scan involves LE scan, pick proper timeout to schedule
4052 * hdev->le_scan_disable that will stop it.
4054 switch (hdev->discovery.type) {
4055 case DISCOV_TYPE_LE:
4056 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4058 case DISCOV_TYPE_INTERLEAVED:
4059 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4061 case DISCOV_TYPE_BREDR:
4065 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4071 /* When service discovery is used and the controller has
4072 * a strict duplicate filter, it is important to remember
4073 * the start and duration of the scan. This is required
4074 * for restarting scanning during the discovery phase.
4076 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4078 hdev->discovery.result_filtering) {
4079 hdev->discovery.scan_start = jiffies;
4080 hdev->discovery.scan_duration = timeout;
4083 queue_delayed_work(hdev->workqueue,
4084 &hdev->le_scan_disable, timeout);
4088 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validate adapter power and the
 * discovery state machine, reset any previous discovery filter, build
 * the discovery HCI request via trigger_discovery() and run it, moving
 * the state to DISCOVERY_STARTING on success.
 * NOTE(review): interior lines (braces, error labels) are missing from
 * this extract.
 */
4091 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4092 void *data, u16 len)
4094 struct mgmt_cp_start_discovery *cp = data;
4095 struct mgmt_pending_cmd *cmd;
4096 struct hci_request req;
4100 BT_DBG("%s", hdev->name);
4104 if (!hdev_is_powered(hdev)) {
4105 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4106 MGMT_STATUS_NOT_POWERED,
4107 &cp->type, sizeof(cp->type));
/* Discovery already running (or periodic inquiry active) -> busy. */
4111 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4112 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4113 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4114 MGMT_STATUS_BUSY, &cp->type,
4119 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4125 cmd->cmd_complete = generic_cmd_complete;
4127 /* Clear the discovery filter first to free any previously
4128 * allocated memory for the UUID list.
4130 hci_discovery_filter_clear(hdev);
4132 hdev->discovery.type = cp->type;
4133 hdev->discovery.report_invalid_rssi = false;
4135 hci_req_init(&req, hdev);
4137 if (!trigger_discovery(&req, &status)) {
4138 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4139 status, &cp->type, sizeof(cp->type));
4140 mgmt_pending_remove(cmd);
4144 err = hci_req_run(&req, start_discovery_complete);
4146 mgmt_pending_remove(cmd);
4150 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4153 hci_dev_unlock(hdev);
/* cmd_complete hook for START_SERVICE_DISCOVERY: reply with the
 * command's own stored parameter data as the response payload.
 */
4157 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4160 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with result filtering — validates the variable-length UUID list
 * against the declared count (bounded so sizeof(*cp) + 16*count cannot
 * exceed U16_MAX), copies the filter into hdev->discovery, then builds
 * and runs the discovery HCI request.
 * NOTE(review): interior lines (braces, error labels) are missing from
 * this extract.
 */
4164 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4165 void *data, u16 len)
4167 struct mgmt_cp_start_service_discovery *cp = data;
4168 struct mgmt_pending_cmd *cmd;
4169 struct hci_request req;
/* Upper bound keeps expected_len below from overflowing u16. */
4170 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4171 u16 uuid_count, expected_len;
4175 BT_DBG("%s", hdev->name);
4179 if (!hdev_is_powered(hdev)) {
4180 err = mgmt_cmd_complete(sk, hdev->id,
4181 MGMT_OP_START_SERVICE_DISCOVERY,
4182 MGMT_STATUS_NOT_POWERED,
4183 &cp->type, sizeof(cp->type));
4187 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4188 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4189 err = mgmt_cmd_complete(sk, hdev->id,
4190 MGMT_OP_START_SERVICE_DISCOVERY,
4191 MGMT_STATUS_BUSY, &cp->type,
4196 uuid_count = __le16_to_cpu(cp->uuid_count);
4197 if (uuid_count > max_uuid_count) {
4198 BT_ERR("service_discovery: too big uuid_count value %u",
4200 err = mgmt_cmd_complete(sk, hdev->id,
4201 MGMT_OP_START_SERVICE_DISCOVERY,
4202 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Declared UUID count must match the actual payload length. */
4207 expected_len = sizeof(*cp) + uuid_count * 16;
4208 if (expected_len != len) {
4209 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4211 err = mgmt_cmd_complete(sk, hdev->id,
4212 MGMT_OP_START_SERVICE_DISCOVERY,
4213 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4218 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4225 cmd->cmd_complete = service_discovery_cmd_complete;
4227 /* Clear the discovery filter first to free any previously
4228 * allocated memory for the UUID list.
4230 hci_discovery_filter_clear(hdev);
4232 hdev->discovery.result_filtering = true;
4233 hdev->discovery.type = cp->type;
4234 hdev->discovery.rssi = cp->rssi;
4235 hdev->discovery.uuid_count = uuid_count;
4237 if (uuid_count > 0) {
4238 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4240 if (!hdev->discovery.uuids) {
4241 err = mgmt_cmd_complete(sk, hdev->id,
4242 MGMT_OP_START_SERVICE_DISCOVERY,
4244 &cp->type, sizeof(cp->type));
4245 mgmt_pending_remove(cmd);
4250 hci_req_init(&req, hdev);
4252 if (!trigger_discovery(&req, &status)) {
4253 err = mgmt_cmd_complete(sk, hdev->id,
4254 MGMT_OP_START_SERVICE_DISCOVERY,
4255 status, &cp->type, sizeof(cp->type));
4256 mgmt_pending_remove(cmd);
4260 err = hci_req_run(&req, start_discovery_complete);
4262 mgmt_pending_remove(cmd);
4266 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4269 hci_dev_unlock(hdev);
/* HCI request completion callback for STOP_DISCOVERY: complete the
 * pending command and move the discovery state to STOPPED.
 */
4273 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4275 struct mgmt_pending_cmd *cmd;
4277 BT_DBG("status %d", status);
4281 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4283 cmd->cmd_complete(cmd, mgmt_status(status));
4284 mgmt_pending_remove(cmd);
4288 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4290 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: reject when no discovery is active
 * or the type doesn't match the running one, otherwise queue an HCI
 * request that stops it. If the request generated no HCI commands
 * (-ENODATA) the state is set to STOPPED and the command completes
 * immediately.
 * NOTE(review): interior lines (braces, goto targets) are missing from
 * this extract.
 */
4293 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4296 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4297 struct mgmt_pending_cmd *cmd;
4298 struct hci_request req;
4301 BT_DBG("%s", hdev->name);
4305 if (!hci_discovery_active(hdev)) {
4306 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4307 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4308 sizeof(mgmt_cp->type));
4312 if (hdev->discovery.type != mgmt_cp->type) {
4313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4314 MGMT_STATUS_INVALID_PARAMS,
4315 &mgmt_cp->type, sizeof(mgmt_cp->type));
4319 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4325 cmd->cmd_complete = generic_cmd_complete;
4327 hci_req_init(&req, hdev);
4329 hci_stop_discovery(&req);
4331 err = hci_req_run(&req, stop_discovery_complete);
4333 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4337 mgmt_pending_remove(cmd);
4339 /* If no HCI commands were sent we're done */
4340 if (err == -ENODATA) {
4341 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4342 &mgmt_cp->type, sizeof(mgmt_cp->type));
4343 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4347 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during active discovery, mark an
 * inquiry-cache entry as name-known or flag it for name resolution
 * (NAME_NEEDED) based on user space's answer.
 */
4351 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4354 struct mgmt_cp_confirm_name *cp = data;
4355 struct inquiry_entry *e;
4358 BT_DBG("%s", hdev->name);
4362 if (!hci_discovery_active(hdev)) {
4363 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4364 MGMT_STATUS_FAILED, &cp->addr,
4369 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4371 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4372 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4377 if (cp->name_known) {
4378 e->name_state = NAME_KNOWN;
4381 e->name_state = NAME_NEEDED;
4382 hci_inquiry_cache_update_resolve(hdev, e);
4385 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4386 &cp->addr, sizeof(cp->addr));
4389 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the adapter's
 * blacklist and broadcast a DEVICE_BLOCKED event on success.
 */
4393 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4396 struct mgmt_cp_block_device *cp = data;
4400 BT_DBG("%s", hdev->name);
4402 if (!bdaddr_type_is_valid(cp->addr.type))
4403 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4404 MGMT_STATUS_INVALID_PARAMS,
4405 &cp->addr, sizeof(cp->addr));
4409 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4412 status = MGMT_STATUS_FAILED;
/* Notify other mgmt listeners that the device was blocked. */
4416 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4418 status = MGMT_STATUS_SUCCESS;
4421 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4422 &cp->addr, sizeof(cp->addr));
4424 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the
 * adapter's blacklist and broadcast a DEVICE_UNBLOCKED event on
 * success; a miss in the list yields INVALID_PARAMS.
 */
4429 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4432 struct mgmt_cp_unblock_device *cp = data;
4436 BT_DBG("%s", hdev->name);
4438 if (!bdaddr_type_is_valid(cp->addr.type))
4439 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4440 MGMT_STATUS_INVALID_PARAMS,
4441 &cp->addr, sizeof(cp->addr));
4445 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4448 status = MGMT_STATUS_INVALID_PARAMS;
4452 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4454 status = MGMT_STATUS_SUCCESS;
4457 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4458 &cp->addr, sizeof(cp->addr));
4460 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: validate the DI source (0x0000 -
 * 0x0002) and store the device-ID quadruple on the adapter, then run
 * an HCI request (contents outside this extract, presumably an EIR
 * update — TODO confirm against the full file).
 */
4465 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4468 struct mgmt_cp_set_device_id *cp = data;
4469 struct hci_request req;
4473 BT_DBG("%s", hdev->name);
4475 source = __le16_to_cpu(cp->source);
/* 0x0000 = disabled, 0x0001 = Bluetooth SIG, 0x0002 = USB IF. */
4477 if (source > 0x0002)
4478 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4479 MGMT_STATUS_INVALID_PARAMS);
4483 hdev->devid_source = source;
4484 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4485 hdev->devid_product = __le16_to_cpu(cp->product);
4486 hdev->devid_version = __le16_to_cpu(cp->version);
4488 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4491 hci_req_init(&req, hdev);
4493 hci_req_run(&req, NULL);
4495 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Advertising. On failure,
 * report the translated status to all pending SET_ADVERTISING commands.
 * On success, resynchronize the mgmt-level HCI_ADVERTISING flag with the
 * controller-level HCI_LE_ADV state, answer the pending commands with
 * the new settings and broadcast New Settings if anything changed.
 * NOTE(review): lossy extraction -- braces/else/returns are missing.
 */
4500 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4503 struct cmd_lookup match = { NULL, hdev };
4508 u8 mgmt_err = mgmt_status(status);
4510 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4511 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual advertising state into the mgmt flag. */
4515 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4516 hci_dev_set_flag(hdev, HCI_ADVERTISING)
4518 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4520 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4523 new_settings(hdev, match.sk);
4529 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING. val semantics: 0x00 = off,
 * 0x01 = advertising on, 0x02 = advertising on and connectable.
 * Either toggles the flags directly (when no HCI traffic is needed) or
 * queues a pending command and issues the advertising HCI request.
 * NOTE(review): lossy extraction -- some declarations/braces missing.
 */
4532 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4535 struct mgmt_mode *cp = data;
4536 struct mgmt_pending_cmd *cmd;
4537 struct hci_request req;
4541 BT_DBG("request for %s", hdev->name);
4543 status = mgmt_le_support(hdev);
4545 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4548 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4549 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4550 MGMT_STATUS_INVALID_PARAMS);
4556 /* The following conditions are ones which mean that we should
4557 * not do any HCI communication but directly send a mgmt
4558 * response to user space (after toggling the flag if
/* Direct path applies when: powered off, no effective change, an LE
 * connection exists, or an active LE scan is in progress.
 */
4561 if (!hdev_is_powered(hdev) ||
4562 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4563 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4564 hci_conn_num(hdev, LE_LINK) > 0 ||
4565 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4566 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4570 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4571 if (cp->val == 0x02)
4572 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4574 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4576 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4577 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4580 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4585 err = new_settings(hdev, sk);
/* Only one SET_ADVERTISING / SET_LE command may be in flight at once. */
4590 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4591 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4597 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4603 hci_req_init(&req, hdev);
4605 if (cp->val == 0x02)
4606 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4608 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4611 enable_advertising(&req);
4613 disable_advertising(&req);
4615 err = hci_req_run(&req, set_advertising_complete);
/* On request-run failure the pending entry must be dropped again. */
4617 mgmt_pending_remove(cmd);
4620 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: store an LE static random
 * address. Only allowed while powered off; BDADDR_ANY clears the
 * address, any other value must be a proper static address (the two
 * most significant bits set) and not BDADDR_NONE.
 * NOTE(review): lossy extraction -- locals/braces/return are missing.
 */
4624 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4625 void *data, u16 len)
4627 struct mgmt_cp_set_static_address *cp = data;
4630 BT_DBG("%s", hdev->name);
4632 if (!lmp_le_capable(hdev))
4633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4634 MGMT_STATUS_NOT_SUPPORTED);
/* Changing the identity address while powered would confuse peers. */
4636 if (hdev_is_powered(hdev))
4637 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4638 MGMT_STATUS_REJECTED);
4640 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4641 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4642 return mgmt_cmd_status(sk, hdev->id,
4643 MGMT_OP_SET_STATIC_ADDRESS,
4644 MGMT_STATUS_INVALID_PARAMS);
4646 /* Two most significant bits shall be set */
4647 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4648 return mgmt_cmd_status(sk, hdev->id,
4649 MGMT_OP_SET_STATIC_ADDRESS,
4650 MGMT_STATUS_INVALID_PARAMS);
4655 bacpy(&hdev->static_addr, &cp->bdaddr);
4657 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4661 err = new_settings(hdev, sk);
4664 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan
 * interval and window, then restart the passive background scan (if one
 * is running outside of discovery) so the new values take effect.
 * NOTE(review): lossy extraction -- locals/braces/return are missing.
 */
4668 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4669 void *data, u16 len)
4671 struct mgmt_cp_set_scan_params *cp = data;
4672 __u16 interval, window;
4675 BT_DBG("%s", hdev->name);
4677 if (!lmp_le_capable(hdev))
4678 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4679 MGMT_STATUS_NOT_SUPPORTED);
/* HCI-mandated range for both values: 0x0004 - 0x4000 (.625 ms units). */
4681 interval = __le16_to_cpu(cp->interval);
4683 if (interval < 0x0004 || interval > 0x4000)
4684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4685 MGMT_STATUS_INVALID_PARAMS);
4687 window = __le16_to_cpu(cp->window);
4689 if (window < 0x0004 || window > 0x4000)
4690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4691 MGMT_STATUS_INVALID_PARAMS);
/* The scan window must fit inside the scan interval. */
4693 if (window > interval)
4694 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4695 MGMT_STATUS_INVALID_PARAMS);
4699 hdev->le_scan_interval = interval;
4700 hdev->le_scan_window = window;
4702 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4705 /* If background scan is running, restart it so new parameters are
/* Only restart when not in active discovery, to avoid disturbing it. */
4708 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4709 hdev->discovery.state == DISCOVERY_STOPPED) {
4710 struct hci_request req;
4712 hci_req_init(&req, hdev);
4714 hci_req_add_le_scan_disable(&req);
4715 hci_req_add_le_passive_scan(&req);
4717 hci_req_run(&req, NULL);
4720 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable. On error,
 * report the status to the pending command's socket; on success, update
 * the HCI_FAST_CONNECTABLE flag from the requested mode and send the
 * settings response plus a New Settings event.
 * NOTE(review): lossy extraction -- braces/else/early-return missing.
 */
4725 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4728 struct mgmt_pending_cmd *cmd;
4730 BT_DBG("status 0x%02x", status);
4734 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4739 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4740 mgmt_status(status));
/* cmd->param holds the original mgmt_mode request payload. */
4742 struct mgmt_mode *cp = cmd->param;
4745 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4747 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4749 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4750 new_settings(hdev, cmd->sk);
4753 mgmt_pending_remove(cmd);
4756 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: toggle interleaved page
 * scan. Requires BR/EDR enabled and controller version >= 1.2. When
 * powered off, just flip the flag; when powered, issue the page scan
 * activity HCI request via write_fast_connectable().
 * NOTE(review): lossy extraction -- some braces/gotos/return missing.
 */
4759 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4760 void *data, u16 len)
4762 struct mgmt_mode *cp = data;
4763 struct mgmt_pending_cmd *cmd;
4764 struct hci_request req;
4767 BT_DBG("%s", hdev->name);
4769 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4770 hdev->hci_ver < BLUETOOTH_VER_1_2)
4771 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4772 MGMT_STATUS_NOT_SUPPORTED);
4774 if (cp->val != 0x00 && cp->val != 0x01)
4775 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4776 MGMT_STATUS_INVALID_PARAMS);
/* Only one SET_FAST_CONNECTABLE may be in flight at a time. */
4780 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4781 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op request: current flag already matches the requested value. */
4786 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4787 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4792 if (!hdev_is_powered(hdev)) {
4793 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4794 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4796 new_settings(hdev, sk);
4800 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4807 hci_req_init(&req, hdev);
4809 write_fast_connectable(&req, cp->val);
4811 err = hci_req_run(&req, fast_connectable_complete);
4813 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4814 MGMT_STATUS_FAILED);
4815 mgmt_pending_remove(cmd);
4819 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR. On failure the
 * optimistically-set HCI_BREDR_ENABLED flag is rolled back and the
 * error status is reported; on success the settings response and a New
 * Settings event are sent.
 * NOTE(review): lossy extraction -- braces/else/early-return missing.
 */
4824 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4826 struct mgmt_pending_cmd *cmd;
4828 BT_DBG("status 0x%02x", status);
4832 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4837 u8 mgmt_err = mgmt_status(status);
4839 /* We need to restore the flag if related HCI commands
/* set_bredr() sets this flag before running the request; undo it. */
4842 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4844 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4846 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4847 new_settings(hdev, cmd->sk);
4850 mgmt_pending_remove(cmd);
4853 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE capable) controller. Disabling while powered, or
 * re-enabling with a static address or Secure Connections active, is
 * rejected. Enabling while powered flips the flag up-front and issues
 * page-scan and advertising-data updates via an HCI request.
 * NOTE(review): lossy extraction -- some braces/gotos/return missing.
 */
4856 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4858 struct mgmt_mode *cp = data;
4859 struct mgmt_pending_cmd *cmd;
4860 struct hci_request req;
4863 BT_DBG("request for %s", hdev->name);
4865 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4866 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4867 MGMT_STATUS_NOT_SUPPORTED);
/* Toggling BR/EDR only makes sense while LE stays enabled. */
4869 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4870 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4871 MGMT_STATUS_REJECTED);
4873 if (cp->val != 0x00 && cp->val != 0x01)
4874 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4875 MGMT_STATUS_INVALID_PARAMS);
4879 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4880 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4884 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears all BR/EDR-only settings with it. */
4886 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4887 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4888 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4889 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4890 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4893 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4895 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4899 err = new_settings(hdev, sk);
4903 /* Reject disabling when powered on */
4905 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4906 MGMT_STATUS_REJECTED);
4909 /* When configuring a dual-mode controller to operate
4910 * with LE only and using a static address, then switching
4911 * BR/EDR back on is not allowed.
4913 * Dual-mode controllers shall operate with the public
4914 * address as its identity address for BR/EDR and LE. So
4915 * reject the attempt to create an invalid configuration.
4917 * The same restrictions applies when secure connections
4918 * has been enabled. For BR/EDR this is a controller feature
4919 * while for LE it is a host stack feature. This means that
4920 * switching BR/EDR back on when secure connections has been
4921 * enabled is not a supported transaction.
4923 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4924 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4925 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4926 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4927 MGMT_STATUS_REJECTED);
4932 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4933 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4938 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4944 /* We need to flip the bit already here so that update_adv_data
4945 * generates the correct flags.
4947 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4949 hci_req_init(&req, hdev);
4951 write_fast_connectable(&req, false);
4952 __hci_update_page_scan(&req);
4954 /* Since only the advertising data flags will change, there
4955 * is no need to update the scan response data.
4957 update_adv_data(&req);
/* set_bredr_complete() rolls the flag back if the request fails. */
4959 err = hci_req_run(&req, set_bredr_complete);
4961 mgmt_pending_remove(cmd);
4964 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Secure Connections. On error,
 * report the status; otherwise update HCI_SC_ENABLED / HCI_SC_ONLY to
 * reflect the requested mode (0x00 off, 0x01 SC, 0x02 SC-only) and send
 * the settings response plus a New Settings event.
 * NOTE(review): lossy extraction -- switch/case lines are missing, the
 * three flag-pair updates below correspond to the three mode values.
 */
4968 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4970 struct mgmt_pending_cmd *cmd;
4971 struct mgmt_mode *cp;
4973 BT_DBG("%s status %u", hdev->name, status);
4977 cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4982 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4983 mgmt_status(status));
/* Mode 0x00: secure connections fully disabled. */
4991 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4992 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Mode 0x01: SC enabled but legacy pairing still allowed. */
4995 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4996 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Mode 0x02: SC-only mode. */
4999 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5000 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5004 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5005 new_settings(hdev, cmd->sk);
5008 mgmt_pending_remove(cmd);
5010 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN. val: 0x00 off, 0x01 SC,
 * 0x02 SC-only. When the controller cannot be reached (powered off, no
 * SC support or BR/EDR disabled) the flags are toggled directly;
 * otherwise HCI_OP_WRITE_SC_SUPPORT is issued and sc_enable_complete()
 * finishes the job.
 * NOTE(review): lossy extraction -- some locals/braces/gotos missing.
 */
5013 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5014 void *data, u16 len)
5016 struct mgmt_mode *cp = data;
5017 struct mgmt_pending_cmd *cmd;
5018 struct hci_request req;
5022 BT_DBG("request for %s", hdev->name);
/* LE-only SC (host feature) is fine; otherwise the controller must be
 * SC capable.
 */
5024 if (!lmp_sc_capable(hdev) &&
5025 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5026 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5027 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled first. */
5029 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5030 lmp_sc_capable(hdev) &&
5031 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5032 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5033 MGMT_STATUS_REJECTED);
5035 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5037 MGMT_STATUS_INVALID_PARAMS);
5041 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5042 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5046 changed = !hci_dev_test_and_set_flag(hdev,
5048 if (cp->val == 0x02)
5049 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5051 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5053 changed = hci_dev_test_and_clear_flag(hdev,
5055 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5058 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5063 err = new_settings(hdev, sk);
5068 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5069 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Nothing to do if both flags already match the requested mode. */
5076 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5077 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5078 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5082 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5088 hci_req_init(&req, hdev);
5089 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5090 err = hci_req_run(&req, sc_enable_complete);
5092 mgmt_pending_remove(cmd);
5097 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS. val: 0x00 discard debug keys,
 * 0x01 keep debug keys, 0x02 keep debug keys and also generate them
 * (SSP debug mode). Updates HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS
 * and, when powered with SSP on, writes the controller's SSP debug mode.
 * NOTE(review): lossy extraction -- some braces/else/return missing.
 */
5101 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5102 void *data, u16 len)
5104 struct mgmt_mode *cp = data;
5105 bool changed, use_changed;
5108 BT_DBG("request for %s", hdev->name);
5110 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5111 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5112 MGMT_STATUS_INVALID_PARAMS);
5117 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5119 changed = hci_dev_test_and_clear_flag(hdev,
5120 HCI_KEEP_DEBUG_KEYS);
/* HCI_USE_DEBUG_KEYS (generation) is tied to mode 0x02 only. */
5122 if (cp->val == 0x02)
5123 use_changed = !hci_dev_test_and_set_flag(hdev,
5124 HCI_USE_DEBUG_KEYS);
5126 use_changed = hci_dev_test_and_clear_flag(hdev,
5127 HCI_USE_DEBUG_KEYS);
/* Push the new debug-mode setting to the controller right away. */
5129 if (hdev_is_powered(hdev) && use_changed &&
5130 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5131 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5132 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5133 sizeof(mode), &mode);
5136 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5141 err = new_settings(hdev, sk);
5144 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY: enable/disable LE privacy (RPA use)
 * and store/clear the local IRK. Only allowed while powered off.
 * NOTE(review): lossy extraction -- some braces/else/return missing.
 */
5148 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5151 struct mgmt_cp_set_privacy *cp = cp_data;
5155 BT_DBG("request for %s", hdev->name);
5157 if (!lmp_le_capable(hdev))
5158 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5159 MGMT_STATUS_NOT_SUPPORTED);
5161 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5162 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5163 MGMT_STATUS_INVALID_PARAMS);
5165 if (hdev_is_powered(hdev))
5166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5167 MGMT_STATUS_REJECTED);
5171 /* If user space supports this command it is also expected to
5172 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5174 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enabling: store the supplied IRK and force a fresh RPA generation. */
5177 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5178 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5179 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
/* Disabling: wipe the stored IRK. */
5181 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5182 memset(hdev->irk, 0, sizeof(hdev->irk));
5183 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5186 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5191 err = new_settings(hdev, sk);
5194 hci_dev_unlock(hdev);
5198 static bool irk_is_valid(struct mgmt_irk_info *irk)
5200 switch (irk->addr.type) {
5201 case BDADDR_LE_PUBLIC:
5204 case BDADDR_LE_RANDOM:
5205 /* Two most significant bits shall be set */
5206 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: validate the supplied list of Identity
 * Resolving Keys, replace the stored SMP IRK list with it, and enable
 * RPA resolving. Count and total length are cross-checked against the
 * message size before any state is touched.
 * NOTE(review): lossy extraction -- some locals/braces/return missing.
 */
5214 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5217 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
5218 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5219 sizeof(struct mgmt_irk_info));
5220 u16 irk_count, expected_len;
5223 BT_DBG("request for %s", hdev->name);
5225 if (!lmp_le_capable(hdev))
5226 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5227 MGMT_STATUS_NOT_SUPPORTED);
5229 irk_count = __le16_to_cpu(cp->irk_count);
5230 if (irk_count > max_irk_count) {
5231 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5232 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5233 MGMT_STATUS_INVALID_PARAMS);
5236 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5237 if (expected_len != len) {
5238 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5240 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5241 MGMT_STATUS_INVALID_PARAMS);
5244 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry up front so the load is all-or-nothing. */
5246 for (i = 0; i < irk_count; i++) {
5247 struct mgmt_irk_info *key = &cp->irks[i];
5249 if (!irk_is_valid(key))
5250 return mgmt_cmd_status(sk, hdev->id,
5252 MGMT_STATUS_INVALID_PARAMS);
/* Replace, don't merge: clear the existing IRK list first. */
5257 hci_smp_irks_clear(hdev);
5259 for (i = 0; i < irk_count; i++) {
5260 struct mgmt_irk_info *irk = &cp->irks[i];
5263 if (irk->addr.type == BDADDR_LE_PUBLIC)
5264 addr_type = ADDR_LE_DEV_PUBLIC;
5266 addr_type = ADDR_LE_DEV_RANDOM;
5268 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5272 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5274 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5276 hci_dev_unlock(hdev);
5281 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5283 if (key->master != 0x00 && key->master != 0x01)
5286 switch (key->addr.type) {
5287 case BDADDR_LE_PUBLIC:
5290 case BDADDR_LE_RANDOM:
5291 /* Two most significant bits shall be set */
5292 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: validate the list of LE
 * Long Term Keys, then atomically replace the stored SMP LTK list,
 * mapping each mgmt key type to the SMP type/authenticated pair.
 * NOTE(review): lossy extraction -- some locals/braces/breaks missing.
 */
5300 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5301 void *cp_data, u16 len)
5303 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
5304 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5305 sizeof(struct mgmt_ltk_info));
5306 u16 key_count, expected_len;
5309 BT_DBG("request for %s", hdev->name);
5311 if (!lmp_le_capable(hdev))
5312 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5313 MGMT_STATUS_NOT_SUPPORTED);
5315 key_count = __le16_to_cpu(cp->key_count);
5316 if (key_count > max_key_count) {
5317 BT_ERR("load_ltks: too big key_count value %u", key_count);
5318 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5319 MGMT_STATUS_INVALID_PARAMS);
5322 expected_len = sizeof(*cp) + key_count *
5323 sizeof(struct mgmt_ltk_info);
5324 if (expected_len != len) {
5325 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5327 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5328 MGMT_STATUS_INVALID_PARAMS);
5331 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry up front so the load is all-or-nothing. */
5333 for (i = 0; i < key_count; i++) {
5334 struct mgmt_ltk_info *key = &cp->keys[i];
5336 if (!ltk_is_valid(key))
5337 return mgmt_cmd_status(sk, hdev->id,
5338 MGMT_OP_LOAD_LONG_TERM_KEYS,
5339 MGMT_STATUS_INVALID_PARAMS);
/* Replace, don't merge: clear the existing LTK list first. */
5344 hci_smp_ltks_clear(hdev);
5346 for (i = 0; i < key_count; i++) {
5347 struct mgmt_ltk_info *key = &cp->keys[i];
5348 u8 type, addr_type, authenticated;
5350 if (key->addr.type == BDADDR_LE_PUBLIC)
5351 addr_type = ADDR_LE_DEV_PUBLIC;
5353 addr_type = ADDR_LE_DEV_RANDOM;
/* Map mgmt key type to SMP key type + authenticated flag; legacy
 * (non-P256) keys additionally distinguish master vs. slave role.
 */
5355 switch (key->type) {
5356 case MGMT_LTK_UNAUTHENTICATED:
5357 authenticated = 0x00;
5358 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5360 case MGMT_LTK_AUTHENTICATED:
5361 authenticated = 0x01;
5362 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5364 case MGMT_LTK_P256_UNAUTH:
5365 authenticated = 0x00;
5366 type = SMP_LTK_P256;
5368 case MGMT_LTK_P256_AUTH:
5369 authenticated = 0x01;
5370 type = SMP_LTK_P256;
5372 case MGMT_LTK_P256_DEBUG:
5373 authenticated = 0x00;
5374 type = SMP_LTK_P256_DEBUG;
5379 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5380 authenticated, key->val, key->enc_size, key->ediv,
5384 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5387 hci_dev_unlock(hdev);
/* Completion helper for Get Connection Information: build the response
 * from cached hci_conn values on success, or invalid-marker values on
 * failure, send it and release the connection reference taken by
 * get_conn_info().
 * NOTE(review): lossy extraction -- braces/else/return are missing.
 */
5392 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5394 struct hci_conn *conn = cmd->user_data;
5395 struct mgmt_rp_get_conn_info rp;
/* cmd->param is the original request; its first field is the address. */
5398 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5400 if (status == MGMT_STATUS_SUCCESS) {
5401 rp.rssi = conn->rssi;
5402 rp.tx_power = conn->tx_power;
5403 rp.max_tx_power = conn->max_tx_power;
5405 rp.rssi = HCI_RSSI_INVALID;
5406 rp.tx_power = HCI_TX_POWER_INVALID;
5407 rp.max_tx_power = HCI_TX_POWER_INVALID;
5410 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5411 status, &rp, sizeof(rp));
/* Balances the hci_conn_hold() done when the command was queued. */
5413 hci_conn_drop(conn);
/* HCI request completion callback for the RSSI / TX-power refresh
 * issued by get_conn_info(). Recovers the connection handle from the
 * last sent command, finds the matching pending mgmt command and
 * finishes it via its cmd_complete handler.
 * NOTE(review): lossy extraction -- braces/gotos/locals are missing.
 */
5419 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5422 struct hci_cp_read_rssi *cp;
5423 struct mgmt_pending_cmd *cmd;
5424 struct hci_conn *conn;
5428 BT_DBG("status 0x%02x", hci_status);
5432 /* Commands sent in request are either Read RSSI or Read Transmit Power
5433 * Level so we check which one was last sent to retrieve connection
5434 * handle. Both commands have handle as first parameter so it's safe to
5435 * cast data on the same command struct.
5437 * First command sent is always Read RSSI and we fail only if it fails.
5438 * In other case we simply override error to indicate success as we
5439 * already remembered if TX power value is actually valid.
5441 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5443 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5444 status = MGMT_STATUS_SUCCESS;
5446 status = mgmt_status(hci_status);
/* Neither command found in the sent buffer -- should not happen. */
5450 BT_ERR("invalid sent_cmd in conn_info response");
5454 handle = __le16_to_cpu(cp->handle);
5455 conn = hci_conn_hash_lookup_handle(hdev, handle);
5457 BT_ERR("unknown handle (%d) in conn_info response", handle);
5461 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5465 cmd->cmd_complete(cmd, status);
5466 mgmt_pending_remove(cmd);
5469 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI, TX power and max TX
 * power for an established connection. Serves cached values when they
 * are still fresh; otherwise queues Read RSSI (and, when needed, Read
 * Transmit Power Level) HCI commands and defers the reply to
 * conn_info_refresh_complete().
 * NOTE(review): lossy extraction -- some braces/gotos/locals missing.
 */
5472 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5475 struct mgmt_cp_get_conn_info *cp = data;
5476 struct mgmt_rp_get_conn_info rp;
5477 struct hci_conn *conn;
5478 unsigned long conn_info_age;
5481 BT_DBG("%s", hdev->name);
/* Pre-fill the response with the echoed address for all reply paths. */
5483 memset(&rp, 0, sizeof(rp));
5484 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5485 rp.addr.type = cp->addr.type;
5487 if (!bdaddr_type_is_valid(cp->addr.type))
5488 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5489 MGMT_STATUS_INVALID_PARAMS,
5494 if (!hdev_is_powered(hdev)) {
5495 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5496 MGMT_STATUS_NOT_POWERED, &rp,
5501 if (cp->addr.type == BDADDR_BREDR)
5502 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5505 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5507 if (!conn || conn->state != BT_CONNECTED) {
5508 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5509 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one outstanding query per connection is allowed. */
5514 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5515 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5516 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5520 /* To avoid client trying to guess when to poll again for information we
5521 * calculate conn info age as random value between min/max set in hdev.
5523 conn_info_age = hdev->conn_info_min_age +
5524 prandom_u32_max(hdev->conn_info_max_age -
5525 hdev->conn_info_min_age);
5527 /* Query controller to refresh cached values if they are too old or were
5530 if (time_after(jiffies, conn->conn_info_timestamp +
5531 msecs_to_jiffies(conn_info_age)) ||
5532 !conn->conn_info_timestamp) {
5533 struct hci_request req;
5534 struct hci_cp_read_tx_power req_txp_cp;
5535 struct hci_cp_read_rssi req_rssi_cp;
5536 struct mgmt_pending_cmd *cmd;
5538 hci_req_init(&req, hdev);
5539 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5540 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5543 /* For LE links TX power does not change thus we don't need to
5544 * query for it once value is known.
5546 if (!bdaddr_type_is_le(cp->addr.type) ||
5547 conn->tx_power == HCI_TX_POWER_INVALID) {
5548 req_txp_cp.handle = cpu_to_le16(conn->handle);
5549 req_txp_cp.type = 0x00;
5550 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5551 sizeof(req_txp_cp), &req_txp_cp);
5554 /* Max TX power needs to be read only once per connection */
5555 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5556 req_txp_cp.handle = cpu_to_le16(conn->handle);
5557 req_txp_cp.type = 0x01;
5558 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5559 sizeof(req_txp_cp), &req_txp_cp);
5562 err = hci_req_run(&req, conn_info_refresh_complete);
5566 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Pin the connection until conn_info_cmd_complete() drops it. */
5573 hci_conn_hold(conn);
5574 cmd->user_data = hci_conn_get(conn);
5575 cmd->cmd_complete = conn_info_cmd_complete;
5577 conn->conn_info_timestamp = jiffies;
5579 /* Cache is valid, just reply with values cached in hci_conn */
5580 rp.rssi = conn->rssi;
5581 rp.tx_power = conn->tx_power;
5582 rp.max_tx_power = conn->max_tx_power;
5584 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5585 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5589 hci_dev_unlock(hdev);
5593 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5595 struct hci_conn *conn = cmd->user_data;
5596 struct mgmt_rp_get_clock_info rp;
5597 struct hci_dev *hdev;
5600 memset(&rp, 0, sizeof(rp));
5601 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5606 hdev = hci_dev_get(cmd->index);
5608 rp.local_clock = cpu_to_le32(hdev->clock);
5613 rp.piconet_clock = cpu_to_le32(conn->clock);
5614 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5618 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5622 hci_conn_drop(conn);
/* HCI request completion callback for Get Clock Information. Recovers
 * the connection (when a piconet clock was requested, i.e. which != 0)
 * from the sent Read Clock command, finds the matching pending mgmt
 * command and finishes it.
 * NOTE(review): lossy extraction -- braces/gotos/locals are missing.
 */
5629 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5631 struct hci_cp_read_clock *hci_cp;
5632 struct mgmt_pending_cmd *cmd;
5633 struct hci_conn *conn;
5635 BT_DBG("%s status %u", hdev->name, status);
5639 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which: 0x00 = local clock only, 0x01 = piconet clock of a handle. */
5643 if (hci_cp->which) {
5644 u16 handle = __le16_to_cpu(hci_cp->handle);
5645 conn = hci_conn_hash_lookup_handle(hdev, handle);
5650 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5654 cmd->cmd_complete(cmd, mgmt_status(status));
5655 mgmt_pending_remove(cmd);
5658 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO (BR/EDR only): read the local
 * clock and, when a non-ANY peer address is given and connected, that
 * connection's piconet clock, via one or two Read Clock HCI commands.
 * The reply is sent from clock_info_cmd_complete().
 * NOTE(review): lossy extraction -- some braces/gotos/locals missing.
 */
5661 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5664 struct mgmt_cp_get_clock_info *cp = data;
5665 struct mgmt_rp_get_clock_info rp;
5666 struct hci_cp_read_clock hci_cp;
5667 struct mgmt_pending_cmd *cmd;
5668 struct hci_request req;
5669 struct hci_conn *conn;
5672 BT_DBG("%s", hdev->name);
5674 memset(&rp, 0, sizeof(rp));
5675 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5676 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5678 if (cp->addr.type != BDADDR_BREDR)
5679 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5680 MGMT_STATUS_INVALID_PARAMS,
5685 if (!hdev_is_powered(hdev)) {
5686 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5687 MGMT_STATUS_NOT_POWERED, &rp,
/* A concrete peer address requires an established ACL link. */
5692 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5693 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5695 if (!conn || conn->state != BT_CONNECTED) {
5696 err = mgmt_cmd_complete(sk, hdev->id,
5697 MGMT_OP_GET_CLOCK_INFO,
5698 MGMT_STATUS_NOT_CONNECTED,
5706 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5712 cmd->cmd_complete = clock_info_cmd_complete;
5714 hci_req_init(&req, hdev);
/* First command: local clock (hci_cp zeroed => which = 0x00). */
5716 memset(&hci_cp, 0, sizeof(hci_cp));
5717 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Pin the connection until clock_info_cmd_complete() releases it. */
5720 hci_conn_hold(conn);
5721 cmd->user_data = hci_conn_get(conn);
5723 hci_cp.handle = cpu_to_le16(conn->handle);
5724 hci_cp.which = 0x01; /* Piconet clock */
5725 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5728 err = hci_req_run(&req, get_clock_info_complete);
5730 mgmt_pending_remove(cmd);
5733 hci_dev_unlock(hdev);
5737 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5739 struct hci_conn *conn;
5741 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5745 if (conn->dst_type != type)
5748 if (conn->state != BT_CONNECTED)
5754 /* This function requires the caller holds hdev->lock */
/* Create (or look up) the connection parameters for addr/addr_type and
 * switch them to the given auto_connect policy, moving the entry onto
 * the matching action list (pend_le_reports / pend_le_conns) and
 * refreshing the background scan.
 * NOTE(review): lossy extraction -- returns/breaks/braces are missing.
 */
5755 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5756 u8 addr_type, u8 auto_connect)
5758 struct hci_dev *hdev = req->hdev;
5759 struct hci_conn_params *params;
5761 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
5765 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry currently sits on. */
5768 list_del_init(&params->action);
5770 switch (auto_connect) {
5771 case HCI_AUTO_CONN_DISABLED:
5772 case HCI_AUTO_CONN_LINK_LOSS:
5773 __hci_update_background_scan(req);
5775 case HCI_AUTO_CONN_REPORT:
5776 list_add(&params->action, &hdev->pend_le_reports);
5777 __hci_update_background_scan(req);
5779 case HCI_AUTO_CONN_DIRECT:
5780 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection when not already connected. */
5781 if (!is_connected(hdev, addr, addr_type)) {
5782 list_add(&params->action, &hdev->pend_le_conns);
5783 __hci_update_background_scan(req);
5788 params->auto_connect = auto_connect;
5790 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
5796 static void device_added(struct sock *sk, struct hci_dev *hdev,
5797 bdaddr_t *bdaddr, u8 type, u8 action)
5799 struct mgmt_ev_device_added ev;
5801 bacpy(&ev.addr.bdaddr, bdaddr);
5802 ev.addr.type = type;
5805 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Add Device: finish the pending
 * mgmt command (if still present) with the translated status.
 * NOTE(review): lossy extraction -- braces/early-return are missing.
 */
5808 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5810 struct mgmt_pending_cmd *cmd;
5812 BT_DBG("status 0x%02x", status);
5816 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5820 cmd->cmd_complete(cmd, mgmt_status(status));
5821 mgmt_pending_remove(cmd);
5824 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_DEVICE. action: 0x00 background scan report,
 * 0x01 allow incoming connection (BR/EDR whitelist / LE direct),
 * 0x02 auto-connect always. BR/EDR entries go on hdev->whitelist; LE
 * entries update the connection parameters' auto-connect policy.
 * NOTE(review): lossy extraction -- some braces/gotos/return missing.
 */
5827 static int add_device(struct sock *sk, struct hci_dev *hdev,
5828 void *data, u16 len)
5830 struct mgmt_cp_add_device *cp = data;
5831 struct mgmt_pending_cmd *cmd;
5832 struct hci_request req;
5833 u8 auto_conn, addr_type;
5836 BT_DBG("%s", hdev->name);
5838 if (!bdaddr_type_is_valid(cp->addr.type) ||
5839 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5840 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5841 MGMT_STATUS_INVALID_PARAMS,
5842 &cp->addr, sizeof(cp->addr));
5844 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5845 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5846 MGMT_STATUS_INVALID_PARAMS,
5847 &cp->addr, sizeof(cp->addr));
5849 hci_req_init(&req, hdev);
5853 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5859 cmd->cmd_complete = addr_cmd_complete;
5861 if (cp->addr.type == BDADDR_BREDR) {
5862 /* Only incoming connections action is supported for now */
5863 if (cp->action != 0x01) {
5864 err = cmd->cmd_complete(cmd,
5865 MGMT_STATUS_INVALID_PARAMS);
5866 mgmt_pending_remove(cmd);
5870 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Whitelist change may require enabling/adjusting page scan. */
5875 __hci_update_page_scan(&req);
5880 if (cp->addr.type == BDADDR_LE_PUBLIC)
5881 addr_type = ADDR_LE_DEV_PUBLIC;
5883 addr_type = ADDR_LE_DEV_RANDOM;
/* Map mgmt action to the internal auto-connect policy. */
5885 if (cp->action == 0x02)
5886 auto_conn = HCI_AUTO_CONN_ALWAYS;
5887 else if (cp->action == 0x01)
5888 auto_conn = HCI_AUTO_CONN_DIRECT;
5890 auto_conn = HCI_AUTO_CONN_REPORT;
5892 /* If the connection parameters don't exist for this device,
5893 * they will be created and configured with defaults.
5895 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5897 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5898 mgmt_pending_remove(cmd);
5903 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5905 err = hci_req_run(&req, add_device_complete);
5907 /* ENODATA means no HCI commands were needed (e.g. if
5908 * the adapter is powered off).
5910 if (err == -ENODATA)
5911 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5912 mgmt_pending_remove(cmd);
5916 hci_dev_unlock(hdev);
5920 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5921 bdaddr_t *bdaddr, u8 type)
5923 struct mgmt_ev_device_removed ev;
5925 bacpy(&ev.addr.bdaddr, bdaddr);
5926 ev.addr.type = type;
5928 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* HCI request completion callback for Remove Device: finish the pending
 * mgmt command (if still present) with the translated status.
 * NOTE(review): lossy extraction -- braces/early-return are missing.
 */
5931 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5933 struct mgmt_pending_cmd *cmd;
5935 BT_DBG("status 0x%02x", status);
5939 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5943 cmd->cmd_complete(cmd, mgmt_status(status));
5944 mgmt_pending_remove(cmd);
5947 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_DEVICE handler. Removes a single device from the BR/EDR
 * whitelist or the LE connection-parameter/auto-connect lists, or — when
 * the command carries BDADDR_ANY — flushes all such entries. Page scan and
 * background scan are updated via a queued HCI request; -ENODATA from
 * hci_req_run() means no HCI traffic was needed (e.g. powered off) and the
 * command is completed immediately with success.
 */
5950 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5951 void *data, u16 len)
5953 struct mgmt_cp_remove_device *cp = data;
5954 struct mgmt_pending_cmd *cmd;
5955 struct hci_request req;
5958 BT_DBG("%s", hdev->name);
5960 hci_req_init(&req, hdev);
5964 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5970 cmd->cmd_complete = addr_cmd_complete;
5972 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5973 struct hci_conn_params *params;
5976 if (!bdaddr_type_is_valid(cp->addr.type)) {
5977 err = cmd->cmd_complete(cmd,
5978 MGMT_STATUS_INVALID_PARAMS);
5979 mgmt_pending_remove(cmd);
5983 if (cp->addr.type == BDADDR_BREDR) {
5984 err = hci_bdaddr_list_del(&hdev->whitelist,
5988 err = cmd->cmd_complete(cmd,
5989 MGMT_STATUS_INVALID_PARAMS);
5990 mgmt_pending_remove(cmd);
5994 __hci_update_page_scan(&req);
5996 device_removed(sk, hdev, &cp->addr.bdaddr,
6001 if (cp->addr.type == BDADDR_LE_PUBLIC)
6002 addr_type = ADDR_LE_DEV_PUBLIC;
6004 addr_type = ADDR_LE_DEV_RANDOM;
6006 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6009 err = cmd->cmd_complete(cmd,
6010 MGMT_STATUS_INVALID_PARAMS);
6011 mgmt_pending_remove(cmd);
6015 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6016 err = cmd->cmd_complete(cmd,
6017 MGMT_STATUS_INVALID_PARAMS);
6018 mgmt_pending_remove(cmd);
/* Unlink from the pend_le_conns/pend_le_reports action list and from the
 * per-hdev le_conn_params list. (Fixed mis-encoded "&para" -> "¶".)
 */
6022 list_del(&params->action);
6023 list_del(&params->list);
6025 __hci_update_background_scan(&req);
6027 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6029 struct hci_conn_params *p, *tmp;
6030 struct bdaddr_list *b, *btmp;
6032 if (cp->addr.type) {
6033 err = cmd->cmd_complete(cmd,
6034 MGMT_STATUS_INVALID_PARAMS);
6035 mgmt_pending_remove(cmd);
6039 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6040 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6045 __hci_update_page_scan(&req);
/* Flush every LE conn param entry except explicitly disabled ones. */
6047 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6048 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6050 device_removed(sk, hdev, &p->addr, p->addr_type);
6051 list_del(&p->action);
6056 BT_DBG("All LE connection parameters were removed");
6058 __hci_update_background_scan(&req);
6062 err = hci_req_run(&req, remove_device_complete);
6064 /* ENODATA means no HCI commands were needed (e.g. if
6065 * the adapter is powered off).
6067 if (err == -ENODATA)
6068 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6069 mgmt_pending_remove(cmd);
6073 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler. Validates the parameter count against
 * the maximum that fits in a u16-sized payload, checks the exact expected
 * length, clears previously-disabled entries and then installs each valid
 * LE connection parameter set (invalid entries are logged and skipped, not
 * treated as a command failure).
 */
6077 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6080 struct mgmt_cp_load_conn_param *cp = data;
6081 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6082 sizeof(struct mgmt_conn_param));
6083 u16 param_count, expected_len;
6086 if (!lmp_le_capable(hdev))
6087 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6088 MGMT_STATUS_NOT_SUPPORTED);
6090 param_count = __le16_to_cpu(cp->param_count);
6091 if (param_count > max_param_count) {
6092 BT_ERR("load_conn_param: too big param_count value %u",
6094 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6095 MGMT_STATUS_INVALID_PARAMS);
6098 expected_len = sizeof(*cp) + param_count *
6099 sizeof(struct mgmt_conn_param);
6100 if (expected_len != len) {
6101 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6103 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6104 MGMT_STATUS_INVALID_PARAMS);
6107 BT_DBG("%s param_count %u", hdev->name, param_count);
6111 hci_conn_params_clear_disabled(hdev);
6113 for (i = 0; i < param_count; i++) {
6114 struct mgmt_conn_param *param = &cp->params[i];
6115 struct hci_conn_params *hci_param;
6116 u16 min, max, latency, timeout;
/* Fixed mis-encoded "&para" -> "¶": must take the address of param. */
6119 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
6122 if (param->addr.type == BDADDR_LE_PUBLIC) {
6123 addr_type = ADDR_LE_DEV_PUBLIC;
6124 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6125 addr_type = ADDR_LE_DEV_RANDOM;
6127 BT_ERR("Ignoring invalid connection parameters");
6131 min = le16_to_cpu(param->min_interval);
6132 max = le16_to_cpu(param->max_interval);
6133 latency = le16_to_cpu(param->latency);
6134 timeout = le16_to_cpu(param->timeout);
6136 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6137 min, max, latency, timeout);
6139 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6140 BT_ERR("Ignoring invalid connection parameters");
/* Fixed mis-encoded "&para" -> "¶" here as well. */
6144 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6147 BT_ERR("Failed to add connection parameters");
6151 hci_param->conn_min_interval = min;
6152 hci_param->conn_max_interval = max;
6153 hci_param->conn_latency = latency;
6154 hci_param->supervision_timeout = timeout;
6157 hci_dev_unlock(hdev);
6159 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler. Only valid while powered off, with
 * a strictly boolean config value, and only on controllers flagged with
 * HCI_QUIRK_EXTERNAL_CONFIG. Toggles HCI_EXT_CONFIGURED and, if the
 * configured/unconfigured state flips, re-announces the index accordingly.
 */
6163 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6164 void *data, u16 len)
6166 struct mgmt_cp_set_external_config *cp = data;
6170 BT_DBG("%s", hdev->name);
6172 if (hdev_is_powered(hdev))
6173 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6174 MGMT_STATUS_REJECTED);
6176 if (cp->config != 0x00 && cp->config != 0x01)
6177 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6178 MGMT_STATUS_INVALID_PARAMS);
6180 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6182 MGMT_STATUS_NOT_SUPPORTED);
6187 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED)
6189 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6191 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6198 err = new_options(hdev, sk);
6200 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6201 mgmt_index_removed(hdev);
6203 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6204 hci_dev_set_flag(hdev, HCI_CONFIG);
6205 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6207 queue_work(hdev->req_workqueue, &hdev->power_on);
6209 set_bit(HCI_RAW, &hdev->flags);
6210 mgmt_index_added(hdev);
6215 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler. Rejected while powered, with
 * BDADDR_ANY, or when the driver provides no set_bdaddr hook. Records the
 * new public address; if the controller thereby becomes fully configured,
 * the unconfigured index is removed and the device is powered on under
 * HCI_CONFIG/HCI_AUTO_OFF to apply the address.
 */
6219 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6220 void *data, u16 len)
6222 struct mgmt_cp_set_public_address *cp = data;
6226 BT_DBG("%s", hdev->name);
6228 if (hdev_is_powered(hdev))
6229 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6230 MGMT_STATUS_REJECTED);
6232 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6233 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6234 MGMT_STATUS_INVALID_PARAMS);
6236 if (!hdev->set_bdaddr)
6237 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6238 MGMT_STATUS_NOT_SUPPORTED);
6242 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6243 bacpy(&hdev->public_addr, &cp->bdaddr);
6245 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6252 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6253 err = new_options(hdev, sk);
6255 if (is_configured(hdev)) {
6256 mgmt_index_removed(hdev);
6258 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6260 hci_dev_set_flag(hdev, HCI_CONFIG);
6261 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6263 queue_work(hdev->req_workqueue, &hdev->power_on);
6267 hci_dev_unlock(hdev);
/* Dispatch table for mgmt opcodes: indexed by opcode, each entry gives the
 * handler, its fixed (or minimum, with HCI_MGMT_VAR_LEN) parameter size,
 * and flags such as HCI_MGMT_UNCONFIGURED for commands allowed on
 * not-yet-configured controllers. Entry order must match the opcode values
 * defined in mgmt.h.
 */
6271 static const struct hci_mgmt_handler mgmt_handlers[] = {
6272 { NULL }, /* 0x0000 (no command) */
6273 { read_version, MGMT_READ_VERSION_SIZE,
6275 { read_commands, MGMT_READ_COMMANDS_SIZE,
6277 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6279 { read_controller_info, MGMT_READ_INFO_SIZE, 0 },
6280 { set_powered, MGMT_SETTING_SIZE, 0 },
6281 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE, 0 },
6282 { set_connectable, MGMT_SETTING_SIZE, 0 },
6283 { set_fast_connectable, MGMT_SETTING_SIZE, 0 },
6284 { set_bondable, MGMT_SETTING_SIZE, 0 },
6285 { set_link_security, MGMT_SETTING_SIZE, 0 },
6286 { set_ssp, MGMT_SETTING_SIZE, 0 },
6287 { set_hs, MGMT_SETTING_SIZE, 0 },
6288 { set_le, MGMT_SETTING_SIZE, 0 },
6289 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE, 0 },
6290 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE, 0 },
6291 { add_uuid, MGMT_ADD_UUID_SIZE, 0 },
6292 { remove_uuid, MGMT_REMOVE_UUID_SIZE, 0 },
6293 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6295 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6297 { disconnect, MGMT_DISCONNECT_SIZE, 0 },
6298 { get_connections, MGMT_GET_CONNECTIONS_SIZE, 0 },
6299 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE, 0 },
6300 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE, 0 },
6301 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE, 0 },
6302 { pair_device, MGMT_PAIR_DEVICE_SIZE, 0 },
6303 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE, 0 },
6304 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE, 0 },
6305 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE, 0 },
6306 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE, 0 },
6307 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE, 0 },
6308 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE, 0 },
6309 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6310 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6312 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE, 0 },
6313 { start_discovery, MGMT_START_DISCOVERY_SIZE, 0 },
6314 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE, 0 },
6315 { confirm_name, MGMT_CONFIRM_NAME_SIZE, 0 },
6316 { block_device, MGMT_BLOCK_DEVICE_SIZE, 0 },
6317 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE, 0 },
6318 { set_device_id, MGMT_SET_DEVICE_ID_SIZE, 0 },
6319 { set_advertising, MGMT_SETTING_SIZE, 0 },
6320 { set_bredr, MGMT_SETTING_SIZE, 0 },
6321 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE, 0 },
6322 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE, 0 },
6323 { set_secure_conn, MGMT_SETTING_SIZE, 0 },
6324 { set_debug_keys, MGMT_SETTING_SIZE, 0 },
6325 { set_privacy, MGMT_SET_PRIVACY_SIZE, 0 },
6326 { load_irks, MGMT_LOAD_IRKS_SIZE,
6328 { get_conn_info, MGMT_GET_CONN_INFO_SIZE, 0 },
6329 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE, 0 },
6330 { add_device, MGMT_ADD_DEVICE_SIZE, 0 },
6331 { remove_device, MGMT_REMOVE_DEVICE_SIZE, 0 },
6332 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6334 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6336 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6337 HCI_MGMT_UNCONFIGURED },
6338 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6339 HCI_MGMT_UNCONFIGURED },
6340 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6341 HCI_MGMT_UNCONFIGURED },
6342 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6345 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
/* Entry point for mgmt socket writes. Copies the message into a kernel
 * buffer, decodes the mgmt header (opcode/index/len, little-endian),
 * validates opcode against the channel's handler table, resolves and
 * validates the controller index (rejecting SETUP/CONFIG/USER_CHANNEL
 * states and unconfigured devices for handlers lacking
 * HCI_MGMT_UNCONFIGURED), enforces the handler's fixed or minimum
 * parameter length, then dispatches to the handler.
 */
6349 int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
6350 struct msghdr *msg, size_t msglen)
6354 struct mgmt_hdr *hdr;
6355 u16 opcode, index, len;
6356 struct hci_dev *hdev = NULL;
6357 const struct hci_mgmt_handler *handler;
6358 bool var_len, no_hdev;
6361 BT_DBG("got %zu bytes", msglen);
6363 if (msglen < sizeof(*hdr))
6366 buf = kmalloc(msglen, GFP_KERNEL);
6370 if (memcpy_from_msg(buf, msg, msglen)) {
6376 opcode = __le16_to_cpu(hdr->opcode);
6377 index = __le16_to_cpu(hdr->index);
6378 len = __le16_to_cpu(hdr->len);
/* Header length field must exactly match the remaining payload. */
6380 if (len != msglen - sizeof(*hdr)) {
6385 if (opcode >= chan->handler_count ||
6386 chan->handlers[opcode].func == NULL) {
6387 BT_DBG("Unknown op %u", opcode);
6388 err = mgmt_cmd_status(sk, index, opcode,
6389 MGMT_STATUS_UNKNOWN_COMMAND);
6393 handler = &chan->handlers[opcode];
6395 if (index != MGMT_INDEX_NONE) {
6396 hdev = hci_dev_get(index);
6398 err = mgmt_cmd_status(sk, index, opcode,
6399 MGMT_STATUS_INVALID_INDEX);
6403 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
6404 hci_dev_test_flag(hdev, HCI_CONFIG) ||
6405 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
6406 err = mgmt_cmd_status(sk, index, opcode,
6407 MGMT_STATUS_INVALID_INDEX);
6411 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
6412 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
6413 err = mgmt_cmd_status(sk, index, opcode,
6414 MGMT_STATUS_INVALID_INDEX);
/* A handler marked NO_HDEV must be called without an index and
 * vice versa.
 */
6419 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
6420 if (no_hdev != !hdev) {
6421 err = mgmt_cmd_status(sk, index, opcode,
6422 MGMT_STATUS_INVALID_INDEX);
6426 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
6427 if ((var_len && len < handler->data_len) ||
6428 (!var_len && len != handler->data_len)) {
6429 err = mgmt_cmd_status(sk, index, opcode,
6430 MGMT_STATUS_INVALID_PARAMS);
6435 mgmt_init_hdev(sk, hdev);
6437 cp = buf + sizeof(*hdr);
6439 err = handler->func(sk, hdev, cp, len);
/* Announce a newly registered controller: raw devices are skipped; an
 * unconfigured controller raises UNCONF_INDEX_ADDED, a configured one
 * INDEX_ADDED, and in all cases an EXT_INDEX_ADDED event is sent to
 * listeners of the extended index events.
 */
6453 void mgmt_index_added(struct hci_dev *hdev)
6455 struct mgmt_ev_ext_index ev;
6457 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6460 switch (hdev->dev_type) {
6462 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6463 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6464 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6467 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6468 HCI_MGMT_INDEX_EVENTS);
6481 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6482 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal. All pending mgmt commands are completed
 * with INVALID_INDEX first, then the matching (UNCONF_)INDEX_REMOVED and
 * EXT_INDEX_REMOVED events are emitted. Raw devices are skipped.
 */
6485 void mgmt_index_removed(struct hci_dev *hdev)
6487 struct mgmt_ev_ext_index ev;
6488 u8 status = MGMT_STATUS_INVALID_INDEX;
6490 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6493 switch (hdev->dev_type) {
6495 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6497 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6498 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6499 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6502 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6503 HCI_MGMT_INDEX_EVENTS);
6516 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6517 HCI_MGMT_EXT_INDEX_EVENTS);
6520 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE conn param entry onto the pending-connection or
 * pending-report list according to its auto_connect policy, then refresh
 * background scanning through the supplied request.
 */
6521 static void restart_le_actions(struct hci_request *req)
6523 struct hci_dev *hdev = req->hdev;
6524 struct hci_conn_params *p;
6526 list_for_each_entry(p, &hdev->le_conn_params, list) {
6527 /* Needed for AUTO_OFF case where might not "really"
6528 * have been powered off.
6530 list_del_init(&p->action);
6532 switch (p->auto_connect) {
6533 case HCI_AUTO_CONN_DIRECT:
6534 case HCI_AUTO_CONN_ALWAYS:
6535 list_add(&p->action, &hdev->pend_le_conns);
6537 case HCI_AUTO_CONN_REPORT:
6538 list_add(&p->action, &hdev->pend_le_reports);
6545 __hci_update_background_scan(req);
/* Completion callback for the power-on HCI request: responds to all
 * pending SET_POWERED commands and broadcasts the new settings.
 */
6548 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6550 struct cmd_lookup match = { NULL, hdev };
6552 BT_DBG("status 0x%02x", status);
6555 /* Register the available SMP channels (BR/EDR and LE) only
6556 * when successfully powering on the controller. This late
6557 * registration is required so that LE SMP can clearly
6558 * decide if the public address or static address is used.
6565 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6567 new_settings(hdev, match.sk);
6569 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state with
 * the mgmt-level flags after power on: SSP/SC host support, LE host
 * support, advertising data and state, LE auto-connect actions, link
 * security (authentication), fast-connectable mode and page scan.
 * Returns the hci_req_run() result (0 when commands were queued).
 */
6575 static int powered_update_hci(struct hci_dev *hdev)
6577 struct hci_request req;
6580 hci_req_init(&req, hdev);
6582 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
6583 !lmp_host_ssp_capable(hdev)) {
6586 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6588 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6591 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6592 sizeof(support), &support);
6596 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
6597 lmp_bredr_capable(hdev)) {
6598 struct hci_cp_write_le_host_supported cp;
6603 /* Check first if we already have the right
6604 * host state (host features set)
6606 if (cp.le != lmp_host_le_capable(hdev) ||
6607 cp.simul != lmp_host_le_br_capable(hdev))
6608 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
6612 if (lmp_le_capable(hdev)) {
6613 /* Make sure the controller has a good default for
6614 * advertising data. This also applies to the case
6615 * where BR/EDR was toggled during the AUTO_OFF phase.
6617 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
6618 update_adv_data(&req);
6619 update_scan_rsp_data(&req);
6622 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6623 enable_advertising(&req);
6625 restart_le_actions(&req);
/* Sync HCI_AUTH with the mgmt link-security setting. */
6628 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
6629 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6630 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6631 sizeof(link_sec), &link_sec);
6633 if (lmp_bredr_capable(hdev)) {
6634 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
6635 write_fast_connectable(&req, true);
6637 write_fast_connectable(&req, false);
6638 __hci_update_page_scan(&req);
6644 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a power state change. On power-on, defer the settings
 * response to powered_complete() when HCI commands were queued. On power
 * off, fail all pending commands (INVALID_INDEX when the hdev is being
 * unregistered, NOT_POWERED otherwise), clear a non-zero class of device
 * and broadcast the new settings.
 */
6647 int mgmt_powered(struct hci_dev *hdev, u8 powered)
6649 struct cmd_lookup match = { NULL, hdev };
6650 u8 status, zero_cod[] = { 0, 0, 0 };
6653 if (!hci_dev_test_flag(hdev, HCI_MGMT))
6657 if (powered_update_hci(hdev) == 0)
6660 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
6665 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6667 /* If the power off is because of hdev unregistration let
6668 * use the appropriate INVALID_INDEX status. Otherwise use
6669 * NOT_POWERED. We cover both scenarios here since later in
6670 * mgmt_index_removed() any hci_conn callbacks will have already
6671 * been triggered, potentially causing misleading DISCONNECTED
6674 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
6675 status = MGMT_STATUS_INVALID_INDEX;
6677 status = MGMT_STATUS_NOT_POWERED;
6679 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6681 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6682 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6683 zero_cod, sizeof(zero_cod), NULL);
6686 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: RFKILLED for -ERFKILL, otherwise a
 * generic FAILED status.
 */
6694 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6696 struct mgmt_pending_cmd *cmd;
6699 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6703 if (err == -ERFKILL)
6704 status = MGMT_STATUS_RFKILLED;
6706 status = MGMT_STATUS_FAILED;
6708 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6710 mgmt_pending_remove(cmd);
/* Discoverable timeout expiry: clear both discoverable flags, restore
 * page-scan-only on BR/EDR, refresh advertising data, zero the stored
 * timeout and broadcast the new settings.
 */
6713 void mgmt_discoverable_timeout(struct hci_dev *hdev)
6715 struct hci_request req;
6719 /* When discoverable timeout triggers, then just make sure
6720 * the limited discoverable flag is cleared. Even in the case
6721 * of a timeout triggered from general discoverable, it is
6722 * safe to unconditionally clear the flag.
6724 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
6725 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6727 hci_req_init(&req, hdev);
6728 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6729 u8 scan = SCAN_PAGE;
6730 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6731 sizeof(scan), &scan);
6734 update_adv_data(&req);
6735 hci_req_run(&req, NULL);
6737 hdev->discov_timeout = 0;
6739 new_settings(hdev, NULL);
6741 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key; the
 * store hint tells userspace whether to persist it.
 */
6744 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6747 struct mgmt_ev_new_link_key ev;
6749 memset(&ev, 0, sizeof(ev));
6751 ev.store_hint = persistent;
6752 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6753 ev.key.addr.type = BDADDR_BREDR;
6754 ev.key.type = key->type;
6755 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6756 ev.key.pin_len = key->pin_len;
6758 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's type and authentication state to the corresponding
 * MGMT_LTK_* constant exposed to userspace.
 */
6761 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6763 switch (ltk->type) {
6766 if (ltk->authenticated)
6767 return MGMT_LTK_AUTHENTICATED;
6768 return MGMT_LTK_UNAUTHENTICATED;
6770 if (ltk->authenticated)
6771 return MGMT_LTK_P256_AUTH;
6772 return MGMT_LTK_P256_UNAUTH;
6773 case SMP_LTK_P256_DEBUG:
6774 return MGMT_LTK_P256_DEBUG;
6777 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LTK. The store hint is forced
 * to 0 for non-identity random addresses (neither static random nor
 * public) since such addresses are not stable across reconnections.
 */
6780 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6782 struct mgmt_ev_new_long_term_key ev;
6784 memset(&ev, 0, sizeof(ev));
6786 /* Devices using resolvable or non-resolvable random addresses
6787 * without providing an identity resolving key don't require
6788 * to store long term keys. Their addresses will change the
6791 * Only when a remote device provides an identity address
6792 * make sure the long term key is stored. If the remote
6793 * identity is known, the long term keys are internally
6794 * mapped to the identity address. So allow static random
6795 * and public addresses here.
6797 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6798 (key->bdaddr.b[5] & 0xc0) != 0xc0
6799 ev.store_hint = 0x00;
6801 ev.store_hint = persistent;
6803 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6804 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6805 ev.key.type = mgmt_ltk_type(key);
6806 ev.key.enc_size = key->enc_size;
6807 ev.key.ediv = key->ediv;
6808 ev.key.rand = key->rand;
6810 if (key->type == SMP_LTK)
6813 memcpy(ev.key.val, key->val, sizeof(key->val));
6815 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key. The store hint
 * is set only when the key came with an RPA (i.e. the device actually
 * uses resolvable random addresses).
 */
6818 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6820 struct mgmt_ev_new_irk ev;
6822 memset(&ev, 0, sizeof(ev));
6824 /* For identity resolving keys from devices that are already
6825 * using a public address or static random address, do not
6826 * ask for storing this key. The identity resolving key really
6827 * is only mandatory for devices using resolvable random
6830 * Storing all identity resolving keys has the downside that
6831 * they will be also loaded on next boot of the system. More
6832 * identity resolving keys, means more time during scanning is
6833 * needed to actually resolve these addresses.
6835 if (bacmp(&irk->rpa, BDADDR_ANY))
6836 ev.store_hint = 0x01;
6838 ev.store_hint = 0x00;
6840 bacpy(&ev.rpa, &irk->rpa);
6841 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6842 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6843 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6845 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key. Like LTKs, the
 * store hint is cleared for non-identity random addresses.
 */
6848 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6851 struct mgmt_ev_new_csrk ev;
6853 memset(&ev, 0, sizeof(ev));
6855 /* Devices using resolvable or non-resolvable random addresses
6856 * without providing an identity resolving key don't require
6857 * to store signature resolving keys. Their addresses will change
6858 * the next time around.
6860 * Only when a remote device provides an identity address
6861 * make sure the signature resolving key is stored. So allow
6862 * static random and public addresses here.
6864 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6865 (csrk->bdaddr.b[5] & 0xc0) != 0xc0
6866 ev.store_hint = 0x00;
6868 ev.store_hint = persistent;
6870 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6871 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6872 ev.key.type = csrk->type;
6873 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6875 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with the connection parameters a remote
 * device requested; only for identity (public or static random)
 * addresses, since others cannot be stored meaningfully.
 */
6878 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6879 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6880 u16 max_interval, u16 latency, u16 timeout)
6882 struct mgmt_ev_new_conn_param ev;
6884 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6887 memset(&ev, 0, sizeof(ev));
6888 bacpy(&ev.addr.bdaddr, bdaddr);
6889 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6890 ev.store_hint = store_hint;
6891 ev.min_interval = cpu_to_le16(min_interval);
6892 ev.max_interval = cpu_to_le16(max_interval);
6893 ev.latency = cpu_to_le16(latency);
6894 ev.timeout = cpu_to_le16(timeout);
6896 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR structure (length byte, type byte, payload) at offset
 * @eir_len in @eir and return the new length. The length byte counts the
 * type byte plus the payload, per the EIR format.
 */
6899 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6902 eir[eir_len++] = sizeof(type) + data_len;
6903 eir[eir_len++] = type;
6904 memcpy(&eir[eir_len], data, data_len);
6905 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED. The EIR payload is either the cached LE
 * advertising data (used verbatim) or, for BR/EDR, the remote name and a
 * non-zero class of device appended as EIR structures.
 */
6910 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6911 u32 flags, u8 *name, u8 name_len)
6914 struct mgmt_ev_device_connected *ev = (void *) buf;
6917 bacpy(&ev->addr.bdaddr, &conn->dst);
6918 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6920 ev->flags = __cpu_to_le32(flags);
6922 /* We must ensure that the EIR Data fields are ordered and
6923 * unique. Keep it simple for now and avoid the problem by not
6924 * adding any BR/EDR data to the LE adv.
6926 if (conn->le_adv_data_len > 0) {
6927 memcpy(&ev->eir[eir_len],
6928 conn->le_adv_data, conn->le_adv_data_len);
6929 eir_len = conn->le_adv_data_len;
6932 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6935 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
6936 eir_len = eir_append_data(ev->eir, eir_len,
6938 conn->dev_class, 3);
6941 ev->eir_len = cpu_to_le16(eir_len);
6943 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6944 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command
 * with success and record its originating socket in *(struct sock **)data.
 */
6947 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6949 struct sock **sk = data;
6951 cmd->cmd_complete(cmd, 0);
6956 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: signal DEVICE_UNPAIRED for the address
 * in the pending UNPAIR_DEVICE command, then complete and drop it.
 */
6959 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6961 struct hci_dev *hdev = data;
6962 struct mgmt_cp_unpair_device *cp = cmd->param;
6964 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6966 cmd->cmd_complete(cmd, 0);
6967 mgmt_pending_remove(cmd);
/* Report whether a power-off is in progress, i.e. a pending SET_POWERED
 * command exists (the visible code only performs the lookup; the decision
 * logic on the command's mode parameter is outside this excerpt).
 */
6970 bool mgmt_powering_down(struct hci_dev *hdev)
6972 struct mgmt_pending_cmd *cmd;
6973 struct mgmt_mode *cp;
6975 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED for a dropped connection. If this is
 * the last connection during a power-down, the deferred power_off work is
 * expedited. Pending DISCONNECT commands are completed first so the event
 * can be suppressed for the socket that requested the disconnect; pending
 * UNPAIR_DEVICE commands are answered afterwards.
 */
6986 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6987 u8 link_type, u8 addr_type, u8 reason,
6988 bool mgmt_connected)
6990 struct mgmt_ev_device_disconnected ev;
6991 struct sock *sk = NULL;
6993 /* The connection is still in hci_conn_hash so test for 1
6994 * instead of 0 to know if this is the last one.
6996 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6997 cancel_delayed_work(&hdev->power_off);
6998 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7001 if (!mgmt_connected)
7004 if (link_type != ACL_LINK && link_type != LE_LINK)
7007 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7009 bacpy(&ev.addr.bdaddr, bdaddr);
7010 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7013 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7018 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: answer pending UNPAIR_DEVICE commands and,
 * if a pending DISCONNECT command matches this address and type, complete
 * it with the translated HCI status.
 */
7022 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7023 u8 link_type, u8 addr_type, u8 status)
7025 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7026 struct mgmt_cp_disconnect *cp;
7027 struct mgmt_pending_cmd *cmd;
7029 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7032 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
7038 if (bacmp(bdaddr, &cp->addr.bdaddr))
7041 if (cp->addr.type != bdaddr_type)
7044 cmd->cmd_complete(cmd, mgmt_status(status));
7045 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the translated status; like the
 * disconnected path, expedite power_off if this was the last connection
 * during a power-down.
 */
7048 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7049 u8 addr_type, u8 status)
7051 struct mgmt_ev_connect_failed ev;
7053 /* The connection is still in hci_conn_hash so test for 1
7054 * instead of 0 to know if this is the last one.
7056 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7057 cancel_delayed_work(&hdev->power_off);
7058 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7061 bacpy(&ev.addr.bdaddr, bdaddr);
7062 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7063 ev.status = mgmt_status(status);
7065 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a BR/EDR PIN code request to userspace via
 * MGMT_EV_PIN_CODE_REQUEST.
 */
7068 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7070 struct mgmt_ev_pin_code_request ev;
7072 bacpy(&ev.addr.bdaddr, bdaddr);
7073 ev.addr.type = BDADDR_BREDR;
7076 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated status. */
7079 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7082 struct mgmt_pending_cmd *cmd;
7084 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7088 cmd->cmd_complete(cmd, mgmt_status(status));
7089 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated
 * status.
 */
7092 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7095 struct mgmt_pending_cmd *cmd;
7097 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7101 cmd->cmd_complete(cmd, mgmt_status(status));
7102 mgmt_pending_remove(cmd);
/* Forward a user-confirmation (numeric comparison) request to userspace
 * via MGMT_EV_USER_CONFIRM_REQUEST.
 */
7105 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7106 u8 link_type, u8 addr_type, u32 value,
7109 struct mgmt_ev_user_confirm_request ev;
7111 BT_DBG("%s", hdev->name);
7113 bacpy(&ev.addr.bdaddr, bdaddr);
7114 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7115 ev.confirm_hint = confirm_hint;
7116 ev.value = cpu_to_le32(value);
7118 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Forward a passkey-entry request to userspace via
 * MGMT_EV_USER_PASSKEY_REQUEST.
 */
7122 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7123 u8 link_type, u8 addr_type)
7125 struct mgmt_ev_user_passkey_request ev;
7127 BT_DBG("%s", hdev->name);
7129 bacpy(&ev.addr.bdaddr, bdaddr);
7130 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7132 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * commands: find the pending command for @opcode and complete it with the
 * translated status.
 */
7136 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7137 u8 link_type, u8 addr_type, u8 status,
7140 struct mgmt_pending_cmd *cmd;
7142 cmd = mgmt_pending_find(opcode, hdev);
7146 cmd->cmd_complete(cmd, mgmt_status(status));
7147 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
7152 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7153 u8 link_type, u8 addr_type, u8 status)
7155 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7156 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
7159 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7160 u8 link_type, u8 addr_type, u8 status)
7162 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7164 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
7167 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7168 u8 link_type, u8 addr_type, u8 status)
7170 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7171 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
7174 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7175 u8 link_type, u8 addr_type, u8 status)
7177 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7179 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey and
 * track how many digits the remote side has entered.
 */
7182 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7183 u8 link_type, u8 addr_type, u32 passkey,
7186 struct mgmt_ev_passkey_notify ev;
7188 BT_DBG("%s", hdev->name);
7190 bacpy(&ev.addr.bdaddr, bdaddr);
7191 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7192 ev.passkey = __cpu_to_le32(passkey);
7193 ev.entered = entered;
7195 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication failed on @conn: emit MGMT_EV_AUTH_FAILED (skipping the
 * socket of a pending pairing command if one exists) and complete that
 * pending pairing command with the translated status.
 */
7198 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7200 struct mgmt_ev_auth_failed ev;
7201 struct mgmt_pending_cmd *cmd;
7202 u8 status = mgmt_status(hci_status);
7204 bacpy(&ev.addr.bdaddr, &conn->dst);
7205 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7208 cmd = find_pairing(conn);
7210 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7211 cmd ? cmd->sk : NULL);
7214 cmd->cmd_complete(cmd, status);
7215 mgmt_pending_remove(cmd);
/* Completion of HCI Write Auth Enable: on error, fail all pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY with the
 * HCI_AUTH flag, answer the pending commands and broadcast new settings if
 * anything changed.
 */
7219 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7221 struct cmd_lookup match = { NULL, hdev };
7225 u8 mgmt_err = mgmt_status(status);
7226 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7227 cmd_status_rsp, &mgmt_err);
7231 if (test_bit(HCI_AUTH, &hdev->flags))
7232 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7234 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7236 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7240 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command that blanks the extended inquiry
 * response, mirroring the cleared state in hdev->eir. No-op if the
 * controller lacks extended inquiry support.
 */
7246 static void clear_eir(struct hci_request *req)
7248 struct hci_dev *hdev = req->hdev;
7249 struct hci_cp_write_eir cp;
7251 if (!lmp_ext_inq_capable(hdev))
7254 memset(hdev->eir, 0, sizeof(hdev->eir));
7256 memset(&cp, 0, sizeof(cp));
7258 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of HCI Write SSP Mode: on error, roll back an optimistically
 * set HCI_SSP_ENABLED (clearing HS as well) and fail pending SET_SSP
 * commands; on success, sync the SSP/HS flags, answer pending commands,
 * broadcast settings on change and update the debug-key mode via a
 * follow-up request.
 */
7261 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7263 struct cmd_lookup match = { NULL, hdev };
7264 struct hci_request req;
7265 bool changed = false;
7268 u8 mgmt_err = mgmt_status(status);
7270 if (enable && hci_dev_test_and_clear_flag(hdev,
7272 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7273 new_settings(hdev, NULL);
7276 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7282 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7284 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7286 changed = hci_dev_test_and_clear_flag(hdev,
7289 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7292 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7295 new_settings(hdev, match.sk);
7300 hci_req_init(&req, hdev);
7302 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7303 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7304 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7305 sizeof(enable), &enable);
7311 hci_req_run(&req, NULL);
7314 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7316 struct cmd_lookup *match = data;
7318 if (match->sk == NULL) {
7319 match->sk = cmd->sk;
7320 sock_hold(match->sk);
7324 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7327 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7329 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7330 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7331 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7334 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
7341 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7343 struct mgmt_cp_set_local_name ev;
7344 struct mgmt_pending_cmd *cmd;
7349 memset(&ev, 0, sizeof(ev));
7350 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7351 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7353 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7355 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7357 /* If this is a HCI command related to powering on the
7358 * HCI dev don't send any mgmt signals.
7360 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
7364 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7365 cmd ? cmd->sk : NULL);
7368 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7369 u8 *rand192, u8 *hash256, u8 *rand256,
7372 struct mgmt_pending_cmd *cmd;
7374 BT_DBG("%s status %u", hdev->name, status);
7376 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7381 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7382 mgmt_status(status));
7384 struct mgmt_rp_read_local_oob_data rp;
7385 size_t rp_size = sizeof(rp);
7387 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7388 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7390 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7391 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7392 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7394 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7397 mgmt_cmd_complete(cmd->sk, hdev->id,
7398 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7402 mgmt_pending_remove(cmd);
7405 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7409 for (i = 0; i < uuid_count; i++) {
7410 if (!memcmp(uuid, uuids[i], 16))
7417 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7421 while (parsed < eir_len) {
7422 u8 field_len = eir[0];
7429 if (eir_len - parsed < field_len + 1)
7433 case EIR_UUID16_ALL:
7434 case EIR_UUID16_SOME:
7435 for (i = 0; i + 3 <= field_len; i += 2) {
7436 memcpy(uuid, bluetooth_base_uuid, 16);
7437 uuid[13] = eir[i + 3];
7438 uuid[12] = eir[i + 2];
7439 if (has_uuid(uuid, uuid_count, uuids))
7443 case EIR_UUID32_ALL:
7444 case EIR_UUID32_SOME:
7445 for (i = 0; i + 5 <= field_len; i += 4) {
7446 memcpy(uuid, bluetooth_base_uuid, 16);
7447 uuid[15] = eir[i + 5];
7448 uuid[14] = eir[i + 4];
7449 uuid[13] = eir[i + 3];
7450 uuid[12] = eir[i + 2];
7451 if (has_uuid(uuid, uuid_count, uuids))
7455 case EIR_UUID128_ALL:
7456 case EIR_UUID128_SOME:
7457 for (i = 0; i + 17 <= field_len; i += 16) {
7458 memcpy(uuid, eir + i + 2, 16);
7459 if (has_uuid(uuid, uuid_count, uuids))
7465 parsed += field_len + 1;
7466 eir += field_len + 1;
7472 static void restart_le_scan(struct hci_dev *hdev)
7474 /* If controller is not scanning we are done. */
7475 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7478 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7479 hdev->discovery.scan_start +
7480 hdev->discovery.scan_duration))
7483 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7484 DISCOV_LE_RESTART_DELAY);
7487 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7488 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7490 /* If a RSSI threshold has been specified, and
7491 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7492 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7493 * is set, let it through for further processing, as we might need to
7496 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7497 * the results are also dropped.
7499 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7500 (rssi == HCI_RSSI_INVALID ||
7501 (rssi < hdev->discovery.rssi &&
7502 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7505 if (hdev->discovery.uuid_count != 0) {
7506 /* If a list of UUIDs is provided in filter, results with no
7507 * matching UUID should be dropped.
7509 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7510 hdev->discovery.uuids) &&
7511 !eir_has_uuids(scan_rsp, scan_rsp_len,
7512 hdev->discovery.uuid_count,
7513 hdev->discovery.uuids))
7517 /* If duplicate filtering does not report RSSI changes, then restart
7518 * scanning to ensure updated result with updated RSSI values.
7520 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7521 restart_le_scan(hdev);
7523 /* Validate RSSI value against the RSSI threshold once more. */
7524 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7525 rssi < hdev->discovery.rssi)
7532 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7533 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7534 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7537 struct mgmt_ev_device_found *ev = (void *)buf;
7540 /* Don't send events for a non-kernel initiated discovery. With
7541 * LE one exception is if we have pend_le_reports > 0 in which
7542 * case we're doing passive scanning and want these events.
7544 if (!hci_discovery_active(hdev)) {
7545 if (link_type == ACL_LINK)
7547 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7551 if (hdev->discovery.result_filtering) {
7552 /* We are using service discovery */
7553 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7558 /* Make sure that the buffer is big enough. The 5 extra bytes
7559 * are for the potential CoD field.
7561 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7564 memset(buf, 0, sizeof(buf));
7566 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7567 * RSSI value was reported as 0 when not available. This behavior
7568 * is kept when using device discovery. This is required for full
7569 * backwards compatibility with the API.
7571 * However when using service discovery, the value 127 will be
7572 * returned when the RSSI is not available.
7574 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7575 link_type == ACL_LINK)
7578 bacpy(&ev->addr.bdaddr, bdaddr);
7579 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7581 ev->flags = cpu_to_le32(flags);
7584 /* Copy EIR or advertising data into event */
7585 memcpy(ev->eir, eir, eir_len);
7587 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7588 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7591 if (scan_rsp_len > 0)
7592 /* Append scan response data to event */
7593 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7595 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7596 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7598 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
7601 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7602 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7604 struct mgmt_ev_device_found *ev;
7605 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7608 ev = (struct mgmt_ev_device_found *) buf;
7610 memset(buf, 0, sizeof(buf));
7612 bacpy(&ev->addr.bdaddr, bdaddr);
7613 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7616 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7619 ev->eir_len = cpu_to_le16(eir_len);
7621 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7624 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7626 struct mgmt_ev_discovering ev;
7628 BT_DBG("%s discovering %u", hdev->name, discovering);
7630 memset(&ev, 0, sizeof(ev));
7631 ev.type = hdev->discovery.type;
7632 ev.discovering = discovering;
7634 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7637 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7639 BT_DBG("%s status %u", hdev->name, status);
7642 void mgmt_reenable_advertising(struct hci_dev *hdev)
7644 struct hci_request req;
7646 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
7649 hci_req_init(&req, hdev);
7650 enable_advertising(&req);
7651 hci_req_run(&req, adv_enable_complete);
7654 static struct hci_mgmt_chan chan = {
7655 .channel = HCI_CHANNEL_CONTROL,
7656 .handler_count = ARRAY_SIZE(mgmt_handlers),
7657 .handlers = mgmt_handlers,
7662 return hci_mgmt_chan_register(&chan);
7665 void mgmt_exit(void)
7667 hci_mgmt_chan_unregister(&chan);