Bluetooth: Move HCI_RUNNING check into hci_send_frame

net/bluetooth/hci_core.c (firefly-linux-kernel-4.4.55.git)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
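
/* With debugfs mounted in the usual place, the attribute created from
 * these fops can be exercised from the shell; a sketch, assuming the
 * default mount point and a controller named hci0:
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' issues HCI_OP_ENABLE_DUT_MODE; writing 'N' leaves Device
 * Under Test mode by resetting the controller (see dut_mode_write()).
 */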

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
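
/* __hci_cmd_sync() is the building block for simple one-command
 * exchanges while hdev->req_lock is held; dut_mode_write() above is a
 * minimal in-tree user. A sketch of the pattern:
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 *
 * The returned skb carries the Command Complete parameters and must be
 * freed by the caller.
 */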

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
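
/* Request functions handed to hci_req_sync() queue one or more commands
 * on the provided struct hci_request; the helpers further down
 * (hci_reset_req, hci_scan_req, ...) all follow this shape. A sketch of
 * a call site, assuming a request function my_req() built like those
 * helpers:
 *
 *      err = hci_req_sync(hdev, my_req, 0, HCI_INIT_TIMEOUT);
 *
 * hci_req_sync() fails with -ENETDOWN unless the device is up and
 * serializes against other requests via hdev->req_lock.
 */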

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
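
/* Note on the layout used above: bit n of the 64-bit event mask enables
 * the HCI event with code n + 1, so event code E lands in
 * events[(E - 1) / 8] at bit (E - 1) % 8. For example, Inquiry Result
 * with RSSI (event 0x22) maps to events[4] |= 0x02.
 */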

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force the minimum
                 * max_page to at least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If the controller supports the Connection Parameters
                 * Request Link Layer Procedure, enable the corresponding
                 * event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send it only if the command
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

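/* Controller bring-up is staged: init1 resets the controller and reads
 * its basic identity, init2 does transport setup (BR/EDR and/or LE),
 * init3 configures event masks and LE parameters, and init4 enables
 * optional features. AMP controllers stop after the second stage, as
 * __hci_init() below shows.
 */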
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

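/* The one-shot request helpers above are driven by the legacy device
 * ioctls through hci_req_sync(); a sketch of such a call site, assuming
 * the HCISETSCAN ioctl delivers the scan setting in dr.dev_opt:
 *
 *      err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
 *                         HCI_INIT_TIMEOUT);
 */
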
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
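
/* A successful hci_dev_get() must be balanced with hci_dev_put(), as
 * hci_inquiry() below does on its done: path. A minimal sketch:
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (hdev) {
 *              ... use hdev ...
 *              hci_dev_put(hdev);
 *      }
 */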

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
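
/* The resolve list is kept ordered by ascending |RSSI| so that name
 * resolution proceeds strongest-signal-first; entries whose name lookup
 * is already pending are skipped over and keep their place at the front.
 */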

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until the Inquiry procedure finishes (HCI_INQUIRY
                 * flag is cleared). If it is interrupted by a signal,
                 * return -EINTR via the done: path so that the device
                 * reference taken above is dropped.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep. Therefore we allocate a
         * temporary buffer and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
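
/* hci_inquiry() backs the HCIINQUIRY ioctl on raw HCI sockets. A
 * userspace sketch, assuming an open socket dd and a buffer buf large
 * enough for the request header plus max_rsp inquiry_info entries:
 *
 *      struct hci_inquiry_req *ir = (void *) buf;
 *
 *      ir->dev_id  = 0;
 *      ir->flags   = IREQ_CACHE_FLUSH;
 *      ir->length  = 8;                // x 1.28 s
 *      ir->num_rsp = max_rsp;
 *      ir->lap[0]  = 0x33;             // GIAC 0x9e8b33,
 *      ir->lap[1]  = 0x8b;             // little endian
 *      ir->lap[2]  = 0x9e;
 *      if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) < 0)
 *              perror("HCIINQUIRY");
 */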

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        hci_notify(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hci_notify(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}
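
/* To summarize the bring-up above: the driver's open() is called and
 * HCI_DEV_OPEN signalled, the setup/config stages run under HCI_INIT,
 * and on success HCI_UP is set and HCI_DEV_UP signalled; on failure the
 * work queues are flushed and HCI_DEV_CLOSE signalled before calling
 * the driver's close().
 */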

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
1538
1539 /* This function requires the caller holds hdev->lock */
1540 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1541 {
1542         struct hci_conn_params *p;
1543
1544         list_for_each_entry(p, &hdev->le_conn_params, list) {
1545                 if (p->conn) {
1546                         hci_conn_drop(p->conn);
1547                         hci_conn_put(p->conn);
1548                         p->conn = NULL;
1549                 }
1550                 list_del_init(&p->action);
1551         }
1552
1553         BT_DBG("All LE pending actions cleared");
1554 }
1555
1556 int hci_dev_do_close(struct hci_dev *hdev)
1557 {
1558         BT_DBG("%s %p", hdev->name, hdev);
1559
1560         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1561             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1562             test_bit(HCI_UP, &hdev->flags)) {
1563                 /* Execute vendor specific shutdown routine */
1564                 if (hdev->shutdown)
1565                         hdev->shutdown(hdev);
1566         }
1567
1568         cancel_delayed_work(&hdev->power_off);
1569
1570         hci_req_cancel(hdev, ENODEV);
1571         hci_req_lock(hdev);
1572
1573         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1574                 cancel_delayed_work_sync(&hdev->cmd_timer);
1575                 hci_req_unlock(hdev);
1576                 return 0;
1577         }
1578
1579         /* Flush the RX and TX work items */
1580         flush_work(&hdev->tx_work);
1581         flush_work(&hdev->rx_work);
1582
1583         if (hdev->discov_timeout > 0) {
1584                 cancel_delayed_work(&hdev->discov_off);
1585                 hdev->discov_timeout = 0;
1586                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1587                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1588         }
1589
1590         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1591                 cancel_delayed_work(&hdev->service_cache);
1592
1593         cancel_delayed_work_sync(&hdev->le_scan_disable);
1594         cancel_delayed_work_sync(&hdev->le_scan_restart);
1595
1596         if (hci_dev_test_flag(hdev, HCI_MGMT))
1597                 cancel_delayed_work_sync(&hdev->rpa_expired);
1598
1599         if (hdev->adv_instance_timeout) {
1600                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1601                 hdev->adv_instance_timeout = 0;
1602         }
1603
1604         /* Avoid potential lockdep warnings from the *_flush() calls by
1605          * ensuring the workqueue is empty up front.
1606          */
1607         drain_workqueue(hdev->workqueue);
1608
1609         hci_dev_lock(hdev);
1610
1611         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1612
1613         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1614                 if (hdev->dev_type == HCI_BREDR)
1615                         mgmt_powered(hdev, 0);
1616         }
1617
1618         hci_inquiry_cache_flush(hdev);
1619         hci_pend_le_actions_clear(hdev);
1620         hci_conn_hash_flush(hdev);
1621         hci_dev_unlock(hdev);
1622
1623         smp_unregister(hdev);
1624
1625         hci_notify(hdev, HCI_DEV_DOWN);
1626
1627         if (hdev->flush)
1628                 hdev->flush(hdev);
1629
1630         /* Reset device */
1631         skb_queue_purge(&hdev->cmd_q);
1632         atomic_set(&hdev->cmd_cnt, 1);
1633         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1634             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1635             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1636                 set_bit(HCI_INIT, &hdev->flags);
1637                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1638                 clear_bit(HCI_INIT, &hdev->flags);
1639         }
1640
1641         /* Flush the cmd work item */
1642         flush_work(&hdev->cmd_work);
1643
1644         /* Drop queues */
1645         skb_queue_purge(&hdev->rx_q);
1646         skb_queue_purge(&hdev->cmd_q);
1647         skb_queue_purge(&hdev->raw_q);
1648
1649         /* Drop last sent command */
1650         if (hdev->sent_cmd) {
1651                 cancel_delayed_work_sync(&hdev->cmd_timer);
1652                 kfree_skb(hdev->sent_cmd);
1653                 hdev->sent_cmd = NULL;
1654         }
1655
1656         hci_notify(hdev, HCI_DEV_CLOSE);
1657
1658         /* After this point our queues are empty
1659          * and no tasks are scheduled. */
1660         hdev->close(hdev);
1661
1662         /* Clear flags */
1663         hdev->flags &= BIT(HCI_RAW);
1664         hci_dev_clear_volatile_flags(hdev);
1665
1666         /* Controller radio is available but is currently powered down */
1667         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1668
1669         memset(hdev->eir, 0, sizeof(hdev->eir));
1670         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1671         bacpy(&hdev->random_addr, BDADDR_ANY);
1672
1673         hci_req_unlock(hdev);
1674
1675         hci_dev_put(hdev);
1676         return 0;
1677 }
1678
1679 int hci_dev_close(__u16 dev)
1680 {
1681         struct hci_dev *hdev;
1682         int err;
1683
1684         hdev = hci_dev_get(dev);
1685         if (!hdev)
1686                 return -ENODEV;
1687
1688         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1689                 err = -EBUSY;
1690                 goto done;
1691         }
1692
1693         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1694                 cancel_delayed_work(&hdev->power_off);
1695
1696         err = hci_dev_do_close(hdev);
1697
1698 done:
1699         hci_dev_put(hdev);
1700         return err;
1701 }
1702
1703 static int hci_dev_do_reset(struct hci_dev *hdev)
1704 {
1705         int ret;
1706
1707         BT_DBG("%s %p", hdev->name, hdev);
1708
1709         hci_req_lock(hdev);
1710
1711         /* Drop queues */
1712         skb_queue_purge(&hdev->rx_q);
1713         skb_queue_purge(&hdev->cmd_q);
1714
1715         /* Avoid potential lockdep warnings from the *_flush() calls by
1716          * ensuring the workqueue is empty up front.
1717          */
1718         drain_workqueue(hdev->workqueue);
1719
1720         hci_dev_lock(hdev);
1721         hci_inquiry_cache_flush(hdev);
1722         hci_conn_hash_flush(hdev);
1723         hci_dev_unlock(hdev);
1724
1725         if (hdev->flush)
1726                 hdev->flush(hdev);
1727
1728         atomic_set(&hdev->cmd_cnt, 1);
1729         hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;
1730
1731         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1732
1733         hci_req_unlock(hdev);
1734         return ret;
1735 }
1736
1737 int hci_dev_reset(__u16 dev)
1738 {
1739         struct hci_dev *hdev;
1740         int err;
1741
1742         hdev = hci_dev_get(dev);
1743         if (!hdev)
1744                 return -ENODEV;
1745
1746         if (!test_bit(HCI_UP, &hdev->flags)) {
1747                 err = -ENETDOWN;
1748                 goto done;
1749         }
1750
1751         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1752                 err = -EBUSY;
1753                 goto done;
1754         }
1755
1756         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1757                 err = -EOPNOTSUPP;
1758                 goto done;
1759         }
1760
1761         err = hci_dev_do_reset(hdev);
1762
1763 done:
1764         hci_dev_put(hdev);
1765         return err;
1766 }
1767
1768 int hci_dev_reset_stat(__u16 dev)
1769 {
1770         struct hci_dev *hdev;
1771         int ret = 0;
1772
1773         hdev = hci_dev_get(dev);
1774         if (!hdev)
1775                 return -ENODEV;
1776
1777         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1778                 ret = -EBUSY;
1779                 goto done;
1780         }
1781
1782         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1783                 ret = -EOPNOTSUPP;
1784                 goto done;
1785         }
1786
1787         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1788
1789 done:
1790         hci_dev_put(hdev);
1791         return ret;
1792 }
1793
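/* Translate a scan mode set through the legacy HCISETSCAN ioctl into the
 * corresponding mgmt flags: SCAN_PAGE drives HCI_CONNECTABLE and
 * SCAN_INQUIRY drives HCI_DISCOVERABLE, so that a non-mgmt change is
 * still reflected to the management interface.
 */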
1794 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1795 {
1796         bool conn_changed, discov_changed;
1797
1798         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1799
1800         if ((scan & SCAN_PAGE))
1801                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1802                                                           HCI_CONNECTABLE);
1803         else
1804                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1805                                                            HCI_CONNECTABLE);
1806
1807         if ((scan & SCAN_INQUIRY)) {
1808                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1809                                                             HCI_DISCOVERABLE);
1810         } else {
1811                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1812                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1813                                                              HCI_DISCOVERABLE);
1814         }
1815
1816         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1817                 return;
1818
1819         if (conn_changed || discov_changed) {
1820                 /* In case this was disabled through mgmt */
1821                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1822
1823                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1824                         mgmt_update_adv_data(hdev);
1825
1826                 mgmt_new_settings(hdev);
1827         }
1828 }
1829
1830 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1831 {
1832         struct hci_dev *hdev;
1833         struct hci_dev_req dr;
1834         int err = 0;
1835
1836         if (copy_from_user(&dr, arg, sizeof(dr)))
1837                 return -EFAULT;
1838
1839         hdev = hci_dev_get(dr.dev_id);
1840         if (!hdev)
1841                 return -ENODEV;
1842
1843         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1844                 err = -EBUSY;
1845                 goto done;
1846         }
1847
1848         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1849                 err = -EOPNOTSUPP;
1850                 goto done;
1851         }
1852
1853         if (hdev->dev_type != HCI_BREDR) {
1854                 err = -EOPNOTSUPP;
1855                 goto done;
1856         }
1857
1858         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1859                 err = -EOPNOTSUPP;
1860                 goto done;
1861         }
1862
1863         switch (cmd) {
1864         case HCISETAUTH:
1865                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1866                                    HCI_INIT_TIMEOUT);
1867                 break;
1868
1869         case HCISETENCRYPT:
1870                 if (!lmp_encrypt_capable(hdev)) {
1871                         err = -EOPNOTSUPP;
1872                         break;
1873                 }
1874
1875                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1876                         /* Auth must be enabled first */
1877                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1878                                            HCI_INIT_TIMEOUT);
1879                         if (err)
1880                                 break;
1881                 }
1882
1883                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1884                                    HCI_INIT_TIMEOUT);
1885                 break;
1886
1887         case HCISETSCAN:
1888                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1889                                    HCI_INIT_TIMEOUT);
1890
1891                 /* Ensure that the connectable and discoverable states
1892                  * get correctly modified as this was a non-mgmt change.
1893                  */
1894                 if (!err)
1895                         hci_update_scan_state(hdev, dr.dev_opt);
1896                 break;
1897
1898         case HCISETLINKPOL:
1899                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1900                                    HCI_INIT_TIMEOUT);
1901                 break;
1902
1903         case HCISETLINKMODE:
1904                 hdev->link_mode = ((__u16) dr.dev_opt) &
1905                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1906                 break;
1907
1908         case HCISETPTYPE:
1909                 hdev->pkt_type = (__u16) dr.dev_opt;
1910                 break;
1911
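                /* For the two MTU ioctls below, dev_opt packs both values:
                 * on a little-endian host the low 16 bits carry the packet
                 * count and the high 16 bits the MTU, i.e. userspace would
                 * pass something like (mtu << 16) | pkts (illustrative).
                 */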
1912         case HCISETACLMTU:
1913                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1914                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1915                 break;
1916
1917         case HCISETSCOMTU:
1918                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1919                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1920                 break;
1921
1922         default:
1923                 err = -EINVAL;
1924                 break;
1925         }
1926
1927 done:
1928         hci_dev_put(hdev);
1929         return err;
1930 }
1931
1932 int hci_get_dev_list(void __user *arg)
1933 {
1934         struct hci_dev *hdev;
1935         struct hci_dev_list_req *dl;
1936         struct hci_dev_req *dr;
1937         int n = 0, size, err;
1938         __u16 dev_num;
1939
1940         if (get_user(dev_num, (__u16 __user *) arg))
1941                 return -EFAULT;
1942
1943         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1944                 return -EINVAL;
1945
1946         size = sizeof(*dl) + dev_num * sizeof(*dr);
1947
1948         dl = kzalloc(size, GFP_KERNEL);
1949         if (!dl)
1950                 return -ENOMEM;
1951
1952         dr = dl->dev_req;
1953
1954         read_lock(&hci_dev_list_lock);
1955         list_for_each_entry(hdev, &hci_dev_list, list) {
1956                 unsigned long flags = hdev->flags;
1957
1958                 /* When auto-off is configured, the transport is
1959                  * running, but the device should still be reported
1960                  * to userspace as down.
1961                  */
1962                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1963                         flags &= ~BIT(HCI_UP);
1964
1965                 (dr + n)->dev_id  = hdev->id;
1966                 (dr + n)->dev_opt = flags;
1967
1968                 if (++n >= dev_num)
1969                         break;
1970         }
1971         read_unlock(&hci_dev_list_lock);
1972
1973         dl->dev_num = n;
1974         size = sizeof(*dl) + n * sizeof(*dr);
1975
1976         err = copy_to_user(arg, dl, size);
1977         kfree(dl);
1978
1979         return err ? -EFAULT : 0;
1980 }
1981
1982 int hci_get_dev_info(void __user *arg)
1983 {
1984         struct hci_dev *hdev;
1985         struct hci_dev_info di;
1986         unsigned long flags;
1987         int err = 0;
1988
1989         if (copy_from_user(&di, arg, sizeof(di)))
1990                 return -EFAULT;
1991
1992         hdev = hci_dev_get(di.dev_id);
1993         if (!hdev)
1994                 return -ENODEV;
1995
1996         /* When auto-off is configured, the transport is running,
1997          * but the device should still be reported to userspace as
1998          * down.
1999          */
2000         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2001                 flags = hdev->flags & ~BIT(HCI_UP);
2002         else
2003                 flags = hdev->flags;
2004
2005         strcpy(di.name, hdev->name);
2006         di.bdaddr   = hdev->bdaddr;
2007         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2008         di.flags    = flags;
2009         di.pkt_type = hdev->pkt_type;
2010         if (lmp_bredr_capable(hdev)) {
2011                 di.acl_mtu  = hdev->acl_mtu;
2012                 di.acl_pkts = hdev->acl_pkts;
2013                 di.sco_mtu  = hdev->sco_mtu;
2014                 di.sco_pkts = hdev->sco_pkts;
2015         } else {
2016                 di.acl_mtu  = hdev->le_mtu;
2017                 di.acl_pkts = hdev->le_pkts;
2018                 di.sco_mtu  = 0;
2019                 di.sco_pkts = 0;
2020         }
2021         di.link_policy = hdev->link_policy;
2022         di.link_mode   = hdev->link_mode;
2023
2024         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2025         memcpy(&di.features, &hdev->features, sizeof(di.features));
2026
2027         if (copy_to_user(arg, &di, sizeof(di)))
2028                 err = -EFAULT;
2029
2030         hci_dev_put(hdev);
2031
2032         return err;
2033 }
2034
2035 /* ---- Interface to HCI drivers ---- */
2036
2037 static int hci_rfkill_set_block(void *data, bool blocked)
2038 {
2039         struct hci_dev *hdev = data;
2040
2041         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2042
2043         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2044                 return -EBUSY;
2045
2046         if (blocked) {
2047                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2048                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2049                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2050                         hci_dev_do_close(hdev);
2051         } else {
2052                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2053         }
2054
2055         return 0;
2056 }
2057
2058 static const struct rfkill_ops hci_rfkill_ops = {
2059         .set_block = hci_rfkill_set_block,
2060 };
2061
2062 static void hci_power_on(struct work_struct *work)
2063 {
2064         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2065         int err;
2066
2067         BT_DBG("%s", hdev->name);
2068
2069         err = hci_dev_do_open(hdev);
2070         if (err < 0) {
2071                 hci_dev_lock(hdev);
2072                 mgmt_set_powered_failed(hdev, err);
2073                 hci_dev_unlock(hdev);
2074                 return;
2075         }
2076
2077         /* During the HCI setup phase, a few error conditions are
2078          * ignored and they need to be checked now. If they are still
2079          * valid, it is important to turn the device back off.
2080          */
2081         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2082             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2083             (hdev->dev_type == HCI_BREDR &&
2084              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2085              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2086                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2087                 hci_dev_do_close(hdev);
2088         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2089                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2090                                    HCI_AUTO_OFF_TIMEOUT);
2091         }
2092
2093         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2094                 /* For unconfigured devices, set the HCI_RAW flag
2095                  * so that userspace can easily identify them.
2096                  */
2097                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2098                         set_bit(HCI_RAW, &hdev->flags);
2099
2100                 /* For fully configured devices, this will send
2101                  * the Index Added event. For unconfigured devices,
2102                  * it will send the Unconfigured Index Added event.
2103                  *
2104                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2105                  * and no event will be sent.
2106                  */
2107                 mgmt_index_added(hdev);
2108         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2109                 /* Once the controller is configured, it is
2110                  * important to clear the HCI_RAW flag.
2111                  */
2112                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2113                         clear_bit(HCI_RAW, &hdev->flags);
2114
2115                 /* Powering on the controller with HCI_CONFIG set only
2116                  * happens with the transition from unconfigured to
2117                  * configured. This will send the Index Added event.
2118                  */
2119                 mgmt_index_added(hdev);
2120         }
2121 }
2122
2123 static void hci_power_off(struct work_struct *work)
2124 {
2125         struct hci_dev *hdev = container_of(work, struct hci_dev,
2126                                             power_off.work);
2127
2128         BT_DBG("%s", hdev->name);
2129
2130         hci_dev_do_close(hdev);
2131 }
2132
2133 static void hci_error_reset(struct work_struct *work)
2134 {
2135         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2136
2137         BT_DBG("%s", hdev->name);
2138
2139         if (hdev->hw_error)
2140                 hdev->hw_error(hdev, hdev->hw_error_code);
2141         else
2142                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2143                        hdev->hw_error_code);
2144
2145         if (hci_dev_do_close(hdev))
2146                 return;
2147
2148         hci_dev_do_open(hdev);
2149 }
2150
2151 static void hci_discov_off(struct work_struct *work)
2152 {
2153         struct hci_dev *hdev;
2154
2155         hdev = container_of(work, struct hci_dev, discov_off.work);
2156
2157         BT_DBG("%s", hdev->name);
2158
2159         mgmt_discoverable_timeout(hdev);
2160 }
2161
2162 static void hci_adv_timeout_expire(struct work_struct *work)
2163 {
2164         struct hci_dev *hdev;
2165
2166         hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2167
2168         BT_DBG("%s", hdev->name);
2169
2170         mgmt_adv_timeout_expired(hdev);
2171 }
2172
2173 void hci_uuids_clear(struct hci_dev *hdev)
2174 {
2175         struct bt_uuid *uuid, *tmp;
2176
2177         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2178                 list_del(&uuid->list);
2179                 kfree(uuid);
2180         }
2181 }
2182
2183 void hci_link_keys_clear(struct hci_dev *hdev)
2184 {
2185         struct link_key *key;
2186
2187         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2188                 list_del_rcu(&key->list);
2189                 kfree_rcu(key, rcu);
2190         }
2191 }
2192
2193 void hci_smp_ltks_clear(struct hci_dev *hdev)
2194 {
2195         struct smp_ltk *k;
2196
2197         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2198                 list_del_rcu(&k->list);
2199                 kfree_rcu(k, rcu);
2200         }
2201 }
2202
2203 void hci_smp_irks_clear(struct hci_dev *hdev)
2204 {
2205         struct smp_irk *k;
2206
2207         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2208                 list_del_rcu(&k->list);
2209                 kfree_rcu(k, rcu);
2210         }
2211 }
2212
2213 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2214 {
2215         struct link_key *k;
2216
2217         rcu_read_lock();
2218         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2219                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2220                         rcu_read_unlock();
2221                         return k;
2222                 }
2223         }
2224         rcu_read_unlock();
2225
2226         return NULL;
2227 }
2228
2229 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2230                                u8 key_type, u8 old_key_type)
2231 {
2232         /* Legacy key */
2233         if (key_type < 0x03)
2234                 return true;
2235
2236         /* Debug keys are insecure so don't store them persistently */
2237         if (key_type == HCI_LK_DEBUG_COMBINATION)
2238                 return false;
2239
2240         /* Changed combination key and there's no previous one */
2241         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2242                 return false;
2243
2244         /* Security mode 3 case */
2245         if (!conn)
2246                 return true;
2247
2248         /* BR/EDR key derived using SC from an LE link */
2249         if (conn->type == LE_LINK)
2250                 return true;
2251
2252         /* Neither the local nor the remote side requested no-bonding */
2253         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2254                 return true;
2255
2256         /* Local side had dedicated bonding as requirement */
2257         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2258                 return true;
2259
2260         /* Remote side had dedicated bonding as requirement */
2261         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2262                 return true;
2263
2264         /* If none of the above criteria match, then don't store the key
2265          * persistently */
2266         return false;
2267 }
2268
2269 static u8 ltk_role(u8 type)
2270 {
2271         if (type == SMP_LTK)
2272                 return HCI_ROLE_MASTER;
2273
2274         return HCI_ROLE_SLAVE;
2275 }
2276
2277 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2278                              u8 addr_type, u8 role)
2279 {
2280         struct smp_ltk *k;
2281
2282         rcu_read_lock();
2283         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2284                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2285                         continue;
2286
2287                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2288                         rcu_read_unlock();
2289                         return k;
2290                 }
2291         }
2292         rcu_read_unlock();
2293
2294         return NULL;
2295 }
2296
2297 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2298 {
2299         struct smp_irk *irk;
2300
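        /* First try a cheap match against RPAs that have already been
         * resolved; only if that fails, run the AES-based
         * smp_irk_matches() check against every stored IRK and cache a
         * successful result in irk->rpa.
         */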
2301         rcu_read_lock();
2302         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2303                 if (!bacmp(&irk->rpa, rpa)) {
2304                         rcu_read_unlock();
2305                         return irk;
2306                 }
2307         }
2308
2309         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2310                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2311                         bacpy(&irk->rpa, rpa);
2312                         rcu_read_unlock();
2313                         return irk;
2314                 }
2315         }
2316         rcu_read_unlock();
2317
2318         return NULL;
2319 }
2320
2321 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2322                                      u8 addr_type)
2323 {
2324         struct smp_irk *irk;
2325
2326         /* Identity Address must be public or static random */
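        /* A static random address has its two most significant bits set
         * to 1; bdaddr_t is stored little-endian, so b[5] holds the most
         * significant byte.
         */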
2327         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2328                 return NULL;
2329
2330         rcu_read_lock();
2331         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2332                 if (addr_type == irk->addr_type &&
2333                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2334                         rcu_read_unlock();
2335                         return irk;
2336                 }
2337         }
2338         rcu_read_unlock();
2339
2340         return NULL;
2341 }
2342
2343 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2344                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2345                                   u8 pin_len, bool *persistent)
2346 {
2347         struct link_key *key, *old_key;
2348         u8 old_key_type;
2349
2350         old_key = hci_find_link_key(hdev, bdaddr);
2351         if (old_key) {
2352                 old_key_type = old_key->type;
2353                 key = old_key;
2354         } else {
2355                 old_key_type = conn ? conn->key_type : 0xff;
2356                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2357                 if (!key)
2358                         return NULL;
2359                 list_add_rcu(&key->list, &hdev->link_keys);
2360         }
2361
2362         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2363
2364         /* Some buggy controller combinations generate a changed
2365          * combination key for legacy pairing even when there's no
2366          * previous key */
2367         if (type == HCI_LK_CHANGED_COMBINATION &&
2368             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2369                 type = HCI_LK_COMBINATION;
2370                 if (conn)
2371                         conn->key_type = type;
2372         }
2373
2374         bacpy(&key->bdaddr, bdaddr);
2375         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2376         key->pin_len = pin_len;
2377
2378         if (type == HCI_LK_CHANGED_COMBINATION)
2379                 key->type = old_key_type;
2380         else
2381                 key->type = type;
2382
2383         if (persistent)
2384                 *persistent = hci_persistent_key(hdev, conn, type,
2385                                                  old_key_type);
2386
2387         return key;
2388 }
2389
2390 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2391                             u8 addr_type, u8 type, u8 authenticated,
2392                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2393 {
2394         struct smp_ltk *key, *old_key;
2395         u8 role = ltk_role(type);
2396
2397         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2398         if (old_key)
2399                 key = old_key;
2400         else {
2401                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2402                 if (!key)
2403                         return NULL;
2404                 list_add_rcu(&key->list, &hdev->long_term_keys);
2405         }
2406
2407         bacpy(&key->bdaddr, bdaddr);
2408         key->bdaddr_type = addr_type;
2409         memcpy(key->val, tk, sizeof(key->val));
2410         key->authenticated = authenticated;
2411         key->ediv = ediv;
2412         key->rand = rand;
2413         key->enc_size = enc_size;
2414         key->type = type;
2415
2416         return key;
2417 }
2418
2419 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2420                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2421 {
2422         struct smp_irk *irk;
2423
2424         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2425         if (!irk) {
2426                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2427                 if (!irk)
2428                         return NULL;
2429
2430                 bacpy(&irk->bdaddr, bdaddr);
2431                 irk->addr_type = addr_type;
2432
2433                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2434         }
2435
2436         memcpy(irk->val, val, 16);
2437         bacpy(&irk->rpa, rpa);
2438
2439         return irk;
2440 }
2441
2442 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2443 {
2444         struct link_key *key;
2445
2446         key = hci_find_link_key(hdev, bdaddr);
2447         if (!key)
2448                 return -ENOENT;
2449
2450         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2451
2452         list_del_rcu(&key->list);
2453         kfree_rcu(key, rcu);
2454
2455         return 0;
2456 }
2457
2458 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2459 {
2460         struct smp_ltk *k;
2461         int removed = 0;
2462
2463         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2464                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2465                         continue;
2466
2467                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2468
2469                 list_del_rcu(&k->list);
2470                 kfree_rcu(k, rcu);
2471                 removed++;
2472         }
2473
2474         return removed ? 0 : -ENOENT;
2475 }
2476
2477 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2478 {
2479         struct smp_irk *k;
2480
2481         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2482                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2483                         continue;
2484
2485                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2486
2487                 list_del_rcu(&k->list);
2488                 kfree_rcu(k, rcu);
2489         }
2490 }
2491
2492 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2493 {
2494         struct smp_ltk *k;
2495         struct smp_irk *irk;
2496         u8 addr_type;
2497
2498         if (type == BDADDR_BREDR) {
2499                 if (hci_find_link_key(hdev, bdaddr))
2500                         return true;
2501                 return false;
2502         }
2503
2504         /* Convert to HCI addr type which struct smp_ltk uses */
2505         if (type == BDADDR_LE_PUBLIC)
2506                 addr_type = ADDR_LE_DEV_PUBLIC;
2507         else
2508                 addr_type = ADDR_LE_DEV_RANDOM;
2509
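        /* If the given address resolves to a stored IRK, check for keys
         * against the identity address behind it rather than the RPA
         * itself.
         */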
2510         irk = hci_get_irk(hdev, bdaddr, addr_type);
2511         if (irk) {
2512                 bdaddr = &irk->bdaddr;
2513                 addr_type = irk->addr_type;
2514         }
2515
2516         rcu_read_lock();
2517         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2518                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2519                         rcu_read_unlock();
2520                         return true;
2521                 }
2522         }
2523         rcu_read_unlock();
2524
2525         return false;
2526 }
2527
2528 /* HCI command timer function */
2529 static void hci_cmd_timeout(struct work_struct *work)
2530 {
2531         struct hci_dev *hdev = container_of(work, struct hci_dev,
2532                                             cmd_timer.work);
2533
2534         if (hdev->sent_cmd) {
2535                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2536                 u16 opcode = __le16_to_cpu(sent->opcode);
2537
2538                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2539         } else {
2540                 BT_ERR("%s command tx timeout", hdev->name);
2541         }
2542
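        /* Assume the controller dropped the command: restore the single
         * command credit so the next queued command can be sent.
         */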
2543         atomic_set(&hdev->cmd_cnt, 1);
2544         queue_work(hdev->workqueue, &hdev->cmd_work);
2545 }
2546
2547 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2548                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2549 {
2550         struct oob_data *data;
2551
2552         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2553                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2554                         continue;
2555                 if (data->bdaddr_type != bdaddr_type)
2556                         continue;
2557                 return data;
2558         }
2559
2560         return NULL;
2561 }
2562
2563 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2564                                u8 bdaddr_type)
2565 {
2566         struct oob_data *data;
2567
2568         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2569         if (!data)
2570                 return -ENOENT;
2571
2572         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2573
2574         list_del(&data->list);
2575         kfree(data);
2576
2577         return 0;
2578 }
2579
2580 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2581 {
2582         struct oob_data *data, *n;
2583
2584         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2585                 list_del(&data->list);
2586                 kfree(data);
2587         }
2588 }
2589
2590 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2591                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2592                             u8 *hash256, u8 *rand256)
2593 {
2594         struct oob_data *data;
2595
2596         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2597         if (!data) {
2598                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2599                 if (!data)
2600                         return -ENOMEM;
2601
2602                 bacpy(&data->bdaddr, bdaddr);
2603                 data->bdaddr_type = bdaddr_type;
2604                 list_add(&data->list, &hdev->remote_oob_data);
2605         }
2606
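        /* data->present tracks which OOB values are valid: 0x01 means
         * P-192 only, 0x02 P-256 only and 0x03 both, matching the logic
         * below.
         */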
2607         if (hash192 && rand192) {
2608                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2609                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2610                 if (hash256 && rand256)
2611                         data->present = 0x03;
2612         } else {
2613                 memset(data->hash192, 0, sizeof(data->hash192));
2614                 memset(data->rand192, 0, sizeof(data->rand192));
2615                 if (hash256 && rand256)
2616                         data->present = 0x02;
2617                 else
2618                         data->present = 0x00;
2619         }
2620
2621         if (hash256 && rand256) {
2622                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2623                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2624         } else {
2625                 memset(data->hash256, 0, sizeof(data->hash256));
2626                 memset(data->rand256, 0, sizeof(data->rand256));
2627                 if (hash192 && rand192)
2628                         data->present = 0x01;
2629         }
2630
2631         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2632
2633         return 0;
2634 }
2635
2636 /* This function requires the caller holds hdev->lock */
2637 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2638 {
2639         struct adv_info *adv_instance;
2640
2641         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2642                 if (adv_instance->instance == instance)
2643                         return adv_instance;
2644         }
2645
2646         return NULL;
2647 }
2648
2649 /* This function requires the caller holds hdev->lock */
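/* Return the advertising instance that follows the given one, wrapping
 * around to the first entry after the last.
 */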
2650 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2651         struct adv_info *cur_instance;
2652
2653         cur_instance = hci_find_adv_instance(hdev, instance);
2654         if (!cur_instance)
2655                 return NULL;
2656
2657         if (cur_instance == list_last_entry(&hdev->adv_instances,
2658                                             struct adv_info, list))
2659                 return list_first_entry(&hdev->adv_instances,
2660                                         struct adv_info, list);
2661         else
2662                 return list_next_entry(cur_instance, list);
2663 }
2664
2665 /* This function requires the caller holds hdev->lock */
2666 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2667 {
2668         struct adv_info *adv_instance;
2669
2670         adv_instance = hci_find_adv_instance(hdev, instance);
2671         if (!adv_instance)
2672                 return -ENOENT;
2673
2674         BT_DBG("%s removing instance %d", hdev->name, instance);
2675
2676         if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2677                 cancel_delayed_work(&hdev->adv_instance_expire);
2678                 hdev->adv_instance_timeout = 0;
2679         }
2680
2681         list_del(&adv_instance->list);
2682         kfree(adv_instance);
2683
2684         hdev->adv_instance_cnt--;
2685
2686         return 0;
2687 }
2688
2689 /* This function requires the caller holds hdev->lock */
2690 void hci_adv_instances_clear(struct hci_dev *hdev)
2691 {
2692         struct adv_info *adv_instance, *n;
2693
2694         if (hdev->adv_instance_timeout) {
2695                 cancel_delayed_work(&hdev->adv_instance_expire);
2696                 hdev->adv_instance_timeout = 0;
2697         }
2698
2699         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2700                 list_del(&adv_instance->list);
2701                 kfree(adv_instance);
2702         }
2703
2704         hdev->adv_instance_cnt = 0;
2705 }
2706
2707 /* This function requires the caller holds hdev->lock */
2708 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2709                          u16 adv_data_len, u8 *adv_data,
2710                          u16 scan_rsp_len, u8 *scan_rsp_data,
2711                          u16 timeout, u16 duration)
2712 {
2713         struct adv_info *adv_instance;
2714
2715         adv_instance = hci_find_adv_instance(hdev, instance);
2716         if (adv_instance) {
2717                 memset(adv_instance->adv_data, 0,
2718                        sizeof(adv_instance->adv_data));
2719                 memset(adv_instance->scan_rsp_data, 0,
2720                        sizeof(adv_instance->scan_rsp_data));
2721         } else {
2722                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2723                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2724                         return -EOVERFLOW;
2725
2726                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2727                 if (!adv_instance)
2728                         return -ENOMEM;
2729
2730                 adv_instance->pending = true;
2731                 adv_instance->instance = instance;
2732                 list_add(&adv_instance->list, &hdev->adv_instances);
2733                 hdev->adv_instance_cnt++;
2734         }
2735
2736         adv_instance->flags = flags;
2737         adv_instance->adv_data_len = adv_data_len;
2738         adv_instance->scan_rsp_len = scan_rsp_len;
2739
2740         if (adv_data_len)
2741                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2742
2743         if (scan_rsp_len)
2744                 memcpy(adv_instance->scan_rsp_data,
2745                        scan_rsp_data, scan_rsp_len);
2746
2747         adv_instance->timeout = timeout;
2748         adv_instance->remaining_time = timeout;
2749
2750         if (duration == 0)
2751                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2752         else
2753                 adv_instance->duration = duration;
2754
2755         BT_DBG("%s for instance %d", hdev->name, instance);
2756
2757         return 0;
2758 }
2759
2760 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2761                                          bdaddr_t *bdaddr, u8 type)
2762 {
2763         struct bdaddr_list *b;
2764
2765         list_for_each_entry(b, bdaddr_list, list) {
2766                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2767                         return b;
2768         }
2769
2770         return NULL;
2771 }
2772
2773 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2774 {
2775         struct list_head *p, *n;
2776
2777         list_for_each_safe(p, n, bdaddr_list) {
2778                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2779
2780                 list_del(p);
2781                 kfree(b);
2782         }
2783 }
2784
2785 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2786 {
2787         struct bdaddr_list *entry;
2788
2789         if (!bacmp(bdaddr, BDADDR_ANY))
2790                 return -EBADF;
2791
2792         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2793                 return -EEXIST;
2794
2795         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2796         if (!entry)
2797                 return -ENOMEM;
2798
2799         bacpy(&entry->bdaddr, bdaddr);
2800         entry->bdaddr_type = type;
2801
2802         list_add(&entry->list, list);
2803
2804         return 0;
2805 }
2806
2807 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2808 {
2809         struct bdaddr_list *entry;
2810
2811         if (!bacmp(bdaddr, BDADDR_ANY)) {
2812                 hci_bdaddr_list_clear(list);
2813                 return 0;
2814         }
2815
2816         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2817         if (!entry)
2818                 return -ENOENT;
2819
2820         list_del(&entry->list);
2821         kfree(entry);
2822
2823         return 0;
2824 }
2825
2826 /* This function requires the caller holds hdev->lock */
2827 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2828                                                bdaddr_t *addr, u8 addr_type)
2829 {
2830         struct hci_conn_params *params;
2831
2832         list_for_each_entry(params, &hdev->le_conn_params, list) {
2833                 if (bacmp(&params->addr, addr) == 0 &&
2834                     params->addr_type == addr_type) {
2835                         return params;
2836                 }
2837         }
2838
2839         return NULL;
2840 }
2841
2842 /* This function requires the caller holds hdev->lock */
2843 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2844                                                   bdaddr_t *addr, u8 addr_type)
2845 {
2846         struct hci_conn_params *param;
2847
2848         list_for_each_entry(param, list, action) {
2849                 if (bacmp(&param->addr, addr) == 0 &&
2850                     param->addr_type == addr_type)
2851                         return param;
2852         }
2853
2854         return NULL;
2855 }
2856
2857 /* This function requires the caller holds hdev->lock */
2858 struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2859                                                     bdaddr_t *addr,
2860                                                     u8 addr_type)
2861 {
2862         struct hci_conn_params *param;
2863
2864         list_for_each_entry(param, &hdev->pend_le_conns, action) {
2865                 if (bacmp(&param->addr, addr) == 0 &&
2866                     param->addr_type == addr_type &&
2867                     param->explicit_connect)
2868                         return param;
2869         }
2870
2871         list_for_each_entry(param, &hdev->pend_le_reports, action) {
2872                 if (bacmp(&param->addr, addr) == 0 &&
2873                     param->addr_type == addr_type &&
2874                     param->explicit_connect)
2875                         return param;
2876         }
2877
2878         return NULL;
2879 }
2880
2881 /* This function requires the caller holds hdev->lock */
2882 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2883                                             bdaddr_t *addr, u8 addr_type)
2884 {
2885         struct hci_conn_params *params;
2886
2887         params = hci_conn_params_lookup(hdev, addr, addr_type);
2888         if (params)
2889                 return params;
2890
2891         params = kzalloc(sizeof(*params), GFP_KERNEL);
2892         if (!params) {
2893                 BT_ERR("Out of memory");
2894                 return NULL;
2895         }
2896
2897         bacpy(&params->addr, addr);
2898         params->addr_type = addr_type;
2899
2900         list_add(&params->list, &hdev->le_conn_params);
2901         INIT_LIST_HEAD(&params->action);
2902
2903         params->conn_min_interval = hdev->le_conn_min_interval;
2904         params->conn_max_interval = hdev->le_conn_max_interval;
2905         params->conn_latency = hdev->le_conn_latency;
2906         params->supervision_timeout = hdev->le_supv_timeout;
2907         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2908
2909         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2910
2911         return params;
2912 }
2913
2914 static void hci_conn_params_free(struct hci_conn_params *params)
2915 {
2916         if (params->conn) {
2917                 hci_conn_drop(params->conn);
2918                 hci_conn_put(params->conn);
2919         }
2920
2921         list_del(&params->action);
2922         list_del(&params->list);
2923         kfree(params);
2924 }
2925
2926 /* This function requires the caller holds hdev->lock */
2927 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2928 {
2929         struct hci_conn_params *params;
2930
2931         params = hci_conn_params_lookup(hdev, addr, addr_type);
2932         if (!params)
2933                 return;
2934
2935         hci_conn_params_free(params);
2936
2937         hci_update_background_scan(hdev);
2938
2939         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2940 }
2941
2942 /* This function requires the caller holds hdev->lock */
2943 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2944 {
2945         struct hci_conn_params *params, *tmp;
2946
2947         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2948                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2949                         continue;
2950
2951                 /* If trying to establish a one-time connection to a
2952                  * disabled device, leave the params but mark them as explicit.
2953                  */
2954                 if (params->explicit_connect) {
2955                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2956                         continue;
2957                 }
2958
2959                 list_del(&params->list);
2960                 kfree(params);
2961         }
2962
2963         BT_DBG("All LE disabled connection parameters were removed");
2964 }
2965
2966 /* This function requires the caller holds hdev->lock */
2967 void hci_conn_params_clear_all(struct hci_dev *hdev)
2968 {
2969         struct hci_conn_params *params, *tmp;
2970
2971         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2972                 hci_conn_params_free(params);
2973
2974         hci_update_background_scan(hdev);
2975
2976         BT_DBG("All LE connection parameters were removed");
2977 }
2978
2979 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2980 {
2981         if (status) {
2982                 BT_ERR("Failed to start inquiry: status %d", status);
2983
2984                 hci_dev_lock(hdev);
2985                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2986                 hci_dev_unlock(hdev);
2987                 return;
2988         }
2989 }
2990
2991 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2992                                           u16 opcode)
2993 {
2994         /* General inquiry access code (GIAC) */
2995         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2996         struct hci_cp_inquiry cp;
2997         int err;
2998
2999         if (status) {
3000                 BT_ERR("Failed to disable LE scanning: status %d", status);
3001                 return;
3002         }
3003
3004         hdev->discovery.scan_start = 0;
3005
3006         switch (hdev->discovery.type) {
3007         case DISCOV_TYPE_LE:
3008                 hci_dev_lock(hdev);
3009                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3010                 hci_dev_unlock(hdev);
3011                 break;
3012
3013         case DISCOV_TYPE_INTERLEAVED:
3014                 hci_dev_lock(hdev);
3015
3016                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3017                              &hdev->quirks)) {
3018                         /* If we were running an LE-only scan, change the
3019                          * discovery state. If LE and BR/EDR inquiry ran
3020                          * simultaneously and BR/EDR inquiry has already
3021                          * finished, stop discovery; otherwise BR/EDR
3022                          * inquiry stops discovery when it finishes. If we
3023                          * are resolving a remote device name, do not
3024                          * change the discovery state.
3025                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3026                             hdev->discovery.state != DISCOVERY_RESOLVING)
3027                                 hci_discovery_set_state(hdev,
3028                                                         DISCOVERY_STOPPED);
3029                 } else {
3030                         struct hci_request req;
3031
3032                         hci_inquiry_cache_flush(hdev);
3033
3034                         hci_req_init(&req, hdev);
3035
3036                         memset(&cp, 0, sizeof(cp));
3037                         memcpy(&cp.lap, lap, sizeof(cp.lap));
3038                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3039                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3040
3041                         err = hci_req_run(&req, inquiry_complete);
3042                         if (err) {
3043                                 BT_ERR("Inquiry request failed: err %d", err);
3044                                 hci_discovery_set_state(hdev,
3045                                                         DISCOVERY_STOPPED);
3046                         }
3047                 }
3048
3049                 hci_dev_unlock(hdev);
3050                 break;
3051         }
3052 }
3053
3054 static void le_scan_disable_work(struct work_struct *work)
3055 {
3056         struct hci_dev *hdev = container_of(work, struct hci_dev,
3057                                             le_scan_disable.work);
3058         struct hci_request req;
3059         int err;
3060
3061         BT_DBG("%s", hdev->name);
3062
3063         cancel_delayed_work_sync(&hdev->le_scan_restart);
3064
3065         hci_req_init(&req, hdev);
3066
3067         hci_req_add_le_scan_disable(&req);
3068
3069         err = hci_req_run(&req, le_scan_disable_work_complete);
3070         if (err)
3071                 BT_ERR("Disable LE scanning request failed: err %d", err);
3072 }
3073
3074 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3075                                           u16 opcode)
3076 {
3077         unsigned long timeout, duration, scan_start, now;
3078
3079         BT_DBG("%s", hdev->name);
3080
3081         if (status) {
3082                 BT_ERR("Failed to restart LE scan: status %d", status);
3083                 return;
3084         }
3085
3086         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3087             !hdev->discovery.scan_start)
3088                 return;
3089
3090         /* When the scan was started, hdev->le_scan_disable was queued
3091          * to run at scan_start + duration. During a scan restart that
3092          * work was canceled, so queue it again with the remaining
3093          * timeout to make sure the scan does not run indefinitely.
3094          */
3095         duration = hdev->discovery.scan_duration;
3096         scan_start = hdev->discovery.scan_start;
3097         now = jiffies;
3098         if (now - scan_start <= duration) {
3099                 int elapsed;
3100
3101                 if (now >= scan_start)
3102                         elapsed = now - scan_start;
3103                 else
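                        /* jiffies wrapped around since scan_start */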
3104                         elapsed = ULONG_MAX - scan_start + now;
3105
3106                 timeout = duration - elapsed;
3107         } else {
3108                 timeout = 0;
3109         }
3110         queue_delayed_work(hdev->workqueue,
3111                            &hdev->le_scan_disable, timeout);
3112 }
3113
3114 static void le_scan_restart_work(struct work_struct *work)
3115 {
3116         struct hci_dev *hdev = container_of(work, struct hci_dev,
3117                                             le_scan_restart.work);
3118         struct hci_request req;
3119         struct hci_cp_le_set_scan_enable cp;
3120         int err;
3121
3122         BT_DBG("%s", hdev->name);
3123
3124         /* If the controller is not scanning, we are done. */
3125         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3126                 return;
3127
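        /* Restart by disabling and re-enabling scanning within a single
         * request; on controllers with a strict duplicates filter this
         * resets the filter, so devices that were already reported show
         * up again.
         */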
3128         hci_req_init(&req, hdev);
3129
3130         hci_req_add_le_scan_disable(&req);
3131
3132         memset(&cp, 0, sizeof(cp));
3133         cp.enable = LE_SCAN_ENABLE;
3134         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3135         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3136
3137         err = hci_req_run(&req, le_scan_restart_work_complete);
3138         if (err)
3139                 BT_ERR("Restart LE scan request failed: err %d", err);
3140 }
3141
3142 /* Copy the Identity Address of the controller.
3143  *
3144  * If the controller has a public BD_ADDR, then by default use that one.
3145  * If this is a LE only controller without a public address, default to
3146  * the static random address.
3147  *
3148  * For debugging purposes it is possible to force controllers with a
3149  * public address to use the static random address instead.
3150  *
3151  * In case BR/EDR has been disabled on a dual-mode controller and
3152  * userspace has configured a static address, then that address
3153  * becomes the identity address instead of the public BR/EDR address.
3154  */
3155 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3156                                u8 *bdaddr_type)
3157 {
3158         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3159             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3160             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3161              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3162                 bacpy(bdaddr, &hdev->static_addr);
3163                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3164         } else {
3165                 bacpy(bdaddr, &hdev->bdaddr);
3166                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3167         }
3168 }
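/* For example, a hypothetical caller could resolve its own address for
 * use in advertising or connection parameters like this (own_addr and
 * own_addr_type are local variables, not part of this file):
 *
 *	bdaddr_t own_addr;
 *	u8 own_addr_type;
 *
 *	hci_copy_identity_address(hdev, &own_addr, &own_addr_type);
 */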
3169
3170 /* Alloc HCI device */
3171 struct hci_dev *hci_alloc_dev(void)
3172 {
3173         struct hci_dev *hdev;
3174
3175         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3176         if (!hdev)
3177                 return NULL;
3178
3179         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3180         hdev->esco_type = (ESCO_HV1);
3181         hdev->link_mode = (HCI_LM_ACCEPT);
3182         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3183         hdev->io_capability = 0x03;     /* No Input No Output */
3184         hdev->manufacturer = 0xffff;    /* Default to internal use */
3185         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3186         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3187         hdev->adv_instance_cnt = 0;
3188         hdev->cur_adv_instance = 0x00;
3189         hdev->adv_instance_timeout = 0;
3190
3191         hdev->sniff_max_interval = 800;
3192         hdev->sniff_min_interval = 80;
3193
3194         hdev->le_adv_channel_map = 0x07;
3195         hdev->le_adv_min_interval = 0x0800;
3196         hdev->le_adv_max_interval = 0x0800;
3197         hdev->le_scan_interval = 0x0060;
3198         hdev->le_scan_window = 0x0030;
3199         hdev->le_conn_min_interval = 0x0028;
3200         hdev->le_conn_max_interval = 0x0038;
3201         hdev->le_conn_latency = 0x0000;
3202         hdev->le_supv_timeout = 0x002a;
3203         hdev->le_def_tx_len = 0x001b;
3204         hdev->le_def_tx_time = 0x0148;
3205         hdev->le_max_tx_len = 0x001b;
3206         hdev->le_max_tx_time = 0x0148;
3207         hdev->le_max_rx_len = 0x001b;
3208         hdev->le_max_rx_time = 0x0148;
3209
3210         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3211         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3212         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3213         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3214
3215         mutex_init(&hdev->lock);
3216         mutex_init(&hdev->req_lock);
3217
3218         INIT_LIST_HEAD(&hdev->mgmt_pending);
3219         INIT_LIST_HEAD(&hdev->blacklist);
3220         INIT_LIST_HEAD(&hdev->whitelist);
3221         INIT_LIST_HEAD(&hdev->uuids);
3222         INIT_LIST_HEAD(&hdev->link_keys);
3223         INIT_LIST_HEAD(&hdev->long_term_keys);
3224         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3225         INIT_LIST_HEAD(&hdev->remote_oob_data);
3226         INIT_LIST_HEAD(&hdev->le_white_list);
3227         INIT_LIST_HEAD(&hdev->le_conn_params);
3228         INIT_LIST_HEAD(&hdev->pend_le_conns);
3229         INIT_LIST_HEAD(&hdev->pend_le_reports);
3230         INIT_LIST_HEAD(&hdev->conn_hash.list);
3231         INIT_LIST_HEAD(&hdev->adv_instances);
3232
3233         INIT_WORK(&hdev->rx_work, hci_rx_work);
3234         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3235         INIT_WORK(&hdev->tx_work, hci_tx_work);
3236         INIT_WORK(&hdev->power_on, hci_power_on);
3237         INIT_WORK(&hdev->error_reset, hci_error_reset);
3238
3239         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3240         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3241         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3242         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3243         INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3244
3245         skb_queue_head_init(&hdev->rx_q);
3246         skb_queue_head_init(&hdev->cmd_q);
3247         skb_queue_head_init(&hdev->raw_q);
3248
3249         init_waitqueue_head(&hdev->req_wait_q);
3250
3251         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3252
3253         hci_init_sysfs(hdev);
3254         discovery_init(hdev);
3255
3256         return hdev;
3257 }
3258 EXPORT_SYMBOL(hci_alloc_dev);
3259
3260 /* Free HCI device */
3261 void hci_free_dev(struct hci_dev *hdev)
3262 {
3263         /* will free via device release */
3264         put_device(&hdev->dev);
3265 }
3266 EXPORT_SYMBOL(hci_free_dev);
3267
3268 /* Register HCI device */
3269 int hci_register_dev(struct hci_dev *hdev)
3270 {
3271         int id, error;
3272
3273         if (!hdev->open || !hdev->close || !hdev->send)
3274                 return -EINVAL;
3275
3276         /* Do not allow HCI_AMP devices to register at index 0,
3277          * so the index can be used as the AMP controller ID.
3278          */
3279         switch (hdev->dev_type) {
3280         case HCI_BREDR:
3281                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3282                 break;
3283         case HCI_AMP:
3284                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3285                 break;
3286         default:
3287                 return -EINVAL;
3288         }
3289
3290         if (id < 0)
3291                 return id;
3292
3293         sprintf(hdev->name, "hci%d", id);
3294         hdev->id = id;
3295
3296         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3297
3298         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3299                                           WQ_MEM_RECLAIM, 1, hdev->name);
3300         if (!hdev->workqueue) {
3301                 error = -ENOMEM;
3302                 goto err;
3303         }
3304
3305         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3306                                               WQ_MEM_RECLAIM, 1, hdev->name);
3307         if (!hdev->req_workqueue) {
3308                 destroy_workqueue(hdev->workqueue);
3309                 error = -ENOMEM;
3310                 goto err;
3311         }
3312
3313         if (!IS_ERR_OR_NULL(bt_debugfs))
3314                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3315
3316         dev_set_name(&hdev->dev, "%s", hdev->name);
3317
3318         error = device_add(&hdev->dev);
3319         if (error < 0)
3320                 goto err_wqueue;
3321
3322         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3323                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3324                                     hdev);
3325         if (hdev->rfkill) {
3326                 if (rfkill_register(hdev->rfkill) < 0) {
3327                         rfkill_destroy(hdev->rfkill);
3328                         hdev->rfkill = NULL;
3329                 }
3330         }
3331
3332         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3333                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3334
3335         hci_dev_set_flag(hdev, HCI_SETUP);
3336         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3337
3338         if (hdev->dev_type == HCI_BREDR) {
3339                 /* Assume BR/EDR support until proven otherwise (such as
3340                  * through reading supported features during init).
3341                  */
3342                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3343         }
3344
3345         write_lock(&hci_dev_list_lock);
3346         list_add(&hdev->list, &hci_dev_list);
3347         write_unlock(&hci_dev_list_lock);
3348
3349         /* Devices that are marked for raw-only usage are unconfigured
3350          * and should not be included in normal operation.
3351          */
3352         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3353                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3354
3355         hci_notify(hdev, HCI_DEV_REG);
3356         hci_dev_hold(hdev);
3357
3358         queue_work(hdev->req_workqueue, &hdev->power_on);
3359
3360         return id;
3361
3362 err_wqueue:
3363         destroy_workqueue(hdev->workqueue);
3364         destroy_workqueue(hdev->req_workqueue);
3365 err:
3366         ida_simple_remove(&hci_index_ida, hdev->id);
3367
3368         return error;
3369 }
3370 EXPORT_SYMBOL(hci_register_dev);
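/* A minimal sketch of how a transport driver would tie hci_alloc_dev()
 * and hci_register_dev() together; my_open, my_close, my_send and
 * my_probe are illustrative names, not part of this file:
 *
 *	static int my_probe(void)
 *	{
 *		struct hci_dev *hdev;
 *		int err;
 *
 *		hdev = hci_alloc_dev();
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus   = HCI_USB;
 *		hdev->open  = my_open;
 *		hdev->close = my_close;
 *		hdev->send  = my_send;
 *
 *		err = hci_register_dev(hdev);
 *		if (err < 0)
 *			hci_free_dev(hdev);
 *
 *		return err;
 *	}
 */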
3371
3372 /* Unregister HCI device */
3373 void hci_unregister_dev(struct hci_dev *hdev)
3374 {
3375         int id;
3376
3377         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3378
3379         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3380
3381         id = hdev->id;
3382
3383         write_lock(&hci_dev_list_lock);
3384         list_del(&hdev->list);
3385         write_unlock(&hci_dev_list_lock);
3386
3387         hci_dev_do_close(hdev);
3388
3389         cancel_work_sync(&hdev->power_on);
3390
3391         if (!test_bit(HCI_INIT, &hdev->flags) &&
3392             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3393             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3394                 hci_dev_lock(hdev);
3395                 mgmt_index_removed(hdev);
3396                 hci_dev_unlock(hdev);
3397         }
3398
3399         /* mgmt_index_removed should take care of emptying the
3400          * pending list */
3401         BUG_ON(!list_empty(&hdev->mgmt_pending));
3402
3403         hci_notify(hdev, HCI_DEV_UNREG);
3404
3405         if (hdev->rfkill) {
3406                 rfkill_unregister(hdev->rfkill);
3407                 rfkill_destroy(hdev->rfkill);
3408         }
3409
3410         device_del(&hdev->dev);
3411
3412         debugfs_remove_recursive(hdev->debugfs);
3413
3414         destroy_workqueue(hdev->workqueue);
3415         destroy_workqueue(hdev->req_workqueue);
3416
3417         hci_dev_lock(hdev);
3418         hci_bdaddr_list_clear(&hdev->blacklist);
3419         hci_bdaddr_list_clear(&hdev->whitelist);
3420         hci_uuids_clear(hdev);
3421         hci_link_keys_clear(hdev);
3422         hci_smp_ltks_clear(hdev);
3423         hci_smp_irks_clear(hdev);
3424         hci_remote_oob_data_clear(hdev);
3425         hci_adv_instances_clear(hdev);
3426         hci_bdaddr_list_clear(&hdev->le_white_list);
3427         hci_conn_params_clear_all(hdev);
3428         hci_discovery_filter_clear(hdev);
3429         hci_dev_unlock(hdev);
3430
3431         hci_dev_put(hdev);
3432
3433         ida_simple_remove(&hci_index_ida, id);
3434 }
3435 EXPORT_SYMBOL(hci_unregister_dev);
3436
3437 /* Suspend HCI device */
3438 int hci_suspend_dev(struct hci_dev *hdev)
3439 {
3440         hci_notify(hdev, HCI_DEV_SUSPEND);
3441         return 0;
3442 }
3443 EXPORT_SYMBOL(hci_suspend_dev);
3444
3445 /* Resume HCI device */
3446 int hci_resume_dev(struct hci_dev *hdev)
3447 {
3448         hci_notify(hdev, HCI_DEV_RESUME);
3449         return 0;
3450 }
3451 EXPORT_SYMBOL(hci_resume_dev);
3452
3453 /* Reset HCI device */
3454 int hci_reset_dev(struct hci_dev *hdev)
3455 {
3456         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3457         struct sk_buff *skb;
3458
3459         skb = bt_skb_alloc(3, GFP_ATOMIC);
3460         if (!skb)
3461                 return -ENOMEM;
3462
3463         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3464         memcpy(skb_put(skb, 3), hw_err, 3);
3465
3466         /* Send Hardware Error to upper stack */
3467         return hci_recv_frame(hdev, skb);
3468 }
3469 EXPORT_SYMBOL(hci_reset_dev);
3470
3471 /* Receive frame from HCI drivers */
3472 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3473 {
3474         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3475                       && !test_bit(HCI_INIT, &hdev->flags))) {
3476                 kfree_skb(skb);
3477                 return -ENXIO;
3478         }
3479
3480         /* Incoming skb */
3481         bt_cb(skb)->incoming = 1;
3482
3483         /* Time stamp */
3484         __net_timestamp(skb);
3485
3486         skb_queue_tail(&hdev->rx_q, skb);
3487         queue_work(hdev->workqueue, &hdev->rx_work);
3488
3489         return 0;
3490 }
3491 EXPORT_SYMBOL(hci_recv_frame);
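/* A minimal sketch of the driver-side receive path, assuming the
 * driver has already copied the payload into an skb; my_rx_complete is
 * an illustrative name:
 *
 *	static void my_rx_complete(struct hci_dev *hdev, struct sk_buff *skb)
 *	{
 *		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *		hci_recv_frame(hdev, skb);
 *	}
 */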
3492
3493 /* ---- Interface to upper protocols ---- */
3494
3495 int hci_register_cb(struct hci_cb *cb)
3496 {
3497         BT_DBG("%p name %s", cb, cb->name);
3498
3499         mutex_lock(&hci_cb_list_lock);
3500         list_add_tail(&cb->list, &hci_cb_list);
3501         mutex_unlock(&hci_cb_list_lock);
3502
3503         return 0;
3504 }
3505 EXPORT_SYMBOL(hci_register_cb);
3506
3507 int hci_unregister_cb(struct hci_cb *cb)
3508 {
3509         BT_DBG("%p name %s", cb, cb->name);
3510
3511         mutex_lock(&hci_cb_list_lock);
3512         list_del(&cb->list);
3513         mutex_unlock(&hci_cb_list_lock);
3514
3515         return 0;
3516 }
3517 EXPORT_SYMBOL(hci_unregister_cb);
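/* A minimal sketch of how an upper protocol would register itself;
 * my_proto_cb is an illustrative name and only the name field is
 * shown:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */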
3518
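/* Deliver a single frame to the driver: timestamp it, mirror it to the
 * monitor and to promiscuous sockets, then hand it to hdev->send.
 * Frames are dropped here if the driver is not marked HCI_RUNNING.
 */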
3519 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3520 {
3521         int err;
3522
3523         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3524
3525         /* Time stamp */
3526         __net_timestamp(skb);
3527
3528         /* Send copy to monitor */
3529         hci_send_to_monitor(hdev, skb);
3530
3531         if (atomic_read(&hdev->promisc)) {
3532                 /* Send copy to the sockets */
3533                 hci_send_to_sock(hdev, skb);
3534         }
3535
3536         /* Get rid of skb owner, prior to sending to the driver. */
3537         skb_orphan(skb);
3538
3539         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3540                 kfree_skb(skb);
3541                 return;
3542         }
3543
3544         err = hdev->send(hdev, skb);
3545         if (err < 0) {
3546                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3547                 kfree_skb(skb);
3548         }
3549 }
3550
3551 /* Send HCI command */
3552 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3553                  const void *param)
3554 {
3555         struct sk_buff *skb;
3556
3557         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3558
3559         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3560         if (!skb) {
3561                 BT_ERR("%s no memory for command", hdev->name);
3562                 return -ENOMEM;
3563         }
3564
3565         /* Stand-alone HCI commands must be flagged as
3566          * single-command requests.
3567          */
3568         bt_cb(skb)->req.start = true;
3569
3570         skb_queue_tail(&hdev->cmd_q, skb);
3571         queue_work(hdev->workqueue, &hdev->cmd_work);
3572
3573         return 0;
3574 }
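/* For example, a caller could issue a plain HCI_Reset like this
 * (illustrative only):
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */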
3575
3576 /* Get data from the previously sent command */
3577 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3578 {
3579         struct hci_command_hdr *hdr;
3580
3581         if (!hdev->sent_cmd)
3582                 return NULL;
3583
3584         hdr = (void *) hdev->sent_cmd->data;
3585
3586         if (hdr->opcode != cpu_to_le16(opcode))
3587                 return NULL;
3588
3589         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3590
3591         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3592 }
3593
3594 /* Send HCI command and wait for command complete event */
3595 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3596                              const void *param, u32 timeout)
3597 {
3598         struct sk_buff *skb;
3599
3600         if (!test_bit(HCI_UP, &hdev->flags))
3601                 return ERR_PTR(-ENETDOWN);
3602
3603         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3604
3605         hci_req_lock(hdev);
3606         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3607         hci_req_unlock(hdev);
3608
3609         return skb;
3610 }
3611 EXPORT_SYMBOL(hci_cmd_sync);
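/* A hypothetical synchronous read of the local version information,
 * with the usual error handling (opcode and timeout are illustrative
 * choices):
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	kfree_skb(skb);
 */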
3612
3613 /* Send ACL data */
3614 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3615 {
3616         struct hci_acl_hdr *hdr;
3617         int len = skb->len;
3618
3619         skb_push(skb, HCI_ACL_HDR_SIZE);
3620         skb_reset_transport_header(skb);
3621         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3622         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3623         hdr->dlen   = cpu_to_le16(len);
3624 }
3625
3626 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3627                           struct sk_buff *skb, __u16 flags)
3628 {
3629         struct hci_conn *conn = chan->conn;
3630         struct hci_dev *hdev = conn->hdev;
3631         struct sk_buff *list;
3632
3633         skb->len = skb_headlen(skb);
3634         skb->data_len = 0;
3635
3636         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3637
3638         switch (hdev->dev_type) {
3639         case HCI_BREDR:
3640                 hci_add_acl_hdr(skb, conn->handle, flags);
3641                 break;
3642         case HCI_AMP:
3643                 hci_add_acl_hdr(skb, chan->handle, flags);
3644                 break;
3645         default:
3646                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3647                 return;
3648         }
3649
3650         list = skb_shinfo(skb)->frag_list;
3651         if (!list) {
3652                 /* Non fragmented */
3653                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3654
3655                 skb_queue_tail(queue, skb);
3656         } else {
3657                 /* Fragmented */
3658                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3659
3660                 skb_shinfo(skb)->frag_list = NULL;
3661
3662                 /* Queue all fragments atomically. We need to use spin_lock_bh
3663                  * here because of 6LoWPAN links, as there this function is
3664                  * called from softirq and using normal spin lock could cause
3665                  * deadlocks.
3666                  */
3667                 spin_lock_bh(&queue->lock);
3668
3669                 __skb_queue_tail(queue, skb);
3670
3671                 flags &= ~ACL_START;
3672                 flags |= ACL_CONT;
3673                 do {
3674                         skb = list; list = list->next;
3675
3676                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3677                         hci_add_acl_hdr(skb, conn->handle, flags);
3678
3679                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3680
3681                         __skb_queue_tail(queue, skb);
3682                 } while (list);
3683
3684                 spin_unlock_bh(&queue->lock);
3685         }
3686 }
3687
3688 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3689 {
3690         struct hci_dev *hdev = chan->conn->hdev;
3691
3692         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3693
3694         hci_queue_acl(chan, &chan->data_q, skb, flags);
3695
3696         queue_work(hdev->workqueue, &hdev->tx_work);
3697 }
3698
3699 /* Send SCO data */
3700 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3701 {
3702         struct hci_dev *hdev = conn->hdev;
3703         struct hci_sco_hdr hdr;
3704
3705         BT_DBG("%s len %d", hdev->name, skb->len);
3706
3707         hdr.handle = cpu_to_le16(conn->handle);
3708         hdr.dlen   = skb->len;
3709
3710         skb_push(skb, HCI_SCO_HDR_SIZE);
3711         skb_reset_transport_header(skb);
3712         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3713
3714         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3715
3716         skb_queue_tail(&conn->data_q, skb);
3717         queue_work(hdev->workqueue, &hdev->tx_work);
3718 }
3719
3720 /* ---- HCI TX task (outgoing data) ---- */
3721
3722 /* HCI Connection scheduler */
3723 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3724                                      int *quote)
3725 {
3726         struct hci_conn_hash *h = &hdev->conn_hash;
3727         struct hci_conn *conn = NULL, *c;
3728         unsigned int num = 0, min = ~0;
3729
3730         /* We don't have to lock device here. Connections are always
3731          * added and removed with TX task disabled. */
3732
3733         rcu_read_lock();
3734
3735         list_for_each_entry_rcu(c, &h->list, list) {
3736                 if (c->type != type || skb_queue_empty(&c->data_q))
3737                         continue;
3738
3739                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3740                         continue;
3741
3742                 num++;
3743
3744                 if (c->sent < min) {
3745                         min  = c->sent;
3746                         conn = c;
3747                 }
3748
3749                 if (hci_conn_num(hdev, type) == num)
3750                         break;
3751         }
3752
3753         rcu_read_unlock();
3754
3755         if (conn) {
3756                 int cnt, q;
3757
3758                 switch (conn->type) {
3759                 case ACL_LINK:
3760                         cnt = hdev->acl_cnt;
3761                         break;
3762                 case SCO_LINK:
3763                 case ESCO_LINK:
3764                         cnt = hdev->sco_cnt;
3765                         break;
3766                 case LE_LINK:
3767                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3768                         break;
3769                 default:
3770                         cnt = 0;
3771                         BT_ERR("Unknown link type");
3772                 }
3773
3774                 q = cnt / num;
3775                 *quote = q ? q : 1;
3776         } else
3777                 *quote = 0;
3778
3779         BT_DBG("conn %p quote %d", conn, *quote);
3780         return conn;
3781 }
3782
3783 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3784 {
3785         struct hci_conn_hash *h = &hdev->conn_hash;
3786         struct hci_conn *c;
3787
3788         BT_ERR("%s link tx timeout", hdev->name);
3789
3790         rcu_read_lock();
3791
3792         /* Kill stalled connections */
3793         list_for_each_entry_rcu(c, &h->list, list) {
3794                 if (c->type == type && c->sent) {
3795                         BT_ERR("%s killing stalled connection %pMR",
3796                                hdev->name, &c->dst);
3797                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3798                 }
3799         }
3800
3801         rcu_read_unlock();
3802 }
3803
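/* Channel scheduler: pick the channel of the given link type whose
 * head skb has the highest priority and, among those, the connection
 * with the fewest packets in flight. *quote is set to that channel's
 * fair share of the available buffer credits.
 */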
3804 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3805                                       int *quote)
3806 {
3807         struct hci_conn_hash *h = &hdev->conn_hash;
3808         struct hci_chan *chan = NULL;
3809         unsigned int num = 0, min = ~0, cur_prio = 0;
3810         struct hci_conn *conn;
3811         int cnt, q, conn_num = 0;
3812
3813         BT_DBG("%s", hdev->name);
3814
3815         rcu_read_lock();
3816
3817         list_for_each_entry_rcu(conn, &h->list, list) {
3818                 struct hci_chan *tmp;
3819
3820                 if (conn->type != type)
3821                         continue;
3822
3823                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3824                         continue;
3825
3826                 conn_num++;
3827
3828                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3829                         struct sk_buff *skb;
3830
3831                         if (skb_queue_empty(&tmp->data_q))
3832                                 continue;
3833
3834                         skb = skb_peek(&tmp->data_q);
3835                         if (skb->priority < cur_prio)
3836                                 continue;
3837
3838                         if (skb->priority > cur_prio) {
3839                                 num = 0;
3840                                 min = ~0;
3841                                 cur_prio = skb->priority;
3842                         }
3843
3844                         num++;
3845
3846                         if (conn->sent < min) {
3847                                 min  = conn->sent;
3848                                 chan = tmp;
3849                         }
3850                 }
3851
3852                 if (hci_conn_num(hdev, type) == conn_num)
3853                         break;
3854         }
3855
3856         rcu_read_unlock();
3857
3858         if (!chan)
3859                 return NULL;
3860
3861         switch (chan->conn->type) {
3862         case ACL_LINK:
3863                 cnt = hdev->acl_cnt;
3864                 break;
3865         case AMP_LINK:
3866                 cnt = hdev->block_cnt;
3867                 break;
3868         case SCO_LINK:
3869         case ESCO_LINK:
3870                 cnt = hdev->sco_cnt;
3871                 break;
3872         case LE_LINK:
3873                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3874                 break;
3875         default:
3876                 cnt = 0;
3877                 BT_ERR("Unknown link type");
3878         }
3879
3880         q = cnt / num;
3881         *quote = q ? q : 1;
3882         BT_DBG("chan %p quote %d", chan, *quote);
3883         return chan;
3884 }
3885
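/* After a scheduling round, promote queued skbs on channels that did
 * not get to send anything, so low-priority traffic is not starved
 * indefinitely by higher-priority channels.
 */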
3886 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3887 {
3888         struct hci_conn_hash *h = &hdev->conn_hash;
3889         struct hci_conn *conn;
3890         int num = 0;
3891
3892         BT_DBG("%s", hdev->name);
3893
3894         rcu_read_lock();
3895
3896         list_for_each_entry_rcu(conn, &h->list, list) {
3897                 struct hci_chan *chan;
3898
3899                 if (conn->type != type)
3900                         continue;
3901
3902                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3903                         continue;
3904
3905                 num++;
3906
3907                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3908                         struct sk_buff *skb;
3909
3910                         if (chan->sent) {
3911                                 chan->sent = 0;
3912                                 continue;
3913                         }
3914
3915                         if (skb_queue_empty(&chan->data_q))
3916                                 continue;
3917
3918                         skb = skb_peek(&chan->data_q);
3919                         if (skb->priority >= HCI_PRIO_MAX - 1)
3920                                 continue;
3921
3922                         skb->priority = HCI_PRIO_MAX - 1;
3923
3924                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3925                                skb->priority);
3926                 }
3927
3928                 if (hci_conn_num(hdev, type) == num)
3929                         break;
3930         }
3931
3932         rcu_read_unlock();
3934 }
3935
3936 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3937 {
3938         /* Calculate count of blocks used by this packet */
3939         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3940 }
3941
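/* Detect a stalled ACL link: if no buffer credits have come back for
 * longer than HCI_ACL_TX_TIMEOUT, kill the stalled connections.
 */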
3942 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3943 {
3944         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3945                 /* ACL tx timeout must be longer than maximum
3946                  * link supervision timeout (40.9 seconds) */
3947                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3948                                        HCI_ACL_TX_TIMEOUT))
3949                         hci_link_tx_to(hdev, ACL_LINK);
3950         }
3951 }
3952
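/* Packet-based ACL scheduling: every transmitted frame consumes one
 * buffer credit from hdev->acl_cnt.
 */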
3953 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3954 {
3955         unsigned int cnt = hdev->acl_cnt;
3956         struct hci_chan *chan;
3957         struct sk_buff *skb;
3958         int quote;
3959
3960         __check_timeout(hdev, cnt);
3961
3962         while (hdev->acl_cnt &&
3963                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3964                 u32 priority = (skb_peek(&chan->data_q))->priority;
3965                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3966                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3967                                skb->len, skb->priority);
3968
3969                         /* Stop if priority has changed */
3970                         if (skb->priority < priority)
3971                                 break;
3972
3973                         skb = skb_dequeue(&chan->data_q);
3974
3975                         hci_conn_enter_active_mode(chan->conn,
3976                                                    bt_cb(skb)->force_active);
3977
3978                         hci_send_frame(hdev, skb);
3979                         hdev->acl_last_tx = jiffies;
3980
3981                         hdev->acl_cnt--;
3982                         chan->sent++;
3983                         chan->conn->sent++;
3984                 }
3985         }
3986
3987         if (cnt != hdev->acl_cnt)
3988                 hci_prio_recalculate(hdev, ACL_LINK);
3989 }
3990
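/* Block-based ACL scheduling: frames consume hdev->block_cnt in units
 * of hdev->block_len sized blocks (see __get_blocks() above).
 */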
3991 static void hci_sched_acl_blk(struct hci_dev *hdev)
3992 {
3993         unsigned int cnt = hdev->block_cnt;
3994         struct hci_chan *chan;
3995         struct sk_buff *skb;
3996         int quote;
3997         u8 type;
3998
3999         __check_timeout(hdev, cnt);
4000
4001         BT_DBG("%s", hdev->name);
4002
4003         if (hdev->dev_type == HCI_AMP)
4004                 type = AMP_LINK;
4005         else
4006                 type = ACL_LINK;
4007
4008         while (hdev->block_cnt > 0 &&
4009                (chan = hci_chan_sent(hdev, type, &quote))) {
4010                 u32 priority = (skb_peek(&chan->data_q))->priority;
4011                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4012                         int blocks;
4013
4014                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4015                                skb->len, skb->priority);
4016
4017                         /* Stop if priority has changed */
4018                         if (skb->priority < priority)
4019                                 break;
4020
4021                         skb = skb_dequeue(&chan->data_q);
4022
4023                         blocks = __get_blocks(hdev, skb);
4024                         if (blocks > hdev->block_cnt)
4025                                 return;
4026
4027                         hci_conn_enter_active_mode(chan->conn,
4028                                                    bt_cb(skb)->force_active);
4029
4030                         hci_send_frame(hdev, skb);
4031                         hdev->acl_last_tx = jiffies;
4032
4033                         hdev->block_cnt -= blocks;
4034                         quote -= blocks;
4035
4036                         chan->sent += blocks;
4037                         chan->conn->sent += blocks;
4038                 }
4039         }
4040
4041         if (cnt != hdev->block_cnt)
4042                 hci_prio_recalculate(hdev, type);
4043 }
4044
4045 static void hci_sched_acl(struct hci_dev *hdev)
4046 {
4047         BT_DBG("%s", hdev->name);
4048
4049         /* No ACL link over BR/EDR controller */
4050         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4051                 return;
4052
4053         /* No AMP link over AMP controller */
4054         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4055                 return;
4056
4057         switch (hdev->flow_ctl_mode) {
4058         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4059                 hci_sched_acl_pkt(hdev);
4060                 break;
4061
4062         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4063                 hci_sched_acl_blk(hdev);
4064                 break;
4065         }
4066 }
4067
4068 /* Schedule SCO */
4069 static void hci_sched_sco(struct hci_dev *hdev)
4070 {
4071         struct hci_conn *conn;
4072         struct sk_buff *skb;
4073         int quote;
4074
4075         BT_DBG("%s", hdev->name);
4076
4077         if (!hci_conn_num(hdev, SCO_LINK))
4078                 return;
4079
4080         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4081                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4082                         BT_DBG("skb %p len %d", skb, skb->len);
4083                         hci_send_frame(hdev, skb);
4084
4085                         conn->sent++;
4086                         if (conn->sent == ~0)
4087                                 conn->sent = 0;
4088                 }
4089         }
4090 }
4091
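/* Schedule eSCO: identical to SCO scheduling, but for ESCO_LINK
 * connections; both link types share hdev->sco_cnt.
 */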
4092 static void hci_sched_esco(struct hci_dev *hdev)
4093 {
4094         struct hci_conn *conn;
4095         struct sk_buff *skb;
4096         int quote;
4097
4098         BT_DBG("%s", hdev->name);
4099
4100         if (!hci_conn_num(hdev, ESCO_LINK))
4101                 return;
4102
4103         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4104                                                      &quote))) {
4105                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4106                         BT_DBG("skb %p len %d", skb, skb->len);
4107                         hci_send_frame(hdev, skb);
4108
4109                         conn->sent++;
4110                         if (conn->sent == ~0)
4111                                 conn->sent = 0;
4112                 }
4113         }
4114 }
4115
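/* Schedule LE: controllers either expose a dedicated LE buffer pool
 * (le_pkts/le_cnt) or share the ACL pool, so pick the credit counter
 * accordingly.
 */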
4116 static void hci_sched_le(struct hci_dev *hdev)
4117 {
4118         struct hci_chan *chan;
4119         struct sk_buff *skb;
4120         int quote, cnt, tmp;
4121
4122         BT_DBG("%s", hdev->name);
4123
4124         if (!hci_conn_num(hdev, LE_LINK))
4125                 return;
4126
4127         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4128                 /* LE tx timeout must be longer than maximum
4129                  * link supervision timeout (40.9 seconds) */
4130                 if (!hdev->le_cnt && hdev->le_pkts &&
4131                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4132                         hci_link_tx_to(hdev, LE_LINK);
4133         }
4134
4135         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4136         tmp = cnt;
4137         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4138                 u32 priority = (skb_peek(&chan->data_q))->priority;
4139                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4140                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4141                                skb->len, skb->priority);
4142
4143                         /* Stop if priority has changed */
4144                         if (skb->priority < priority)
4145                                 break;
4146
4147                         skb = skb_dequeue(&chan->data_q);
4148
4149                         hci_send_frame(hdev, skb);
4150                         hdev->le_last_tx = jiffies;
4151
4152                         cnt--;
4153                         chan->sent++;
4154                         chan->conn->sent++;
4155                 }
4156         }
4157
4158         if (hdev->le_pkts)
4159                 hdev->le_cnt = cnt;
4160         else
4161                 hdev->acl_cnt = cnt;
4162
4163         if (cnt != tmp)
4164                 hci_prio_recalculate(hdev, LE_LINK);
4165 }
4166
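/* TX work: run the per-link-type schedulers unless the device is in
 * user channel mode, then flush any raw packets queued by user space.
 */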
4167 static void hci_tx_work(struct work_struct *work)
4168 {
4169         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4170         struct sk_buff *skb;
4171
4172         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4173                hdev->sco_cnt, hdev->le_cnt);
4174
4175         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4176                 /* Schedule queues and send stuff to HCI driver */
4177                 hci_sched_acl(hdev);
4178                 hci_sched_sco(hdev);
4179                 hci_sched_esco(hdev);
4180                 hci_sched_le(hdev);
4181         }
4182
4183         /* Send next queued raw (unknown type) packet */
4184         while ((skb = skb_dequeue(&hdev->raw_q)))
4185                 hci_send_frame(hdev, skb);
4186 }
4187
4188 /* ----- HCI RX task (incoming data processing) ----- */
4189
4190 /* ACL data packet */
4191 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4192 {
4193         struct hci_acl_hdr *hdr = (void *) skb->data;
4194         struct hci_conn *conn;
4195         __u16 handle, flags;
4196
4197         skb_pull(skb, HCI_ACL_HDR_SIZE);
4198
4199         handle = __le16_to_cpu(hdr->handle);
4200         flags  = hci_flags(handle);
4201         handle = hci_handle(handle);
4202
4203         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4204                handle, flags);
4205
4206         hdev->stat.acl_rx++;
4207
4208         hci_dev_lock(hdev);
4209         conn = hci_conn_hash_lookup_handle(hdev, handle);
4210         hci_dev_unlock(hdev);
4211
4212         if (conn) {
4213                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4214
4215                 /* Send to upper protocol */
4216                 l2cap_recv_acldata(conn, skb, flags);
4217                 return;
4218         } else {
4219                 BT_ERR("%s ACL packet for unknown connection handle %d",
4220                        hdev->name, handle);
4221         }
4222
4223         kfree_skb(skb);
4224 }
4225
4226 /* SCO data packet */
4227 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4228 {
4229         struct hci_sco_hdr *hdr = (void *) skb->data;
4230         struct hci_conn *conn;
4231         __u16 handle;
4232
4233         skb_pull(skb, HCI_SCO_HDR_SIZE);
4234
4235         handle = __le16_to_cpu(hdr->handle);
4236
4237         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4238
4239         hdev->stat.sco_rx++;
4240
4241         hci_dev_lock(hdev);
4242         conn = hci_conn_hash_lookup_handle(hdev, handle);
4243         hci_dev_unlock(hdev);
4244
4245         if (conn) {
4246                 /* Send to upper protocol */
4247                 sco_recv_scodata(conn, skb);
4248                 return;
4249         } else {
4250                 BT_ERR("%s SCO packet for unknown connection handle %d",
4251                        hdev->name, handle);
4252         }
4253
4254         kfree_skb(skb);
4255 }
4256
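/* A request is complete when the next queued command starts a new
 * request (or when the command queue is empty).
 */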
4257 static bool hci_req_is_complete(struct hci_dev *hdev)
4258 {
4259         struct sk_buff *skb;
4260
4261         skb = skb_peek(&hdev->cmd_q);
4262         if (!skb)
4263                 return true;
4264
4265         return bt_cb(skb)->req.start;
4266 }
4267
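/* Requeue a clone of the last sent command. This is used when a
 * spontaneous reset on some controllers swallows the pending command;
 * HCI_Reset itself is never resent.
 */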
4268 static void hci_resend_last(struct hci_dev *hdev)
4269 {
4270         struct hci_command_hdr *sent;
4271         struct sk_buff *skb;
4272         u16 opcode;
4273
4274         if (!hdev->sent_cmd)
4275                 return;
4276
4277         sent = (void *) hdev->sent_cmd->data;
4278         opcode = __le16_to_cpu(sent->opcode);
4279         if (opcode == HCI_OP_RESET)
4280                 return;
4281
4282         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4283         if (!skb)
4284                 return;
4285
4286         skb_queue_head(&hdev->cmd_q, skb);
4287         queue_work(hdev->workqueue, &hdev->cmd_work);
4288 }
4289
4290 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4291                           hci_req_complete_t *req_complete,
4292                           hci_req_complete_skb_t *req_complete_skb)
4293 {
4294         struct sk_buff *skb;
4295         unsigned long flags;
4296
4297         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4298
4299         /* If the completed command doesn't match the last one that was
4300          * sent we need to do special handling of it.
4301          */
4302         if (!hci_sent_cmd_data(hdev, opcode)) {
4303                 /* Some CSR based controllers generate a spontaneous
4304                  * reset complete event during init and any pending
4305                  * command will never be completed. In such a case we
4306                  * need to resend whatever was the last sent
4307                  * command.
4308                  */
4309                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4310                         hci_resend_last(hdev);
4311
4312                 return;
4313         }
4314
4315         /* If the command succeeded and there's still more commands in
4316          * this request the request is not yet complete.
4317          */
4318         if (!status && !hci_req_is_complete(hdev))
4319                 return;
4320
4321         /* If this was the last command in a request the complete
4322          * callback would be found in hdev->sent_cmd instead of the
4323          * command queue (hdev->cmd_q).
4324          */
4325         if (bt_cb(hdev->sent_cmd)->req.complete) {
4326                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4327                 return;
4328         }
4329
4330         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4331                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4332                 return;
4333         }
4334
4335         /* Remove all pending commands belonging to this request */
4336         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4337         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4338                 if (bt_cb(skb)->req.start) {
4339                         __skb_queue_head(&hdev->cmd_q, skb);
4340                         break;
4341                 }
4342
4343                 *req_complete = bt_cb(skb)->req.complete;
4344                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4345                 kfree_skb(skb);
4346         }
4347         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4348 }
4349
4350 static void hci_rx_work(struct work_struct *work)
4351 {
4352         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4353         struct sk_buff *skb;
4354
4355         BT_DBG("%s", hdev->name);
4356
4357         while ((skb = skb_dequeue(&hdev->rx_q))) {
4358                 /* Send copy to monitor */
4359                 hci_send_to_monitor(hdev, skb);
4360
4361                 if (atomic_read(&hdev->promisc)) {
4362                         /* Send copy to the sockets */
4363                         hci_send_to_sock(hdev, skb);
4364                 }
4365
4366                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4367                         kfree_skb(skb);
4368                         continue;
4369                 }
4370
4371                 if (test_bit(HCI_INIT, &hdev->flags)) {
4372                         /* Don't process data packets in this state. */
4373                         switch (bt_cb(skb)->pkt_type) {
4374                         case HCI_ACLDATA_PKT:
4375                         case HCI_SCODATA_PKT:
4376                                 kfree_skb(skb);
4377                                 continue;
4378                         }
4379                 }
4380
4381                 /* Process frame */
4382                 switch (bt_cb(skb)->pkt_type) {
4383                 case HCI_EVENT_PKT:
4384                         BT_DBG("%s Event packet", hdev->name);
4385                         hci_event_packet(hdev, skb);
4386                         break;
4387
4388                 case HCI_ACLDATA_PKT:
4389                         BT_DBG("%s ACL data packet", hdev->name);
4390                         hci_acldata_packet(hdev, skb);
4391                         break;
4392
4393                 case HCI_SCODATA_PKT:
4394                         BT_DBG("%s SCO data packet", hdev->name);
4395                         hci_scodata_packet(hdev, skb);
4396                         break;
4397
4398                 default:
4399                         kfree_skb(skb);
4400                         break;
4401                 }
4402         }
4403 }
4404
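/* Command scheduler: send the next queued command whenever the
 * controller has command credits (cmd_cnt), keep a clone in
 * hdev->sent_cmd for completion matching, and arm the command timer.
 */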
4405 static void hci_cmd_work(struct work_struct *work)
4406 {
4407         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4408         struct sk_buff *skb;
4409
4410         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4411                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4412
4413         /* Send queued commands */
4414         if (atomic_read(&hdev->cmd_cnt)) {
4415                 skb = skb_dequeue(&hdev->cmd_q);
4416                 if (!skb)
4417                         return;
4418
4419                 kfree_skb(hdev->sent_cmd);
4420
4421                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4422                 if (hdev->sent_cmd) {
4423                         atomic_dec(&hdev->cmd_cnt);
4424                         hci_send_frame(hdev, skb);
4425                         if (test_bit(HCI_RESET, &hdev->flags))
4426                                 cancel_delayed_work(&hdev->cmd_timer);
4427                         else
4428                                 schedule_delayed_work(&hdev->cmd_timer,
4429                                                       HCI_CMD_TIMEOUT);
4430                 } else {
4431                         skb_queue_head(&hdev->cmd_q, skb);
4432                         queue_work(hdev->workqueue, &hdev->cmd_work);
4433                 }
4434         }
4435 }